From cc41f4ee29c8f71b64db3a24670a86cc3726acf1 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 1 Apr 2011 14:32:09 -0700 Subject: [PATCH] --- yaml --- r: 245631 b: refs/heads/master c: 304529b1b6f8612ccbb4582e997051b48b94f4a4 h: refs/heads/master i: 245629: 45094867824a4e601ccf038d1c4dde91f63e5b5d 245627: 42606af8f27ee09d045459a70e862678e591989b 245623: b00a5f41b6c7b81c88964d7ec084d4f1793a334e 245615: cc7c50a8fa47c74b09295e8bd8c60ec8c7a9d49b 245599: 1a6c568df27b478a7eb23802567ece244d0bc1d5 245567: 67648d6f37f245bc115c917543e7430048be9115 245503: 0ddf95c0b7b7fa09762f8d70f81afa7854636e93 v: v3 --- [refs] | 2 +- trunk/Documentation/00-INDEX | 2 + trunk/Documentation/ABI/testing/sysfs-power | 14 - trunk/Documentation/DocBook/genericirq.tmpl | 82 +- .../Documentation/DocBook/media-entities.tmpl | 1 - .../DocBook/v4l/media-ioc-setup-link.xml | 2 +- .../Documentation/DocBook/v4l/pixfmt-y12.xml | 79 - trunk/Documentation/DocBook/v4l/pixfmt.xml | 1 - .../DocBook/v4l/subdev-formats.xml | 59 - trunk/Documentation/cgroups/memory.txt | 15 +- .../feature-removal-schedule.txt | 8 + trunk/Documentation/flexible-arrays.txt | 4 +- trunk/Documentation/hwmon/adm1021 | 36 +- trunk/Documentation/hwmon/lm90 | 29 +- trunk/Documentation/kernel-parameters.txt | 2 +- trunk/Documentation/{virtual => }/kvm/api.txt | 0 .../Documentation/{virtual => }/kvm/cpuid.txt | 0 .../{virtual => }/kvm/locking.txt | 0 trunk/Documentation/{virtual => }/kvm/mmu.txt | 0 trunk/Documentation/{virtual => }/kvm/msr.txt | 0 .../{virtual => }/kvm/ppc-pv.txt | 0 .../{virtual => }/kvm/review-checklist.txt | 2 +- .../{virtual => }/kvm/timekeeping.txt | 0 .../{virtual => }/lguest/.gitignore | 0 .../{virtual => }/lguest/Makefile | 0 .../{virtual => }/lguest/extract | 0 .../{virtual => }/lguest/lguest.c | 0 .../{virtual => }/lguest/lguest.txt | 3 +- trunk/Documentation/power/devices.txt | 14 +- trunk/Documentation/power/notifiers.txt | 51 +- trunk/Documentation/trace/kprobetrace.txt | 1 + .../{virtual => }/uml/UserModeLinux-HOWTO.txt | 0 .../video4linux/sh_mobile_ceu_camera.txt | 6 +- trunk/Documentation/virtual/00-INDEX | 10 - trunk/Documentation/workqueue.txt | 40 - .../Documentation/x86/x86_64/boot-options.txt | 2 +- trunk/MAINTAINERS | 124 +- trunk/Makefile | 3 +- trunk/arch/alpha/include/asm/unistd.h | 6 +- trunk/arch/alpha/kernel/smp.c | 3 +- trunk/arch/alpha/kernel/systbls.S | 12 +- trunk/arch/alpha/kernel/time.c | 3 +- trunk/arch/arm/boot/compressed/Makefile | 2 +- trunk/arch/arm/boot/compressed/head.S | 35 +- trunk/arch/arm/boot/compressed/vmlinux.lds.in | 1 - trunk/arch/arm/common/vic.c | 69 +- trunk/arch/arm/configs/at91x40_defconfig | 48 - trunk/arch/arm/include/asm/i8253.h | 15 - trunk/arch/arm/include/asm/kprobes.h | 3 - trunk/arch/arm/include/asm/mach/time.h | 1 + trunk/arch/arm/include/asm/system.h | 2 +- trunk/arch/arm/kernel/kprobes-decode.c | 777 ++++---- trunk/arch/arm/kernel/kprobes.c | 3 +- trunk/arch/arm/kernel/leds.c | 28 +- trunk/arch/arm/kernel/perf_event.c | 3 +- trunk/arch/arm/kernel/ptrace.c | 8 - trunk/arch/arm/kernel/signal.c | 90 +- trunk/arch/arm/kernel/smp.c | 7 +- trunk/arch/arm/kernel/sys_oabi-compat.c | 2 +- trunk/arch/arm/kernel/time.c | 35 +- trunk/arch/arm/mach-at91/Kconfig | 1 - trunk/arch/arm/mach-at91/board-eb01.c | 7 +- trunk/arch/arm/mach-at91/include/mach/cpu.h | 28 - trunk/arch/arm/mach-davinci/Kconfig | 6 - .../arm/mach-davinci/board-mityomapl138.c | 4 +- trunk/arch/arm/mach-davinci/cpufreq.c | 4 +- trunk/arch/arm/mach-davinci/devices-da8xx.c | 12 +- trunk/arch/arm/mach-davinci/dm355.c | 2 +- 
trunk/arch/arm/mach-davinci/dm644x.c | 2 +- .../mach-davinci/include/mach/debug-macro.S | 13 +- .../arm/mach-davinci/include/mach/serial.h | 2 +- trunk/arch/arm/mach-exynos4/pm.c | 45 +- trunk/arch/arm/mach-footbridge/Kconfig | 2 - trunk/arch/arm/mach-footbridge/isa-timer.c | 45 +- .../arch/arm/mach-integrator/integrator_ap.c | 26 +- trunk/arch/arm/mach-mx3/mach-vpr200.c | 11 +- trunk/arch/arm/mach-mx5/board-mx53_loco.c | 2 +- trunk/arch/arm/mach-mxs/clock-mx28.c | 7 +- trunk/arch/arm/mach-omap1/pm_bus.c | 69 +- trunk/arch/arm/mach-omap2/Makefile | 8 +- trunk/arch/arm/mach-omap2/board-rx51.c | 9 +- trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c | 1 - trunk/arch/arm/mach-omap2/clock44xx_data.c | 9 +- trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c | 17 - trunk/arch/arm/mach-omap2/control.c | 8 +- .../arm/mach-omap2/omap_hwmod_2420_data.c | 6 +- .../arm/mach-omap2/omap_hwmod_2430_data.c | 7 +- .../arm/mach-omap2/omap_hwmod_3xxx_data.c | 8 +- .../arm/mach-omap2/omap_hwmod_44xx_data.c | 2 +- trunk/arch/arm/mach-omap2/omap_l3_smx.c | 4 +- trunk/arch/arm/mach-omap2/pm.c | 1 - trunk/arch/arm/mach-omap2/pm_bus.c | 85 + trunk/arch/arm/mach-omap2/voltage.c | 1 + trunk/arch/arm/mach-pxa/balloon3.c | 1 + trunk/arch/arm/mach-pxa/clock-pxa2xx.c | 18 +- trunk/arch/arm/mach-pxa/clock-pxa3xx.c | 17 +- trunk/arch/arm/mach-pxa/clock.h | 7 +- trunk/arch/arm/mach-pxa/cm-x270.c | 1 + trunk/arch/arm/mach-pxa/cm-x2xx.c | 23 +- trunk/arch/arm/mach-pxa/colibri-evalboard.c | 1 + .../arch/arm/mach-pxa/colibri-pxa270-income.c | 1 + trunk/arch/arm/mach-pxa/colibri-pxa270.c | 1 + trunk/arch/arm/mach-pxa/generic.h | 8 +- trunk/arch/arm/mach-pxa/hx4700.c | 2 +- trunk/arch/arm/mach-pxa/irq.c | 17 +- trunk/arch/arm/mach-pxa/lpd270.c | 20 +- trunk/arch/arm/mach-pxa/lubbock.c | 21 +- trunk/arch/arm/mach-pxa/magician.c | 2 +- trunk/arch/arm/mach-pxa/mainstone.c | 22 +- trunk/arch/arm/mach-pxa/mfp-pxa2xx.c | 12 +- trunk/arch/arm/mach-pxa/mfp-pxa3xx.c | 21 +- trunk/arch/arm/mach-pxa/mioa701.c | 43 +- trunk/arch/arm/mach-pxa/palmld.c | 1 + trunk/arch/arm/mach-pxa/palmtreo.c | 1 + trunk/arch/arm/mach-pxa/palmz72.c | 24 +- trunk/arch/arm/mach-pxa/pxa25x.c | 25 +- trunk/arch/arm/mach-pxa/pxa27x.c | 25 +- trunk/arch/arm/mach-pxa/pxa3xx.c | 25 +- trunk/arch/arm/mach-pxa/pxa95x.c | 20 +- trunk/arch/arm/mach-pxa/raumfeld.c | 1 + trunk/arch/arm/mach-pxa/smemc.c | 29 +- trunk/arch/arm/mach-pxa/trizeps4.c | 1 + trunk/arch/arm/mach-pxa/viper.c | 12 +- trunk/arch/arm/mach-pxa/vpac270.c | 1 + .../arm/mach-realview/include/mach/barriers.h | 2 +- trunk/arch/arm/mach-s3c2410/irq.c | 30 +- trunk/arch/arm/mach-s3c2410/mach-bast.c | 17 +- trunk/arch/arm/mach-s3c2410/pm.c | 13 +- trunk/arch/arm/mach-s3c2410/s3c2410.c | 5 - trunk/arch/arm/mach-s3c2412/irq.c | 2 + trunk/arch/arm/mach-s3c2412/mach-jive.c | 19 +- trunk/arch/arm/mach-s3c2412/pm.c | 27 +- trunk/arch/arm/mach-s3c2412/s3c2412.c | 4 - trunk/arch/arm/mach-s3c2416/irq.c | 2 + trunk/arch/arm/mach-s3c2416/pm.c | 27 +- trunk/arch/arm/mach-s3c2416/s3c2416.c | 5 - trunk/arch/arm/mach-s3c2440/mach-osiris.c | 18 +- trunk/arch/arm/mach-s3c2440/s3c2440.c | 8 - trunk/arch/arm/mach-s3c2440/s3c2442.c | 6 - trunk/arch/arm/mach-s3c2440/s3c244x-irq.c | 4 + trunk/arch/arm/mach-s3c2440/s3c244x.c | 62 +- trunk/arch/arm/mach-s3c64xx/irq-pm.c | 18 +- trunk/arch/arm/mach-s5pv210/pm.c | 25 +- trunk/arch/arm/mach-sa1100/irq.c | 19 +- trunk/arch/arm/mach-shmobile/pm_runtime.c | 145 +- .../arm/mach-tegra/include/mach/barriers.h | 2 +- trunk/arch/arm/mm/init.c | 16 +- trunk/arch/arm/mm/proc-xscale.S | 2 +- 
trunk/arch/arm/plat-mxc/gpio.c | 7 - trunk/arch/arm/plat-mxc/ssi-fiq.S | 2 - trunk/arch/arm/plat-omap/gpio.c | 35 +- trunk/arch/arm/plat-omap/iommu.c | 2 - trunk/arch/arm/plat-omap/omap_device.c | 23 - trunk/arch/arm/plat-pxa/gpio.c | 17 +- trunk/arch/arm/plat-pxa/mfp.c | 1 + trunk/arch/arm/plat-s3c24xx/dma.c | 68 +- trunk/arch/arm/plat-s3c24xx/irq-pm.c | 7 +- trunk/arch/arm/plat-s5p/irq-pm.c | 7 +- .../arch/arm/plat-samsung/include/plat/cpu.h | 6 - trunk/arch/arm/plat-samsung/include/plat/pm.h | 6 +- trunk/arch/arm/vfp/vfpmodule.c | 19 +- trunk/arch/avr32/mach-at32ap/intc.c | 38 +- trunk/arch/blackfin/kernel/nmi.c | 30 +- trunk/arch/blackfin/kernel/time-ts.c | 35 +- trunk/arch/blackfin/mach-common/dpmc.c | 3 + trunk/arch/blackfin/mach-common/smp.c | 3 - trunk/arch/cris/arch-v32/kernel/smp.c | 13 +- trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 44 +- trunk/arch/ia64/kernel/cyclone.c | 6 +- trunk/arch/ia64/kernel/irq_ia64.c | 2 - trunk/arch/ia64/kernel/time.c | 9 +- trunk/arch/ia64/sn/kernel/sn2/timer.c | 6 +- trunk/arch/ia64/xen/irq_xen.c | 10 +- trunk/arch/m32r/kernel/smp.c | 4 +- trunk/arch/m68k/atari/atakeyb.c | 9 +- trunk/arch/m68k/atari/stdma.c | 2 +- trunk/arch/m68k/include/asm/atarikb.h | 2 + trunk/arch/m68k/include/asm/bitops_mm.h | 87 +- trunk/arch/m68k/include/asm/unistd.h | 46 +- trunk/arch/m68k/kernel/Makefile_mm | 2 +- trunk/arch/m68k/kernel/entry_mm.S | 348 ++++ trunk/arch/m68k/kernel/syscalltable.S | 195 +- trunk/arch/m68k/mm/motorola.c | 2 - trunk/arch/microblaze/kernel/timer.c | 6 +- trunk/arch/mips/Kbuild.platforms | 1 - trunk/arch/mips/Kconfig | 67 +- trunk/arch/mips/Makefile | 12 - trunk/arch/mips/alchemy/common/dbdma.c | 123 +- trunk/arch/mips/alchemy/common/dma.c | 46 +- trunk/arch/mips/alchemy/common/irq.c | 345 ++-- trunk/arch/mips/alchemy/common/platform.c | 250 +-- trunk/arch/mips/alchemy/common/setup.c | 4 +- trunk/arch/mips/alchemy/common/time.c | 3 +- .../mips/alchemy/devboards/db1200/setup.c | 7 - .../alchemy/devboards/db1x00/board_setup.c | 61 +- .../alchemy/devboards/pb1000/board_setup.c | 2 +- .../alchemy/devboards/pb1500/board_setup.c | 2 +- trunk/arch/mips/alchemy/devboards/prom.c | 2 +- trunk/arch/mips/alchemy/gpr/board_setup.c | 14 +- trunk/arch/mips/alchemy/gpr/init.c | 2 +- trunk/arch/mips/alchemy/mtx-1/board_setup.c | 2 +- trunk/arch/mips/alchemy/mtx-1/init.c | 2 +- trunk/arch/mips/alchemy/mtx-1/platform.c | 4 +- trunk/arch/mips/alchemy/xxs1500/board_setup.c | 11 +- trunk/arch/mips/alchemy/xxs1500/init.c | 7 +- trunk/arch/mips/ar7/gpio.c | 4 +- trunk/arch/mips/bcm47xx/nvram.c | 3 +- trunk/arch/mips/bcm47xx/setup.c | 130 +- .../arch/mips/bcm63xx/boards/board_bcm963xx.c | 16 +- .../boot/compressed/calc_vmlinuz_load_addr.c | 2 +- .../arch/mips/boot/compressed/uart-alchemy.c | 2 +- trunk/arch/mips/cavium-octeon/Kconfig | 15 +- trunk/arch/mips/cavium-octeon/csrc-octeon.c | 3 +- trunk/arch/mips/cavium-octeon/setup.c | 7 + trunk/arch/mips/cavium-octeon/smp.c | 17 +- trunk/arch/mips/configs/lemote2f_defconfig | 6 +- trunk/arch/mips/configs/malta_defconfig | 2 +- trunk/arch/mips/configs/mtx1_defconfig | 4 +- trunk/arch/mips/configs/nlm_xlr_defconfig | 574 ------ trunk/arch/mips/include/asm/cache.h | 2 +- trunk/arch/mips/include/asm/cevt-r4k.h | 3 - trunk/arch/mips/include/asm/cpu.h | 27 - trunk/arch/mips/include/asm/dma-mapping.h | 2 - trunk/arch/mips/include/asm/hugetlb.h | 1 - trunk/arch/mips/include/asm/i8253.h | 5 - trunk/arch/mips/include/asm/jump_label.h | 22 +- .../mips/include/asm/mach-au1x00/au1000.h | 334 +++- 
.../mips/include/asm/mach-au1x00/au1000_dma.h | 4 + .../include/asm/mach-au1x00/au1xxx_dbdma.h | 8 + .../include/asm/mach-au1x00/gpio-au1000.h | 122 +- .../mips/include/asm/mach-bcm47xx/nvram.h | 12 +- .../include/asm/mach-bcm63xx/bcm963xx_tag.h | 2 +- .../mach-cavium-octeon/kernel-entry-init.h | 5 - .../mips/include/asm/mach-lantiq/lantiq.h | 63 - .../include/asm/mach-lantiq/lantiq_platform.h | 53 - trunk/arch/mips/include/asm/mach-lantiq/war.h | 24 - .../mips/include/asm/mach-lantiq/xway/irq.h | 18 - .../include/asm/mach-lantiq/xway/lantiq_irq.h | 66 - .../include/asm/mach-lantiq/xway/lantiq_soc.h | 141 -- .../include/asm/mach-lantiq/xway/xway_dma.h | 60 - .../asm/mach-netlogic/cpu-feature-overrides.h | 47 - .../arch/mips/include/asm/mach-netlogic/irq.h | 14 - .../arch/mips/include/asm/mach-netlogic/war.h | 26 - trunk/arch/mips/include/asm/module.h | 2 - .../mips/include/asm/netlogic/interrupt.h | 45 - .../mips/include/asm/netlogic/mips-extns.h | 76 - .../mips/include/asm/netlogic/psb-bootinfo.h | 109 -- .../arch/mips/include/asm/netlogic/xlr/gpio.h | 73 - .../mips/include/asm/netlogic/xlr/iomap.h | 131 -- .../arch/mips/include/asm/netlogic/xlr/pic.h | 231 --- .../arch/mips/include/asm/netlogic/xlr/xlr.h | 75 - trunk/arch/mips/include/asm/ptrace.h | 3 +- trunk/arch/mips/include/asm/thread_info.h | 3 - trunk/arch/mips/include/asm/time.h | 6 + trunk/arch/mips/jazz/jazzdma.c | 5 +- trunk/arch/mips/jz4740/dma.c | 4 +- trunk/arch/mips/jz4740/setup.c | 32 - trunk/arch/mips/jz4740/time.c | 5 +- trunk/arch/mips/jz4740/timer.c | 2 - trunk/arch/mips/kernel/Makefile | 1 - trunk/arch/mips/kernel/cevt-txx9.c | 3 +- trunk/arch/mips/kernel/cpu-probe.c | 83 +- trunk/arch/mips/kernel/csrc-bcm1480.c | 3 +- trunk/arch/mips/kernel/csrc-ioasic.c | 4 +- trunk/arch/mips/kernel/csrc-powertv.c | 35 +- trunk/arch/mips/kernel/csrc-r4k.c | 4 +- trunk/arch/mips/kernel/csrc-sb1250.c | 3 +- trunk/arch/mips/kernel/entry.S | 7 +- trunk/arch/mips/kernel/ftrace.c | 5 +- trunk/arch/mips/kernel/i8253.c | 78 +- trunk/arch/mips/kernel/ptrace.c | 43 +- trunk/arch/mips/kernel/scall32-o32.S | 5 +- trunk/arch/mips/kernel/scall64-64.S | 5 +- trunk/arch/mips/kernel/scall64-n32.S | 5 +- trunk/arch/mips/kernel/scall64-o32.S | 5 +- trunk/arch/mips/kernel/smtc.c | 2 +- trunk/arch/mips/kernel/syscall.c | 120 ++ trunk/arch/mips/kernel/traps.c | 6 +- trunk/arch/mips/kernel/vmlinux.lds.S | 2 - trunk/arch/mips/lantiq/Kconfig | 23 - trunk/arch/mips/lantiq/Makefile | 11 - trunk/arch/mips/lantiq/Platform | 8 - trunk/arch/mips/lantiq/clk.c | 140 -- trunk/arch/mips/lantiq/clk.h | 18 - trunk/arch/mips/lantiq/devices.c | 122 -- trunk/arch/mips/lantiq/devices.h | 23 - trunk/arch/mips/lantiq/early_printk.c | 33 - trunk/arch/mips/lantiq/irq.c | 326 ---- trunk/arch/mips/lantiq/machtypes.h | 20 - trunk/arch/mips/lantiq/prom.c | 71 - trunk/arch/mips/lantiq/prom.h | 25 - trunk/arch/mips/lantiq/setup.c | 66 - trunk/arch/mips/lantiq/xway/Kconfig | 23 - trunk/arch/mips/lantiq/xway/Makefile | 7 - trunk/arch/mips/lantiq/xway/clk-ase.c | 48 - trunk/arch/mips/lantiq/xway/clk-xway.c | 223 --- trunk/arch/mips/lantiq/xway/devices.c | 121 -- trunk/arch/mips/lantiq/xway/devices.h | 20 - trunk/arch/mips/lantiq/xway/dma.c | 253 --- trunk/arch/mips/lantiq/xway/ebu.c | 53 - trunk/arch/mips/lantiq/xway/gpio.c | 195 -- trunk/arch/mips/lantiq/xway/gpio_ebu.c | 126 -- trunk/arch/mips/lantiq/xway/gpio_stp.c | 157 -- trunk/arch/mips/lantiq/xway/mach-easy50601.c | 57 - trunk/arch/mips/lantiq/xway/mach-easy50712.c | 74 - trunk/arch/mips/lantiq/xway/pmu.c | 70 - 
trunk/arch/mips/lantiq/xway/prom-ase.c | 39 - trunk/arch/mips/lantiq/xway/prom-xway.c | 54 - trunk/arch/mips/lantiq/xway/reset.c | 91 - trunk/arch/mips/lantiq/xway/setup-ase.c | 19 - trunk/arch/mips/lantiq/xway/setup-xway.c | 20 - trunk/arch/mips/lib/Makefile | 1 - .../loongson/common/cs5536/cs5536_mfgpt.c | 5 +- trunk/arch/mips/loongson/common/env.c | 5 +- trunk/arch/mips/mm/Makefile | 4 +- trunk/arch/mips/mm/c-r4k.c | 3 +- trunk/arch/mips/mm/mmap.c | 122 -- trunk/arch/mips/mm/tlbex.c | 5 +- trunk/arch/mips/mti-malta/malta-init.c | 14 +- trunk/arch/mips/mti-malta/malta-int.c | 5 +- trunk/arch/mips/netlogic/Kconfig | 5 - trunk/arch/mips/netlogic/xlr/Makefile | 5 - trunk/arch/mips/netlogic/xlr/irq.c | 300 --- trunk/arch/mips/netlogic/xlr/platform.c | 98 - trunk/arch/mips/netlogic/xlr/setup.c | 188 -- trunk/arch/mips/netlogic/xlr/smp.c | 225 --- trunk/arch/mips/netlogic/xlr/smpboot.S | 94 - trunk/arch/mips/netlogic/xlr/time.c | 51 - trunk/arch/mips/netlogic/xlr/xlr_console.c | 46 - trunk/arch/mips/pci/Makefile | 2 - trunk/arch/mips/pci/ops-lantiq.c | 116 -- trunk/arch/mips/pci/pci-lantiq.c | 297 --- trunk/arch/mips/pci/pci-lantiq.h | 18 - trunk/arch/mips/pci/pci-xlr.c | 214 --- .../mips/pmc-sierra/msp71xx/msp_irq_per.c | 2 +- trunk/arch/mips/pmc-sierra/yosemite/smp.c | 4 - trunk/arch/mips/power/hibernate.S | 2 +- trunk/arch/mips/rb532/gpio.c | 2 +- trunk/arch/mips/sgi-ip22/ip22-platform.c | 4 +- trunk/arch/mips/sgi-ip22/ip22-time.c | 4 +- trunk/arch/mips/sgi-ip27/ip27-hubio.c | 3 +- trunk/arch/mips/sgi-ip27/ip27-irq.c | 2 - trunk/arch/mips/sgi-ip27/ip27-klnuma.c | 3 + trunk/arch/mips/sgi-ip27/ip27-timer.c | 16 +- trunk/arch/mips/sibyte/bcm1480/smp.c | 7 +- trunk/arch/mips/sibyte/sb1250/smp.c | 7 +- trunk/arch/mips/sni/time.c | 4 +- trunk/arch/mn10300/kernel/smp.c | 5 +- trunk/arch/parisc/kernel/smp.c | 5 +- trunk/arch/parisc/mm/init.c | 4 +- trunk/arch/powerpc/include/asm/8xx_immap.h | 4 +- trunk/arch/powerpc/include/asm/mpic.h | 3 + trunk/arch/powerpc/include/asm/uninorth.h | 2 +- trunk/arch/powerpc/kernel/ptrace.c | 12 +- trunk/arch/powerpc/kernel/smp.c | 4 +- trunk/arch/powerpc/platforms/83xx/suspend.c | 7 +- trunk/arch/powerpc/platforms/cell/spu_base.c | 28 +- trunk/arch/powerpc/platforms/powermac/pic.c | 42 +- trunk/arch/powerpc/sysdev/fsl_msi.c | 7 +- trunk/arch/powerpc/sysdev/ipic.c | 36 +- trunk/arch/powerpc/sysdev/mpic.c | 48 +- trunk/arch/s390/Kconfig | 1 - trunk/arch/s390/crypto/prng.c | 2 +- trunk/arch/s390/include/asm/cacheflush.h | 1 - trunk/arch/s390/include/asm/diag.h | 17 +- trunk/arch/s390/include/asm/ftrace.h | 4 +- trunk/arch/s390/include/asm/jump_label.h | 37 - trunk/arch/s390/include/asm/mmu_context.h | 2 +- trunk/arch/s390/kernel/Makefile | 2 +- trunk/arch/s390/kernel/diag.c | 21 + trunk/arch/s390/kernel/dis.c | 1 - trunk/arch/s390/kernel/entry.S | 2 +- trunk/arch/s390/kernel/entry64.S | 2 +- trunk/arch/s390/kernel/jump_label.c | 59 - trunk/arch/s390/kernel/smp.c | 6 +- trunk/arch/s390/mm/cmm.c | 2 +- trunk/arch/s390/mm/fault.c | 2 +- trunk/arch/s390/mm/pageattr.c | 5 - trunk/arch/s390/oprofile/hwsampler.c | 14 +- trunk/arch/s390/oprofile/hwsampler.h | 4 +- trunk/arch/s390/oprofile/init.c | 8 +- trunk/arch/sh/Kconfig | 1 + trunk/arch/sh/configs/apsh4ad0a_defconfig | 1 + trunk/arch/sh/configs/sdk7786_defconfig | 1 + .../arch/sh/kernel/cpu/shmobile/pm_runtime.c | 33 +- trunk/arch/sh/kernel/ptrace_32.c | 4 - trunk/arch/sh/kernel/smp.c | 2 - trunk/arch/sparc/include/asm/jump_label.h | 25 +- trunk/arch/sparc/include/asm/topology_64.h | 6 +- trunk/arch/sparc/kernel/apc.c | 2 
+- trunk/arch/sparc/kernel/pci_sabre.c | 5 +- trunk/arch/sparc/kernel/pci_schizo.c | 8 +- trunk/arch/sparc/kernel/pmc.c | 2 +- trunk/arch/sparc/kernel/smp_32.c | 14 +- trunk/arch/sparc/kernel/smp_64.c | 1 - trunk/arch/sparc/kernel/time_32.c | 2 +- trunk/arch/sparc/lib/checksum_32.S | 12 +- trunk/arch/tile/kernel/smp.c | 6 +- trunk/arch/um/Kconfig.um | 2 +- trunk/arch/um/include/asm/thread_info.h | 5 +- trunk/arch/um/kernel/smp.c | 2 +- trunk/arch/um/os-Linux/util.c | 23 +- trunk/arch/um/sys-i386/Makefile | 2 +- trunk/arch/um/sys-i386/atomic64_cx8_32.S | 225 --- trunk/arch/unicore32/kernel/irq.c | 23 +- trunk/arch/x86/Kconfig | 5 +- trunk/arch/x86/boot/memory.c | 2 +- trunk/arch/x86/include/asm/alternative-asm.h | 9 - trunk/arch/x86/include/asm/alternative.h | 3 +- trunk/arch/x86/include/asm/amd_iommu_proto.h | 13 +- trunk/arch/x86/include/asm/amd_iommu_types.h | 28 +- trunk/arch/x86/include/asm/apicdef.h | 1 - trunk/arch/x86/include/asm/cpufeature.h | 1 - trunk/arch/x86/include/asm/ftrace.h | 7 +- trunk/arch/x86/include/asm/i8253.h | 2 - trunk/arch/x86/include/asm/io_apic.h | 2 +- trunk/arch/x86/include/asm/jump_label.h | 27 +- trunk/arch/x86/include/asm/pgtable_types.h | 1 - trunk/arch/x86/include/asm/setup.h | 2 +- trunk/arch/x86/include/asm/stacktrace.h | 3 + trunk/arch/x86/include/asm/uaccess.h | 2 +- trunk/arch/x86/include/asm/uv/uv_bau.h | 17 +- trunk/arch/x86/include/asm/uv/uv_hub.h | 2 - trunk/arch/x86/include/asm/uv/uv_mmrs.h | 16 +- trunk/arch/x86/include/asm/x86_init.h | 12 - trunk/arch/x86/include/asm/xen/page.h | 5 +- trunk/arch/x86/include/asm/xen/pci.h | 16 - trunk/arch/x86/kernel/Makefile | 2 +- trunk/arch/x86/kernel/acpi/sleep.c | 5 + trunk/arch/x86/kernel/alternative.c | 11 +- trunk/arch/x86/kernel/amd_iommu.c | 526 +++--- trunk/arch/x86/kernel/amd_iommu_init.c | 48 +- trunk/arch/x86/kernel/apb_timer.c | 10 +- trunk/arch/x86/kernel/apic/io_apic.c | 10 +- trunk/arch/x86/kernel/apic/x2apic_uv_x.c | 48 +- trunk/arch/x86/kernel/apm_32.c | 4 + trunk/arch/x86/kernel/cpu/Makefile | 1 + trunk/arch/x86/kernel/cpu/amd.c | 2 +- trunk/arch/x86/kernel/cpu/common.c | 3 +- .../x86/kernel/cpu/cpufreq/Kconfig} | 13 +- trunk/arch/x86/kernel/cpu/cpufreq/Makefile | 21 + .../x86/kernel/cpu}/cpufreq/acpi-cpufreq.c | 45 +- .../x86/kernel/cpu}/cpufreq/cpufreq-nforce2.c | 6 +- .../x86/kernel/cpu}/cpufreq/e_powersaver.c | 0 .../x86/kernel/cpu}/cpufreq/elanfreq.c | 0 .../x86/kernel/cpu}/cpufreq/gx-suspmod.c | 21 +- .../x86/kernel/cpu}/cpufreq/longhaul.c | 11 +- .../x86/kernel/cpu}/cpufreq/longhaul.h | 0 .../x86/kernel/cpu}/cpufreq/longrun.c | 17 +- .../x86/kernel/cpu}/cpufreq/mperf.c | 0 .../x86/kernel/cpu}/cpufreq/mperf.h | 0 .../x86/kernel/cpu}/cpufreq/p4-clockmod.c | 10 +- .../x86/kernel/cpu}/cpufreq/pcc-cpufreq.c | 51 +- .../x86/kernel/cpu}/cpufreq/powernow-k6.c | 0 .../x86/kernel/cpu}/cpufreq/powernow-k7.c | 33 +- .../x86/kernel/cpu}/cpufreq/powernow-k7.h | 0 .../x86/kernel/cpu}/cpufreq/powernow-k8.c | 100 +- .../x86/kernel/cpu}/cpufreq/powernow-k8.h | 2 + .../x86/kernel/cpu}/cpufreq/sc520_freq.c | 6 +- .../kernel/cpu}/cpufreq/speedstep-centrino.c | 23 +- .../x86/kernel/cpu}/cpufreq/speedstep-ich.c | 28 +- .../x86/kernel/cpu}/cpufreq/speedstep-lib.c | 43 +- .../x86/kernel/cpu}/cpufreq/speedstep-lib.h | 0 .../x86/kernel/cpu}/cpufreq/speedstep-smi.c | 41 +- trunk/arch/x86/kernel/cpu/intel.c | 19 +- trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c | 1 - .../arch/x86/kernel/cpu/mcheck/therm_throt.c | 12 +- trunk/arch/x86/kernel/cpu/perf_event.c | 44 +- trunk/arch/x86/kernel/cpu/perf_event_amd.c | 14 +- 
trunk/arch/x86/kernel/cpu/perf_event_intel.c | 153 +- trunk/arch/x86/kernel/cpu/perf_event_p4.c | 24 +- trunk/arch/x86/kernel/devicetree.c | 2 +- trunk/arch/x86/kernel/dumpstack.c | 16 + trunk/arch/x86/kernel/hpet.c | 72 +- trunk/arch/x86/kernel/i8253.c | 86 +- trunk/arch/x86/kernel/kprobes.c | 5 +- trunk/arch/x86/kernel/kvmclock.c | 6 +- trunk/arch/x86/kernel/module.c | 1 - .../kernel/{amd_gart_64.c => pci-gart_64.c} | 0 trunk/arch/x86/kernel/pci-iommu_table.c | 18 +- trunk/arch/x86/kernel/ptrace.c | 36 +- trunk/arch/x86/kernel/reboot_32.S | 12 +- trunk/arch/x86/kernel/smp.c | 5 +- trunk/arch/x86/kernel/stacktrace.c | 13 + trunk/arch/x86/kernel/x86_init.c | 4 - trunk/arch/x86/lguest/boot.c | 6 +- trunk/arch/x86/lib/clear_page_64.S | 33 +- trunk/arch/x86/lib/copy_user_64.S | 69 +- trunk/arch/x86/lib/memcpy_64.S | 45 +- trunk/arch/x86/lib/memmove_64.S | 29 +- trunk/arch/x86/lib/memset_64.S | 54 +- trunk/arch/x86/mm/init.c | 24 +- trunk/arch/x86/mm/numa_64.c | 2 +- trunk/arch/x86/oprofile/backtrace.c | 13 + trunk/arch/x86/pci/xen.c | 96 +- .../arch/x86/platform/ce4100/falconfalls.dts | 6 +- trunk/arch/x86/platform/uv/tlb_uv.c | 92 +- trunk/arch/x86/platform/uv/uv_time.c | 6 +- trunk/arch/x86/xen/enlighten.c | 20 +- trunk/arch/x86/xen/irq.c | 2 +- trunk/arch/x86/xen/mmu.c | 53 +- trunk/arch/x86/xen/p2m.c | 43 +- trunk/arch/x86/xen/setup.c | 10 +- trunk/arch/x86/xen/smp.c | 13 +- trunk/arch/x86/xen/time.c | 14 +- trunk/arch/x86/xen/xen-ops.h | 2 +- trunk/block/blk-cgroup.c | 7 - trunk/block/blk-cgroup.h | 3 - trunk/block/blk-core.c | 4 +- trunk/block/blk-throttle.c | 9 +- trunk/block/cfq-iosched.c | 11 +- trunk/drivers/Kconfig | 3 - trunk/drivers/acpi/processor_perflib.c | 6 +- trunk/drivers/acpi/scan.c | 4 - trunk/drivers/ata/libahci.c | 21 + trunk/drivers/ata/libata-eh.c | 2 +- trunk/drivers/atm/fore200e.c | 7 +- trunk/drivers/base/Kconfig | 7 + trunk/drivers/base/base.h | 2 + trunk/drivers/base/dd.c | 6 +- trunk/drivers/base/firmware_class.c | 5 - trunk/drivers/base/platform.c | 138 +- trunk/drivers/base/power/Makefile | 4 +- trunk/drivers/base/power/clock_ops.c | 431 ----- trunk/drivers/base/power/generic_ops.c | 39 - trunk/drivers/base/power/main.c | 83 +- trunk/drivers/base/power/runtime.c | 29 +- trunk/drivers/base/power/sysfs.c | 4 +- trunk/drivers/base/power/wakeup.c | 3 +- trunk/drivers/base/sys.c | 202 +- trunk/drivers/block/DAC960.c | 1 + trunk/drivers/block/amiflop.c | 1 + trunk/drivers/block/ataflop.c | 1 + trunk/drivers/block/floppy.c | 1 + trunk/drivers/block/paride/pcd.c | 1 + trunk/drivers/block/paride/pd.c | 1 + trunk/drivers/block/paride/pf.c | 1 + trunk/drivers/block/rbd.c | 181 +- trunk/drivers/block/swim.c | 1 + trunk/drivers/block/swim3.c | 1 + trunk/drivers/block/ub.c | 1 + trunk/drivers/block/xsysace.c | 1 + trunk/drivers/cdrom/cdrom.c | 6 +- trunk/drivers/cdrom/gdrom.c | 1 + trunk/drivers/cdrom/viocd.c | 1 + trunk/drivers/char/hpet.c | 6 +- trunk/drivers/char/hw_random/n2-drv.c | 7 +- trunk/drivers/char/ipmi/ipmi_si_intf.c | 7 +- .../char/xilinx_hwicap/xilinx_hwicap.c | 14 +- trunk/drivers/clk/clkdev.c | 19 +- trunk/drivers/clocksource/Kconfig | 2 - trunk/drivers/clocksource/Makefile | 1 - trunk/drivers/clocksource/cyclone.c | 10 +- trunk/drivers/clocksource/i8253.c | 88 - trunk/drivers/cpufreq/Kconfig | 23 +- trunk/drivers/cpufreq/Makefile | 26 - trunk/drivers/cpufreq/cpufreq.c | 215 ++- trunk/drivers/cpufreq/cpufreq_performance.c | 5 +- trunk/drivers/cpufreq/cpufreq_powersave.c | 5 +- trunk/drivers/cpufreq/cpufreq_stats.c | 24 +- 
trunk/drivers/cpufreq/cpufreq_userspace.c | 13 +- trunk/drivers/cpufreq/freq_table.c | 19 +- trunk/drivers/edac/ppc4xx_edac.c | 2 +- trunk/drivers/firewire/ohci.c | 39 +- trunk/drivers/firmware/iscsi_ibft_find.c | 51 +- trunk/drivers/gpu/drm/Kconfig | 1 - trunk/drivers/gpu/drm/drm_fb_helper.c | 53 +- trunk/drivers/gpu/drm/drm_irq.c | 23 - trunk/drivers/gpu/drm/drm_mm.c | 6 +- trunk/drivers/gpu/drm/i915/i915_dma.c | 2 +- trunk/drivers/gpu/drm/i915/i915_drv.c | 2 +- trunk/drivers/gpu/drm/i915/intel_display.c | 8 +- trunk/drivers/gpu/drm/i915/intel_dp.c | 17 +- trunk/drivers/gpu/drm/i915/intel_drv.h | 1 - trunk/drivers/gpu/drm/i915/intel_fb.c | 10 - trunk/drivers/gpu/drm/i915/intel_lvds.c | 3 - trunk/drivers/gpu/drm/nouveau/nouveau_mem.c | 2 + trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c | 3 +- trunk/drivers/gpu/drm/nouveau/nouveau_state.c | 5 - trunk/drivers/gpu/drm/radeon/evergreen.c | 22 +- trunk/drivers/gpu/drm/radeon/evergreend.h | 6 - trunk/drivers/gpu/drm/radeon/ni.c | 18 +- .../drivers/gpu/drm/radeon/radeon_atombios.c | 23 +- .../gpu/drm/radeon/radeon_atpx_handler.c | 29 +- trunk/drivers/gpu/drm/radeon/radeon_cursor.c | 6 +- trunk/drivers/gpu/drm/radeon/radeon_gart.c | 6 +- trunk/drivers/gpu/drm/radeon/radeon_kms.c | 16 - trunk/drivers/gpu/drm/radeon/reg_srcs/cayman | 1 - .../drivers/gpu/drm/radeon/reg_srcs/evergreen | 1 - trunk/drivers/gpu/drm/radeon/reg_srcs/r600 | 1 - trunk/drivers/gpu/vga/vga_switcheroo.c | 6 +- trunk/drivers/hwmon/Kconfig | 11 +- trunk/drivers/hwmon/lm85.c | 6 +- trunk/drivers/hwmon/lm90.c | 22 +- trunk/drivers/hwmon/twl4030-madc-hwmon.c | 3 +- trunk/drivers/i2c/busses/i2c-i801.c | 5 - trunk/drivers/i2c/busses/i2c-mpc.c | 9 +- trunk/drivers/i2c/busses/i2c-parport.c | 27 +- trunk/drivers/i2c/busses/i2c-pnx.c | 2 +- trunk/drivers/infiniband/core/cma.c | 207 +- trunk/drivers/infiniband/core/iwcm.c | 2 +- trunk/drivers/infiniband/core/ucma.c | 7 - trunk/drivers/infiniband/hw/cxgb4/cm.c | 46 +- trunk/drivers/infiniband/hw/cxgb4/device.c | 115 +- trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 36 +- trunk/drivers/infiniband/hw/cxgb4/provider.c | 2 + trunk/drivers/infiniband/hw/cxgb4/qp.c | 3 +- .../infiniband/hw/ipath/ipath_driver.c | 9 +- trunk/drivers/infiniband/hw/nes/nes_cm.c | 16 +- trunk/drivers/infiniband/hw/nes/nes_verbs.c | 2 +- trunk/drivers/infiniband/hw/qib/qib_iba6120.c | 2 +- trunk/drivers/infiniband/hw/qib/qib_iba7220.c | 2 +- trunk/drivers/infiniband/hw/qib/qib_iba7322.c | 5 +- trunk/drivers/infiniband/hw/qib/qib_pcie.c | 5 +- trunk/drivers/input/keyboard/atakbd.c | 5 +- trunk/drivers/input/mouse/atarimouse.c | 15 +- trunk/drivers/input/touchscreen/ads7846.c | 13 +- trunk/drivers/input/touchscreen/wm831x-ts.c | 75 +- trunk/drivers/leds/leds-lm3530.c | 1 - trunk/drivers/lguest/Kconfig | 6 +- trunk/drivers/lguest/Makefile | 2 +- .../media/common/tuners/tda18271-common.c | 11 +- .../drivers/media/common/tuners/tda18271-fe.c | 21 +- .../media/common/tuners/tda18271-maps.c | 12 +- trunk/drivers/media/dvb/b2c2/flexcop-pci.c | 2 +- trunk/drivers/media/dvb/dvb-usb/Kconfig | 6 +- .../media/dvb/dvb-usb/dib0700_devices.c | 6 +- trunk/drivers/media/dvb/ngene/ngene-core.c | 1 - trunk/drivers/media/media-entity.c | 8 +- trunk/drivers/media/radio/radio-sf16fmr2.c | 2 +- trunk/drivers/media/radio/saa7706h.c | 2 +- trunk/drivers/media/radio/tef6862.c | 2 +- trunk/drivers/media/rc/imon.c | 31 +- trunk/drivers/media/rc/ite-cir.c | 1 - trunk/drivers/media/rc/mceusb.c | 2 - trunk/drivers/media/rc/rc-main.c | 4 +- trunk/drivers/media/video/Kconfig | 2 +- 
trunk/drivers/media/video/cx18/cx18-streams.c | 10 +- trunk/drivers/media/video/cx23885/Kconfig | 1 - trunk/drivers/media/video/cx88/cx88-input.c | 2 +- trunk/drivers/media/video/imx074.c | 2 +- trunk/drivers/media/video/m52790.c | 2 +- trunk/drivers/media/video/omap3isp/isp.c | 34 +- trunk/drivers/media/video/omap3isp/isp.h | 12 +- trunk/drivers/media/video/omap3isp/ispccdc.c | 37 +- .../drivers/media/video/omap3isp/isppreview.c | 2 +- trunk/drivers/media/video/omap3isp/ispqueue.c | 6 +- .../drivers/media/video/omap3isp/ispresizer.c | 75 +- trunk/drivers/media/video/omap3isp/ispstat.h | 6 +- trunk/drivers/media/video/omap3isp/ispvideo.c | 108 +- trunk/drivers/media/video/omap3isp/ispvideo.h | 3 - .../media/video/s5p-fimc/fimc-capture.c | 8 +- .../drivers/media/video/s5p-fimc/fimc-core.c | 74 +- .../media/video/sh_mobile_ceu_camera.c | 10 +- trunk/drivers/media/video/sh_mobile_csi2.c | 11 +- trunk/drivers/media/video/soc_camera.c | 55 +- trunk/drivers/media/video/tda9840.c | 2 +- trunk/drivers/media/video/tea6415c.c | 2 +- trunk/drivers/media/video/tea6420.c | 2 +- trunk/drivers/media/video/upd64031a.c | 2 +- trunk/drivers/media/video/upd64083.c | 2 +- trunk/drivers/media/video/v4l2-dev.c | 15 +- trunk/drivers/media/video/v4l2-device.c | 5 +- trunk/drivers/media/video/v4l2-subdev.c | 14 +- trunk/drivers/media/video/videobuf2-core.c | 17 +- .../media/video/videobuf2-dma-contig.c | 2 +- trunk/drivers/message/i2o/i2o_block.c | 1 + trunk/drivers/mfd/asic3.c | 2 +- trunk/drivers/mfd/omap-usb-host.c | 17 +- trunk/drivers/mfd/twl4030-power.c | 3 +- trunk/drivers/mmc/core/bus.c | 1 - trunk/drivers/mmc/host/omap.c | 2 +- trunk/drivers/mmc/host/sdhci-of-core.c | 7 +- trunk/drivers/mmc/host/sdhci-pci.c | 1 - trunk/drivers/mmc/host/sdhci.c | 9 +- trunk/drivers/mmc/host/tmio_mmc_pio.c | 10 +- trunk/drivers/mtd/maps/Kconfig | 7 - trunk/drivers/mtd/maps/Makefile | 1 - trunk/drivers/mtd/maps/lantiq-flash.c | 251 --- trunk/drivers/mtd/maps/physmap_of.c | 7 +- trunk/drivers/mtd/nand/au1550nd.c | 3 +- trunk/drivers/mtd/nand/diskonchip.c | 2 +- trunk/drivers/net/Kconfig | 15 +- trunk/drivers/net/Makefile | 7 +- trunk/drivers/net/amd8111e.c | 2 +- trunk/drivers/net/arm/etherh.c | 4 +- trunk/drivers/net/atarilance.c | 2 +- trunk/drivers/net/atl1c/atl1c.h | 6 +- trunk/drivers/net/atl1c/atl1c_main.c | 14 +- trunk/drivers/net/benet/be.h | 2 +- trunk/drivers/net/benet/be_cmds.c | 2 +- trunk/drivers/net/benet/be_main.c | 19 +- trunk/drivers/net/bnx2.c | 2 - trunk/drivers/net/bnx2x/bnx2x_cmn.c | 34 +- trunk/drivers/net/bonding/bond_3ad.c | 7 +- trunk/drivers/net/bonding/bond_3ad.h | 10 +- trunk/drivers/net/can/mscan/mpc5xxx_can.c | 7 +- trunk/drivers/net/can/sja1000/sja1000.c | 2 +- trunk/drivers/net/can/slcan.c | 4 +- trunk/drivers/net/ehea/ehea_ethtool.c | 21 +- trunk/drivers/net/ehea/ehea_main.c | 15 +- trunk/drivers/net/fs_enet/fs_enet-main.c | 9 +- trunk/drivers/net/fs_enet/mac-fec.c | 8 +- trunk/drivers/net/fs_enet/mii-fec.c | 7 +- trunk/drivers/net/ftmac100.c | 8 +- trunk/drivers/net/hydra.c | 14 +- trunk/drivers/net/lantiq_etop.c | 805 -------- trunk/drivers/net/mii.c | 4 - trunk/drivers/net/ne-h8300.c | 16 +- trunk/drivers/net/netconsole.c | 8 - trunk/drivers/net/pch_gbe/pch_gbe_main.c | 23 +- trunk/drivers/net/r8169.c | 99 +- trunk/drivers/net/sfc/mcdi.c | 49 +- trunk/drivers/net/sfc/nic.c | 7 - trunk/drivers/net/sfc/nic.h | 2 - trunk/drivers/net/sfc/siena.c | 25 +- trunk/drivers/net/slip.c | 4 +- trunk/drivers/net/sunhme.c | 7 +- trunk/drivers/net/tg3.c | 8 +- trunk/drivers/net/usb/cdc_ether.c | 16 +- 
trunk/drivers/net/usb/cdc_ncm.c | 4 +- trunk/drivers/net/usb/ipheth.c | 14 +- trunk/drivers/net/usb/smsc95xx.c | 2 +- trunk/drivers/net/usb/usbnet.c | 18 +- trunk/drivers/net/veth.c | 12 - trunk/drivers/net/vmxnet3/vmxnet3_drv.c | 10 +- trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c | 3 - trunk/drivers/net/wireless/ath/ath9k/main.c | 8 - trunk/drivers/net/wireless/ath/ath9k/recv.c | 2 +- trunk/drivers/net/wireless/b43/main.c | 1 - .../net/wireless/iwlegacy/iwl-4965-tx.c | 28 +- .../drivers/net/wireless/iwlegacy/iwl-core.c | 7 - trunk/drivers/net/wireless/iwlegacy/iwl-dev.h | 6 - trunk/drivers/net/wireless/iwlegacy/iwl-led.c | 20 +- .../net/wireless/iwlegacy/iwl4965-base.c | 8 +- .../net/wireless/iwlwifi/iwl-agn-rxon.c | 7 +- .../drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 27 +- trunk/drivers/net/wireless/libertas/cmd.c | 6 +- trunk/drivers/net/zorro8390.c | 12 +- trunk/drivers/pci/intel-iommu.c | 1 - trunk/drivers/pci/iov.c | 1 - trunk/drivers/pci/pci.h | 37 + trunk/drivers/pci/setup-bus.c | 4 +- trunk/drivers/pcmcia/pcmcia_resource.c | 2 +- trunk/drivers/platform/x86/eeepc-laptop.c | 57 +- trunk/drivers/platform/x86/sony-laptop.c | 130 +- trunk/drivers/platform/x86/thinkpad_acpi.c | 6 +- trunk/drivers/rapidio/switches/idt_gen2.c | 9 - trunk/drivers/rapidio/switches/idtcps.c | 6 - trunk/drivers/rapidio/switches/tsi57x.c | 6 - trunk/drivers/rtc/class.c | 23 +- trunk/drivers/rtc/rtc-davinci.c | 5 +- trunk/drivers/rtc/rtc-ds1286.c | 2 +- trunk/drivers/rtc/rtc-ep93xx.c | 5 +- trunk/drivers/rtc/rtc-m41t80.c | 5 +- trunk/drivers/rtc/rtc-max8925.c | 8 +- trunk/drivers/rtc/rtc-max8998.c | 5 +- trunk/drivers/rtc/rtc-mc13xxx.c | 8 +- trunk/drivers/rtc/rtc-msm6242.c | 3 +- trunk/drivers/rtc/rtc-mxc.c | 19 +- trunk/drivers/rtc/rtc-pcap.c | 4 +- trunk/drivers/rtc/rtc-rp5c01.c | 5 +- trunk/drivers/rtc/rtc-s3c.c | 13 +- trunk/drivers/s390/block/dasd.c | 11 +- trunk/drivers/s390/block/dasd_diag.c | 2 +- trunk/drivers/s390/char/sclp_cmd.c | 2 - trunk/drivers/s390/char/tape_block.c | 1 + trunk/drivers/s390/kvm/kvm_virtio.c | 2 +- trunk/drivers/scsi/device_handler/scsi_dh.c | 9 +- trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c | 23 +- trunk/drivers/scsi/pmcraid.c | 3 - trunk/drivers/scsi/qlogicpti.c | 7 +- trunk/drivers/scsi/scsi_lib.c | 27 +- trunk/drivers/scsi/scsi_scan.c | 2 - trunk/drivers/scsi/scsi_sysfs.c | 16 +- trunk/drivers/ssb/pci.c | 16 +- trunk/drivers/ssb/sprom.c | 43 +- trunk/drivers/ssb/ssb_private.h | 3 +- .../staging/ft1000/ft1000-pcmcia/ft1000_hw.c | 4 + .../ft1000/ft1000-pcmcia/ft1000_proc.c | 3 + trunk/drivers/staging/gma500/Kconfig | 2 +- .../staging/intel_sst/intelmid_v1_control.c | 1 - .../staging/intel_sst/intelmid_v2_control.c | 1 - .../staging/olpc_dcon/olpc_dcon_xo_1.c | 1 - .../staging/rt2860/common/cmm_data_pci.c | 2 +- .../staging/rt2860/common/cmm_data_usb.c | 2 +- trunk/drivers/staging/rts_pstor/debug.h | 2 +- trunk/drivers/staging/rts_pstor/ms.c | 1 - trunk/drivers/staging/rts_pstor/rtsx_chip.c | 5 +- trunk/drivers/staging/rts_pstor/rtsx_scsi.c | 1 - trunk/drivers/staging/rts_pstor/sd.c | 4 +- trunk/drivers/staging/rts_pstor/trace.h | 2 +- trunk/drivers/staging/rts_pstor/xd.c | 1 - trunk/drivers/staging/solo6x10/Kconfig | 1 - trunk/drivers/staging/spectra/ffsport.c | 2 +- .../staging/tidspbridge/dynload/cload.c | 2 +- trunk/drivers/staging/tty/specialix.c | 2 +- trunk/drivers/staging/usbip/vhci_hcd.c | 11 +- trunk/drivers/staging/usbip/vhci_sysfs.c | 7 +- trunk/drivers/staging/wlan-ng/cfg80211.c | 2 +- trunk/drivers/tty/serial/Kconfig | 8 - trunk/drivers/tty/serial/Makefile | 1 - 
trunk/drivers/tty/serial/lantiq.c | 756 -------- trunk/drivers/tty/serial/of_serial.c | 7 +- trunk/drivers/usb/gadget/fsl_qe_udc.c | 7 +- trunk/drivers/usb/host/ehci-omap.c | 20 - trunk/drivers/usb/host/isp1760-hcd.c | 1 - trunk/drivers/usb/host/xhci-hub.c | 19 +- trunk/drivers/usb/musb/musb_gadget.c | 6 +- trunk/drivers/usb/musb/omap2430.c | 2 +- trunk/drivers/vhost/vhost.c | 2 +- trunk/drivers/video/acornfb.c | 26 +- trunk/drivers/video/atafb.c | 2 +- trunk/drivers/video/fbmem.c | 223 +-- trunk/drivers/watchdog/Kconfig | 6 - trunk/drivers/watchdog/Makefile | 1 - trunk/drivers/watchdog/lantiq_wdt.c | 261 --- trunk/drivers/watchdog/mpc8xxx_wdt.c | 7 +- trunk/drivers/watchdog/mtx-1_wdt.c | 21 +- trunk/drivers/xen/Makefile | 24 +- trunk/drivers/xen/balloon.c | 25 +- trunk/drivers/xen/events.c | 152 +- trunk/drivers/xen/gntalloc.c | 14 +- trunk/drivers/xen/gntdev.c | 16 +- trunk/drivers/xen/grant-table.c | 31 +- trunk/drivers/xen/manage.c | 8 +- trunk/drivers/xen/sys-hypervisor.c | 2 +- trunk/fs/block_dev.c | 27 +- trunk/fs/btrfs/acl.c | 5 +- trunk/fs/btrfs/ctree.h | 2 +- trunk/fs/btrfs/extent-tree.c | 37 +- trunk/fs/btrfs/ioctl.c | 24 +- trunk/fs/ceph/addr.c | 7 - trunk/fs/ceph/caps.c | 30 +- trunk/fs/ceph/file.c | 5 +- trunk/fs/ceph/inode.c | 7 +- trunk/fs/ceph/mds_client.c | 2 +- trunk/fs/ceph/snap.c | 2 +- trunk/fs/ceph/super.h | 4 +- trunk/fs/ceph/xattr.c | 12 +- trunk/fs/cifs/cifs_unicode.c | 14 +- trunk/fs/cifs/connect.c | 125 +- trunk/fs/cifs/sess.c | 19 +- trunk/fs/configfs/dir.c | 39 +- trunk/fs/debugfs/file.c | 17 +- trunk/fs/file.c | 18 +- trunk/fs/fuse/dir.c | 2 +- trunk/fs/hpfs/Kconfig | 1 + trunk/fs/hpfs/alloc.c | 118 +- trunk/fs/hpfs/anode.c | 138 +- trunk/fs/hpfs/buffer.c | 24 +- trunk/fs/hpfs/dir.c | 22 +- trunk/fs/hpfs/dnode.c | 174 +- trunk/fs/hpfs/ea.c | 136 +- trunk/fs/hpfs/file.c | 31 +- trunk/fs/hpfs/hpfs.h | 439 ++--- trunk/fs/hpfs/hpfs_fn.h | 80 +- trunk/fs/hpfs/inode.c | 47 +- trunk/fs/hpfs/map.c | 56 +- trunk/fs/hpfs/name.c | 33 + trunk/fs/hpfs/namei.c | 106 +- trunk/fs/hpfs/super.c | 118 +- trunk/fs/logfs/super.c | 8 +- trunk/fs/namei.c | 2 +- trunk/fs/nfs/namespace.c | 4 +- trunk/fs/nfs/nfs4_fs.h | 1 - trunk/fs/nfs/nfs4filelayout.c | 27 +- trunk/fs/nfs/nfs4filelayout.h | 2 +- trunk/fs/nfs/nfs4filelayoutdev.c | 34 +- trunk/fs/nfs/nfs4proc.c | 124 +- trunk/fs/nfs/nfs4state.c | 51 +- trunk/fs/nfs/nfs4xdr.c | 53 +- trunk/fs/nfs/pnfs.c | 42 +- trunk/fs/nfs/pnfs.h | 6 +- trunk/fs/nfs/read.c | 4 +- trunk/fs/nfs/super.c | 13 +- trunk/fs/nfs/write.c | 8 +- trunk/fs/nilfs2/alloc.c | 2 +- trunk/fs/ocfs2/cluster/heartbeat.c | 61 +- trunk/fs/ocfs2/dir.c | 2 +- trunk/fs/ocfs2/dlm/dlmdomain.c | 3 +- trunk/fs/ocfs2/dlm/dlmmaster.c | 3 - trunk/fs/ocfs2/file.c | 12 - trunk/fs/ocfs2/journal.c | 3 - trunk/fs/ocfs2/ocfs2_fs.h | 2 +- trunk/fs/partitions/efi.c | 6 - trunk/fs/proc/task_mmu.c | 12 +- trunk/fs/ubifs/log.c | 20 + trunk/fs/ubifs/replay.c | 18 +- trunk/fs/ubifs/super.c | 15 +- trunk/fs/xfs/linux-2.6/xfs_sync.c | 1 - trunk/fs/xfs/xfs_trans_ail.c | 47 +- trunk/include/asm-generic/vmlinux.lds.h | 34 +- trunk/include/drm/drm_fb_helper.h | 3 +- trunk/include/drm/drm_mm.h | 2 +- trunk/include/drm/drm_pciids.h | 5 - trunk/include/drm/radeon_drm.h | 2 - trunk/include/linux/bootmem.h | 2 - trunk/include/linux/bsearch.h | 9 - trunk/include/linux/capability.h | 13 +- trunk/include/linux/clockchips.h | 56 +- trunk/include/linux/clocksource.h | 34 +- trunk/include/linux/cpufreq.h | 52 +- trunk/include/linux/cred.h | 10 +- trunk/include/linux/device.h | 8 + 
trunk/include/linux/dynamic_debug.h | 2 + trunk/include/linux/fb.h | 1 - trunk/include/linux/flex_array.h | 2 +- trunk/include/linux/fs.h | 1 + trunk/include/linux/ftrace.h | 35 +- trunk/include/linux/ftrace_event.h | 1 - trunk/include/linux/gfp.h | 2 - trunk/include/linux/huge_mm.h | 2 +- trunk/include/linux/init.h | 14 +- trunk/include/linux/init_task.h | 1 + trunk/include/linux/irq.h | 179 +- trunk/include/linux/irqdesc.h | 11 +- trunk/include/linux/jump_label.h | 89 +- trunk/include/linux/jump_label_ref.h | 44 + trunk/include/linux/kernel.h | 1 - trunk/include/linux/kmod.h | 1 - trunk/include/linux/list.h | 35 +- trunk/include/linux/mfd/wm831x/pdata.h | 2 - trunk/include/linux/mm.h | 27 +- trunk/include/linux/module.h | 37 +- trunk/include/linux/moduleparam.h | 7 +- trunk/include/linux/mutex.h | 2 +- trunk/include/linux/nfs_fs_sb.h | 1 - trunk/include/linux/nfs_xdr.h | 3 - trunk/include/linux/of_device.h | 8 +- trunk/include/linux/pci-ats.h | 52 - trunk/include/linux/pci_ids.h | 4 + trunk/include/linux/percpu.h | 2 +- trunk/include/linux/perf_event.h | 121 +- trunk/include/linux/platform_device.h | 63 +- trunk/include/linux/pm.h | 39 +- trunk/include/linux/pm_runtime.h | 42 - trunk/include/linux/proc_fs.h | 2 - trunk/include/linux/ptrace.h | 13 +- trunk/include/linux/rculist.h | 16 +- trunk/include/linux/sched.h | 64 +- trunk/include/linux/seqlock.h | 4 +- trunk/include/linux/ssb/ssb.h | 4 +- trunk/include/linux/string.h | 1 - trunk/include/linux/sunrpc/sched.h | 5 +- trunk/include/linux/sysdev.h | 11 + trunk/include/linux/time.h | 1 + trunk/include/linux/tracepoint.h | 22 +- trunk/include/linux/usb/usbnet.h | 1 - trunk/include/linux/v4l2-mediabus.h | 7 +- trunk/include/linux/videodev2.h | 1 - trunk/include/media/v4l2-device.h | 2 +- trunk/include/net/inet_ecn.h | 16 +- trunk/include/net/ip_vs.h | 17 - trunk/include/net/llc_pdu.h | 8 +- trunk/include/net/xfrm.h | 3 - trunk/include/rdma/iw_cm.h | 11 +- trunk/include/rdma/rdma_cm.h | 10 - trunk/include/rdma/rdma_user_cm.h | 5 +- trunk/include/scsi/scsi_device.h | 1 - trunk/include/trace/events/gfpflags.h | 6 +- trunk/include/xen/events.h | 9 +- trunk/init/Kconfig | 21 +- trunk/init/main.c | 2 +- trunk/kernel/Makefile | 6 +- trunk/kernel/capability.c | 12 - trunk/kernel/cpuset.c | 2 +- trunk/kernel/cred.c | 12 +- trunk/kernel/events/Makefile | 6 - trunk/kernel/exit.c | 2 +- trunk/kernel/extable.c | 8 - trunk/kernel/fork.c | 5 +- trunk/kernel/freezer.c | 4 +- trunk/kernel/hrtimer.c | 10 +- trunk/kernel/hung_task.c | 2 +- trunk/kernel/{events => }/hw_breakpoint.c | 0 trunk/kernel/irq/Kconfig | 4 - trunk/kernel/irq/Makefile | 1 - trunk/kernel/irq/chip.c | 3 - trunk/kernel/irq/debug.h | 1 - trunk/kernel/irq/generic-chip.c | 354 ---- trunk/kernel/irq/irqdesc.c | 22 +- trunk/kernel/irq/manage.c | 3 +- trunk/kernel/irq/proc.c | 2 +- trunk/kernel/irq/settings.h | 17 - trunk/kernel/jump_label.c | 539 +++--- trunk/kernel/kexec.c | 9 +- trunk/kernel/kmod.c | 16 +- trunk/kernel/lockdep.c | 206 +- trunk/kernel/module.c | 105 +- trunk/kernel/mutex-debug.c | 2 +- trunk/kernel/mutex-debug.h | 2 +- trunk/kernel/mutex.c | 9 +- trunk/kernel/mutex.h | 2 +- trunk/kernel/params.c | 23 +- trunk/kernel/{events/core.c => perf_event.c} | 44 +- trunk/kernel/power/Kconfig | 10 +- trunk/kernel/power/hibernate.c | 58 +- trunk/kernel/power/main.c | 1 - trunk/kernel/power/power.h | 4 - trunk/kernel/power/snapshot.c | 33 +- trunk/kernel/power/suspend.c | 14 +- trunk/kernel/power/user.c | 5 +- trunk/kernel/ptrace.c | 17 - trunk/kernel/sched.c | 1658 ++++++++++------- 
trunk/kernel/sched_debug.c | 6 +- trunk/kernel/sched_fair.c | 126 +- trunk/kernel/sched_features.h | 6 - trunk/kernel/sched_idletask.c | 2 +- trunk/kernel/sched_rt.c | 83 +- trunk/kernel/sched_stoptask.c | 5 +- trunk/kernel/sys.c | 3 + trunk/kernel/time/clockevents.c | 64 - trunk/kernel/time/clocksource.c | 42 +- trunk/kernel/time/tick-broadcast.c | 12 +- trunk/kernel/time/timekeeping.c | 56 +- trunk/kernel/trace/Kconfig | 2 +- trunk/kernel/trace/ftrace.c | 1287 ++++--------- trunk/kernel/trace/trace.c | 16 +- trunk/kernel/trace/trace.h | 2 - trunk/kernel/trace/trace_events.c | 1 - trunk/kernel/trace/trace_functions.c | 2 - trunk/kernel/trace/trace_irqsoff.c | 1 - trunk/kernel/trace/trace_kprobe.c | 1 + trunk/kernel/trace/trace_output.c | 3 - trunk/kernel/trace/trace_printk.c | 120 +- trunk/kernel/trace/trace_sched_wakeup.c | 1 - trunk/kernel/trace/trace_selftest.c | 214 +-- trunk/kernel/trace/trace_selftest_dynamic.c | 6 - trunk/kernel/trace/trace_stack.c | 1 - trunk/kernel/tracepoint.c | 23 +- trunk/kernel/watchdog.c | 5 +- trunk/kernel/workqueue.c | 8 +- trunk/lib/Kconfig.debug | 19 +- trunk/lib/Makefile | 3 +- trunk/lib/bsearch.c | 53 - trunk/lib/dma-debug.c | 18 +- trunk/lib/flex_array.c | 24 +- trunk/lib/string.c | 29 - trunk/lib/vsprintf.c | 2 +- trunk/lib/xz/xz_dec_lzma2.c | 6 +- trunk/mm/huge_memory.c | 43 +- trunk/mm/kmemleak.c | 7 +- trunk/mm/memory.c | 21 +- trunk/mm/mlock.c | 5 +- trunk/mm/mmap.c | 11 +- trunk/mm/oom_kill.c | 9 +- trunk/mm/page_alloc.c | 57 +- trunk/mm/page_cgroup.c | 2 +- trunk/mm/shmem.c | 149 +- trunk/mm/slub.c | 4 +- trunk/mm/swap.c | 3 - trunk/mm/vmscan.c | 2 +- trunk/net/8021q/vlan.c | 3 - trunk/net/8021q/vlan_dev.c | 3 + trunk/net/9p/client.c | 2 +- trunk/net/9p/protocol.c | 1 - trunk/net/9p/trans_common.c | 11 +- trunk/net/bluetooth/hci_core.c | 5 +- trunk/net/bluetooth/hci_event.c | 2 + trunk/net/bluetooth/l2cap_core.c | 1 - trunk/net/bridge/br_input.c | 2 +- trunk/net/bridge/br_netfilter.c | 2 +- trunk/net/bridge/netfilter/ebtables.c | 64 +- trunk/net/can/bcm.c | 7 +- trunk/net/can/raw.c | 7 +- trunk/net/ceph/messenger.c | 26 +- trunk/net/ceph/osd_client.c | 4 +- trunk/net/core/dev.c | 38 +- trunk/net/dccp/options.c | 2 - trunk/net/dsa/Kconfig | 4 +- trunk/net/dsa/mv88e6131.c | 26 +- trunk/net/ipv4/devinet.c | 2 +- trunk/net/ipv4/fib_trie.c | 3 + trunk/net/ipv4/ip_fragment.c | 31 +- trunk/net/ipv4/route.c | 7 - trunk/net/ipv4/tcp_cubic.c | 9 +- trunk/net/ipv4/xfrm4_output.c | 8 +- trunk/net/ipv4/xfrm4_state.c | 1 - trunk/net/ipv6/addrconf.c | 2 +- trunk/net/ipv6/esp6.c | 2 +- trunk/net/ipv6/netfilter/ip6t_REJECT.c | 4 +- trunk/net/ipv6/route.c | 8 +- trunk/net/ipv6/udp.c | 2 +- trunk/net/ipv6/xfrm6_output.c | 6 +- trunk/net/ipv6/xfrm6_state.c | 1 - trunk/net/l2tp/l2tp_ip.c | 2 +- trunk/net/mac80211/cfg.c | 2 - trunk/net/mac80211/debugfs_netdev.c | 4 +- trunk/net/mac80211/tx.c | 4 - trunk/net/netfilter/ipvs/ip_vs_app.c | 17 +- trunk/net/netfilter/ipvs/ip_vs_conn.c | 16 +- trunk/net/netfilter/ipvs/ip_vs_core.c | 103 +- trunk/net/netfilter/ipvs/ip_vs_ctl.c | 126 +- trunk/net/netfilter/ipvs/ip_vs_est.c | 14 +- trunk/net/netfilter/ipvs/ip_vs_proto.c | 11 +- trunk/net/netfilter/ipvs/ip_vs_sync.c | 65 +- trunk/net/netfilter/nf_conntrack_netlink.c | 4 - trunk/net/netfilter/x_tables.c | 4 +- trunk/net/netfilter/xt_DSCP.c | 2 +- trunk/net/netfilter/xt_conntrack.c | 5 + trunk/net/sctp/ulpevent.c | 2 +- trunk/net/sunrpc/Kconfig | 9 +- trunk/net/sunrpc/auth_gss/auth_gss.c | 8 +- trunk/net/sunrpc/clnt.c | 5 +- trunk/net/sunrpc/xprt.c | 1 - trunk/net/unix/af_unix.c 
| 16 +- trunk/net/xfrm/xfrm_policy.c | 14 +- trunk/net/xfrm/xfrm_replay.c | 5 +- trunk/net/xfrm/xfrm_user.c | 3 - trunk/scripts/Makefile.build | 12 +- trunk/scripts/mod/modpost.c | 16 +- trunk/scripts/mod/modpost.h | 27 +- trunk/scripts/module-common.lds | 11 - trunk/scripts/recordmcount.c | 168 +- trunk/scripts/recordmcount.h | 174 +- trunk/scripts/recordmcount.pl | 5 - trunk/security/selinux/hooks.c | 3 +- trunk/security/selinux/ss/policydb.c | 10 +- trunk/sound/aoa/codecs/tas.c | 2 +- trunk/sound/pci/au88x0/au88x0_pcm.c | 7 +- trunk/sound/pci/hda/patch_realtek.c | 30 +- trunk/sound/pci/hda/patch_via.c | 10 +- trunk/sound/soc/codecs/ssm2602.c | 10 +- trunk/sound/soc/codecs/uda134x.c | 2 + trunk/sound/soc/codecs/wm8903.c | 2 +- trunk/sound/soc/davinci/davinci-mcasp.c | 19 +- trunk/sound/soc/jz4740/jz4740-i2s.c | 2 +- trunk/sound/soc/mid-x86/sst_platform.c | 6 - trunk/sound/soc/samsung/goni_wm8994.c | 8 +- trunk/sound/soc/soc-core.c | 2 - trunk/sound/usb/format.c | 4 +- trunk/sound/usb/quirks.c | 1 - .../perf/Documentation/perf-script-perl.txt | 1 + .../perf/Documentation/perf-script-python.txt | 1 + .../tools/perf/Documentation/perf-script.txt | 52 +- trunk/tools/perf/Makefile | 123 +- trunk/tools/perf/builtin-record.c | 2 +- trunk/tools/perf/builtin-script.c | 296 +-- trunk/tools/perf/builtin-stat.c | 573 +----- trunk/tools/perf/builtin-test.c | 2 +- trunk/tools/perf/builtin-top.c | 8 +- trunk/tools/perf/config/utilities.mak | 188 -- .../tools/perf/{config => }/feature-tests.mak | 16 +- trunk/tools/perf/util/evlist.c | 153 +- trunk/tools/perf/util/evlist.h | 3 +- .../perf/util/include/asm/alternative-asm.h | 8 - trunk/tools/perf/util/parse-events.c | 123 +- trunk/tools/perf/util/probe-finder.c | 73 - trunk/tools/perf/util/probe-finder.h | 2 - trunk/tools/perf/util/python.c | 5 +- trunk/tools/perf/util/session.c | 12 - trunk/tools/perf/util/session.h | 3 - trunk/tools/perf/util/symbol.c | 629 +++---- trunk/tools/perf/util/symbol.h | 78 +- 1155 files changed, 11448 insertions(+), 25157 deletions(-) delete mode 100644 trunk/Documentation/DocBook/v4l/pixfmt-y12.xml rename trunk/Documentation/{virtual => }/kvm/api.txt (100%) rename trunk/Documentation/{virtual => }/kvm/cpuid.txt (100%) rename trunk/Documentation/{virtual => }/kvm/locking.txt (100%) rename trunk/Documentation/{virtual => }/kvm/mmu.txt (100%) rename trunk/Documentation/{virtual => }/kvm/msr.txt (100%) rename trunk/Documentation/{virtual => }/kvm/ppc-pv.txt (100%) rename trunk/Documentation/{virtual => }/kvm/review-checklist.txt (95%) rename trunk/Documentation/{virtual => }/kvm/timekeeping.txt (100%) rename trunk/Documentation/{virtual => }/lguest/.gitignore (100%) rename trunk/Documentation/{virtual => }/lguest/Makefile (100%) rename trunk/Documentation/{virtual => }/lguest/extract (100%) rename trunk/Documentation/{virtual => }/lguest/lguest.c (100%) rename trunk/Documentation/{virtual => }/lguest/lguest.txt (97%) rename trunk/Documentation/{virtual => }/uml/UserModeLinux-HOWTO.txt (100%) delete mode 100644 trunk/Documentation/virtual/00-INDEX delete mode 100644 trunk/arch/arm/configs/at91x40_defconfig delete mode 100644 trunk/arch/arm/include/asm/i8253.h create mode 100644 trunk/arch/arm/mach-omap2/pm_bus.c delete mode 100644 trunk/arch/mips/configs/nlm_xlr_defconfig delete mode 100644 trunk/arch/mips/include/asm/mach-lantiq/lantiq.h delete mode 100644 trunk/arch/mips/include/asm/mach-lantiq/lantiq_platform.h delete mode 100644 trunk/arch/mips/include/asm/mach-lantiq/war.h delete mode 100644 
trunk/arch/mips/include/asm/mach-lantiq/xway/irq.h delete mode 100644 trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h delete mode 100644 trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h delete mode 100644 trunk/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h delete mode 100644 trunk/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h delete mode 100644 trunk/arch/mips/include/asm/mach-netlogic/irq.h delete mode 100644 trunk/arch/mips/include/asm/mach-netlogic/war.h delete mode 100644 trunk/arch/mips/include/asm/netlogic/interrupt.h delete mode 100644 trunk/arch/mips/include/asm/netlogic/mips-extns.h delete mode 100644 trunk/arch/mips/include/asm/netlogic/psb-bootinfo.h delete mode 100644 trunk/arch/mips/include/asm/netlogic/xlr/gpio.h delete mode 100644 trunk/arch/mips/include/asm/netlogic/xlr/iomap.h delete mode 100644 trunk/arch/mips/include/asm/netlogic/xlr/pic.h delete mode 100644 trunk/arch/mips/include/asm/netlogic/xlr/xlr.h delete mode 100644 trunk/arch/mips/lantiq/Kconfig delete mode 100644 trunk/arch/mips/lantiq/Makefile delete mode 100644 trunk/arch/mips/lantiq/Platform delete mode 100644 trunk/arch/mips/lantiq/clk.c delete mode 100644 trunk/arch/mips/lantiq/clk.h delete mode 100644 trunk/arch/mips/lantiq/devices.c delete mode 100644 trunk/arch/mips/lantiq/devices.h delete mode 100644 trunk/arch/mips/lantiq/early_printk.c delete mode 100644 trunk/arch/mips/lantiq/irq.c delete mode 100644 trunk/arch/mips/lantiq/machtypes.h delete mode 100644 trunk/arch/mips/lantiq/prom.c delete mode 100644 trunk/arch/mips/lantiq/prom.h delete mode 100644 trunk/arch/mips/lantiq/setup.c delete mode 100644 trunk/arch/mips/lantiq/xway/Kconfig delete mode 100644 trunk/arch/mips/lantiq/xway/Makefile delete mode 100644 trunk/arch/mips/lantiq/xway/clk-ase.c delete mode 100644 trunk/arch/mips/lantiq/xway/clk-xway.c delete mode 100644 trunk/arch/mips/lantiq/xway/devices.c delete mode 100644 trunk/arch/mips/lantiq/xway/devices.h delete mode 100644 trunk/arch/mips/lantiq/xway/dma.c delete mode 100644 trunk/arch/mips/lantiq/xway/ebu.c delete mode 100644 trunk/arch/mips/lantiq/xway/gpio.c delete mode 100644 trunk/arch/mips/lantiq/xway/gpio_ebu.c delete mode 100644 trunk/arch/mips/lantiq/xway/gpio_stp.c delete mode 100644 trunk/arch/mips/lantiq/xway/mach-easy50601.c delete mode 100644 trunk/arch/mips/lantiq/xway/mach-easy50712.c delete mode 100644 trunk/arch/mips/lantiq/xway/pmu.c delete mode 100644 trunk/arch/mips/lantiq/xway/prom-ase.c delete mode 100644 trunk/arch/mips/lantiq/xway/prom-xway.c delete mode 100644 trunk/arch/mips/lantiq/xway/reset.c delete mode 100644 trunk/arch/mips/lantiq/xway/setup-ase.c delete mode 100644 trunk/arch/mips/lantiq/xway/setup-xway.c delete mode 100644 trunk/arch/mips/mm/mmap.c delete mode 100644 trunk/arch/mips/netlogic/Kconfig delete mode 100644 trunk/arch/mips/netlogic/xlr/Makefile delete mode 100644 trunk/arch/mips/netlogic/xlr/irq.c delete mode 100644 trunk/arch/mips/netlogic/xlr/platform.c delete mode 100644 trunk/arch/mips/netlogic/xlr/setup.c delete mode 100644 trunk/arch/mips/netlogic/xlr/smp.c delete mode 100644 trunk/arch/mips/netlogic/xlr/smpboot.S delete mode 100644 trunk/arch/mips/netlogic/xlr/time.c delete mode 100644 trunk/arch/mips/netlogic/xlr/xlr_console.c delete mode 100644 trunk/arch/mips/pci/ops-lantiq.c delete mode 100644 trunk/arch/mips/pci/pci-lantiq.c delete mode 100644 trunk/arch/mips/pci/pci-lantiq.h delete mode 100644 trunk/arch/mips/pci/pci-xlr.c delete mode 100644 trunk/arch/s390/include/asm/jump_label.h delete mode 
100644 trunk/arch/s390/kernel/jump_label.c delete mode 100644 trunk/arch/um/sys-i386/atomic64_cx8_32.S rename trunk/{drivers/cpufreq/Kconfig.x86 => arch/x86/kernel/cpu/cpufreq/Kconfig} (97%) create mode 100644 trunk/arch/x86/kernel/cpu/cpufreq/Makefile rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/acpi-cpufreq.c (94%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/cpufreq-nforce2.c (97%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/e_powersaver.c (100%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/elanfreq.c (100%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/gx-suspmod.c (95%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/longhaul.c (98%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/longhaul.h (100%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/longrun.c (94%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/mperf.c (100%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/mperf.h (100%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/p4-clockmod.c (96%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/pcc-cpufreq.c (91%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/powernow-k6.c (100%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/powernow-k7.c (95%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/powernow-k7.h (100%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/powernow-k8.c (93%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/powernow-k8.h (98%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/sc520_freq.c (95%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/speedstep-centrino.c (96%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/speedstep-ich.c (92%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/speedstep-lib.c (90%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/speedstep-lib.h (100%) rename trunk/{drivers => arch/x86/kernel/cpu}/cpufreq/speedstep-smi.c (90%) rename trunk/arch/x86/kernel/{amd_gart_64.c => pci-gart_64.c} (100%) delete mode 100644 trunk/drivers/base/power/clock_ops.c delete mode 100644 trunk/drivers/clocksource/Kconfig delete mode 100644 trunk/drivers/clocksource/i8253.c delete mode 100644 trunk/drivers/mtd/maps/lantiq-flash.c delete mode 100644 trunk/drivers/net/lantiq_etop.c delete mode 100644 trunk/drivers/tty/serial/lantiq.c delete mode 100644 trunk/drivers/watchdog/lantiq_wdt.c delete mode 100644 trunk/include/linux/bsearch.h create mode 100644 trunk/include/linux/jump_label_ref.h delete mode 100644 trunk/include/linux/pci-ats.h delete mode 100644 trunk/kernel/events/Makefile rename trunk/kernel/{events => }/hw_breakpoint.c (100%) delete mode 100644 trunk/kernel/irq/generic-chip.c rename trunk/kernel/{events/core.c => perf_event.c} (99%) delete mode 100644 trunk/lib/bsearch.c delete mode 100644 trunk/tools/perf/config/utilities.mak rename trunk/tools/perf/{config => }/feature-tests.mak (86%) delete mode 100644 trunk/tools/perf/util/include/asm/alternative-asm.h diff --git a/[refs] b/[refs] index 5ea6abb0fd32..3878d641094d 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 7e6628e4bcb3b3546c625ec63ca724f28ab14f0c +refs/heads/master: 304529b1b6f8612ccbb4582e997051b48b94f4a4 diff --git a/trunk/Documentation/00-INDEX b/trunk/Documentation/00-INDEX index 1b777b960492..c17cd4bb2290 100644 --- a/trunk/Documentation/00-INDEX +++ b/trunk/Documentation/00-INDEX @@ -328,6 +328,8 @@ sysrq.txt - info on the magic SysRq key. telephony/ - directory with info on telephony (e.g. 
voice over IP) support. +uml/ + - directory with information about User Mode Linux. unicode.txt - info on the Unicode character/font mapping used in Linux. unshare.txt diff --git a/trunk/Documentation/ABI/testing/sysfs-power b/trunk/Documentation/ABI/testing/sysfs-power index b464d12761ba..194ca446ac28 100644 --- a/trunk/Documentation/ABI/testing/sysfs-power +++ b/trunk/Documentation/ABI/testing/sysfs-power @@ -158,17 +158,3 @@ Description: successful, will make the kernel abort a subsequent transition to a sleep state if any wakeup events are reported after the write has returned. - -What: /sys/power/reserved_size -Date: May 2011 -Contact: Rafael J. Wysocki -Description: - The /sys/power/reserved_size file allows user space to control - the amount of memory reserved for allocations made by device - drivers during the "device freeze" stage of hibernation. It can - be written a string representing a non-negative integer that - will be used as the amount of memory to reserve for allocations - made by device drivers' "freeze" callbacks, in bytes. - - Reading from this file will display the current value, which is - set to 1 MB by default. diff --git a/trunk/Documentation/DocBook/genericirq.tmpl b/trunk/Documentation/DocBook/genericirq.tmpl index b3422341d65c..fb10fd08c05c 100644 --- a/trunk/Documentation/DocBook/genericirq.tmpl +++ b/trunk/Documentation/DocBook/genericirq.tmpl @@ -191,8 +191,8 @@ Whenever an interrupt triggers, the lowlevel arch code calls into the generic interrupt code by calling desc->handle_irq(). - This highlevel IRQ handling function only uses desc->irq_data.chip - primitives referenced by the assigned chip descriptor structure. + This highlevel IRQ handling function only uses desc->chip primitives + referenced by the assigned chip descriptor structure. @@ -206,11 +206,11 @@ enable_irq() disable_irq_nosync() (SMP only) synchronize_irq() (SMP only) - irq_set_irq_type() - irq_set_irq_wake() - irq_set_handler_data() - irq_set_chip() - irq_set_chip_data() + set_irq_type() + set_irq_wake() + set_irq_data() + set_irq_chip() + set_irq_chip_data() See the autogenerated function documentation for details. 
@@ -225,8 +225,6 @@ handle_fasteoi_irq handle_simple_irq handle_percpu_irq - handle_edge_eoi_irq - handle_bad_irq The interrupt flow handlers (either predefined or architecture specific) are assigned to specific interrupts by the architecture @@ -243,13 +241,13 @@ default_enable(struct irq_data *data) { - desc->irq_data.chip->irq_unmask(data); + desc->chip->irq_unmask(data); } default_disable(struct irq_data *data) { if (!delay_disable(data)) - desc->irq_data.chip->irq_mask(data); + desc->chip->irq_mask(data); } default_ack(struct irq_data *data) @@ -286,9 +284,9 @@ noop(struct irq_data *data)) The following control flow is implemented (simplified excerpt): -desc->irq_data.chip->irq_mask_ack(); -handle_irq_event(desc->action); -desc->irq_data.chip->irq_unmask(); +desc->chip->irq_mask(); +handle_IRQ_event(desc->action); +desc->chip->irq_unmask(); @@ -302,8 +300,8 @@ desc->irq_data.chip->irq_unmask(); The following control flow is implemented (simplified excerpt): -handle_irq_event(desc->action); -desc->irq_data.chip->irq_eoi(); +handle_IRQ_event(desc->action); +desc->chip->irq_eoi(); @@ -317,17 +315,17 @@ desc->irq_data.chip->irq_eoi(); The following control flow is implemented (simplified excerpt): if (desc->status & running) { - desc->irq_data.chip->irq_mask_ack(); + desc->chip->irq_mask(); desc->status |= pending | masked; return; } -desc->irq_data.chip->irq_ack(); +desc->chip->irq_ack(); desc->status |= running; do { if (desc->status & masked) - desc->irq_data.chip->irq_unmask(); + desc->chip->irq_unmask(); desc->status &= ~pending; - handle_irq_event(desc->action); + handle_IRQ_event(desc->action); } while (status & pending); desc->status &= ~running; @@ -346,7 +344,7 @@ desc->status &= ~running; The following control flow is implemented (simplified excerpt): -handle_irq_event(desc->action); +handle_IRQ_event(desc->action); @@ -364,29 +362,12 @@ handle_irq_event(desc->action); The following control flow is implemented (simplified excerpt): -if (desc->irq_data.chip->irq_ack) - desc->irq_data.chip->irq_ack(); -handle_irq_event(desc->action); -if (desc->irq_data.chip->irq_eoi) - desc->irq_data.chip->irq_eoi(); +handle_IRQ_event(desc->action); +if (desc->chip->irq_eoi) + desc->chip->irq_eoi(); - - EOI Edge IRQ flow handler - - handle_edge_eoi_irq provides an abnomination of the edge - handler which is solely used to tame a badly wreckaged - irq controller on powerpc/cell. - - - - Bad IRQ flow handler - - handle_bad_irq is used for spurious interrupts which - have no real handler assigned.. - - Quirks and optimizations @@ -429,7 +410,6 @@ if (desc->irq_data.chip->irq_eoi) irq_mask_ack() - Optional, recommended for performance irq_mask() irq_unmask() - irq_eoi() - Optional, required for eoi flow handlers irq_retrigger() - Optional irq_set_type() - Optional irq_set_wake() - Optional @@ -444,24 +424,32 @@ if (desc->irq_data.chip->irq_eoi) __do_IRQ entry point - The original implementation __do_IRQ() was an alternative entry - point for all types of interrupts. It not longer exists. + The original implementation __do_IRQ() is an alternative entry + point for all types of interrupts. This handler turned out to be not suitable for all interrupt hardware and was therefore reimplemented with split - functionality for edge/level/simple/percpu interrupts. This is not + functionality for egde/level/simple/percpu interrupts. This is not only a functional optimization. It also shortens code paths for interrupts. 
+ + To make use of the split implementation, replace the call to + __do_IRQ by a call to desc->handle_irq() and associate + the appropriate handler function to desc->handle_irq(). + In most cases the generic handler implementations should + be sufficient. + Locking on SMP The locking of chip registers is up to the architecture that - defines the chip primitives. The per-irq structure is - protected via desc->lock, by the generic layer. + defines the chip primitives. There is a chip->lock field that can be used + for serialization, but the generic layer does not touch it. The per-irq + structure is protected via desc->lock, by the generic layer. diff --git a/trunk/Documentation/DocBook/media-entities.tmpl b/trunk/Documentation/DocBook/media-entities.tmpl index fea63b45471a..5d259c632cdf 100644 --- a/trunk/Documentation/DocBook/media-entities.tmpl +++ b/trunk/Documentation/DocBook/media-entities.tmpl @@ -294,7 +294,6 @@ - diff --git a/trunk/Documentation/DocBook/v4l/media-ioc-setup-link.xml b/trunk/Documentation/DocBook/v4l/media-ioc-setup-link.xml index cec97af4dab4..2331e76ded17 100644 --- a/trunk/Documentation/DocBook/v4l/media-ioc-setup-link.xml +++ b/trunk/Documentation/DocBook/v4l/media-ioc-setup-link.xml @@ -34,7 +34,7 @@ request - MEDIA_IOC_SETUP_LINK + MEDIA_IOC_ENUM_LINKS diff --git a/trunk/Documentation/DocBook/v4l/pixfmt-y12.xml b/trunk/Documentation/DocBook/v4l/pixfmt-y12.xml deleted file mode 100644 index ff417b858cc9..000000000000 --- a/trunk/Documentation/DocBook/v4l/pixfmt-y12.xml +++ /dev/null @@ -1,79 +0,0 @@ - - - V4L2_PIX_FMT_Y12 ('Y12 ') - &manvol; - - - V4L2_PIX_FMT_Y12 - Grey-scale image - - - Description - - This is a grey-scale image with a depth of 12 bits per pixel. Pixels -are stored in 16-bit words with unused high bits padded with 0. The least -significant byte is stored at lower memory addresses (little-endian). - - - <constant>V4L2_PIX_FMT_Y12</constant> 4 × 4 -pixel image - - - Byte Order. - Each cell is one byte. - - - - - - start + 0: - Y'00low - Y'00high - Y'01low - Y'01high - Y'02low - Y'02high - Y'03low - Y'03high - - - start + 8: - Y'10low - Y'10high - Y'11low - Y'11high - Y'12low - Y'12high - Y'13low - Y'13high - - - start + 16: - Y'20low - Y'20high - Y'21low - Y'21high - Y'22low - Y'22high - Y'23low - Y'23high - - - start + 24: - Y'30low - Y'30high - Y'31low - Y'31high - Y'32low - Y'32high - Y'33low - Y'33high - - - - - - - - - diff --git a/trunk/Documentation/DocBook/v4l/pixfmt.xml b/trunk/Documentation/DocBook/v4l/pixfmt.xml index 40af4beb48b9..c6fdcbbd1b41 100644 --- a/trunk/Documentation/DocBook/v4l/pixfmt.xml +++ b/trunk/Documentation/DocBook/v4l/pixfmt.xml @@ -696,7 +696,6 @@ information. 
&sub-packed-yuv; &sub-grey; &sub-y10; - &sub-y12; &sub-y16; &sub-yuyv; &sub-uyvy; diff --git a/trunk/Documentation/DocBook/v4l/subdev-formats.xml b/trunk/Documentation/DocBook/v4l/subdev-formats.xml index d7ccd25edcc1..7041127d6dfc 100644 --- a/trunk/Documentation/DocBook/v4l/subdev-formats.xml +++ b/trunk/Documentation/DocBook/v4l/subdev-formats.xml @@ -456,23 +456,6 @@ b1 b0 - - V4L2_MBUS_FMT_SGBRG8_1X8 - 0x3013 - - - - - - - - - - g7 - g6 - g5 - g4 - g3 - g2 - g1 - g0 - V4L2_MBUS_FMT_SGRBG8_1X8 0x3002 @@ -490,23 +473,6 @@ g1 g0 - - V4L2_MBUS_FMT_SRGGB8_1X8 - 0x3014 - - - - - - - - - - r7 - r6 - r5 - r4 - r3 - r2 - r1 - r0 - V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 0x300b @@ -2193,31 +2159,6 @@ u1 u0 - - V4L2_MBUS_FMT_Y12_1X12 - 0x2013 - - - - - - - - - - - - - - - - - - y11 - y10 - y9 - y8 - y7 - y6 - y5 - y4 - y3 - y2 - y1 - y0 - V4L2_MBUS_FMT_UYVY8_1X16 0x200f diff --git a/trunk/Documentation/cgroups/memory.txt b/trunk/Documentation/cgroups/memory.txt index 7c163477fcd8..b6ed61c95856 100644 --- a/trunk/Documentation/cgroups/memory.txt +++ b/trunk/Documentation/cgroups/memory.txt @@ -52,10 +52,8 @@ Brief summary of control files. tasks # attach a task(thread) and show list of threads cgroup.procs # show list of processes cgroup.event_control # an interface for event_fd() - memory.usage_in_bytes # show current res_counter usage for memory - (See 5.5 for details) - memory.memsw.usage_in_bytes # show current res_counter usage for memory+Swap - (See 5.5 for details) + memory.usage_in_bytes # show current memory(RSS+Cache) usage. + memory.memsw.usage_in_bytes # show current memory+Swap usage memory.limit_in_bytes # set/show limit of memory usage memory.memsw.limit_in_bytes # set/show limit of memory+Swap usage memory.failcnt # show the number of memory usage hits limits @@ -455,15 +453,6 @@ memory under it will be reclaimed. You can reset failcnt by writing 0 to failcnt file. # echo 0 > .../memory.failcnt -5.5 usage_in_bytes - -For efficiency, as other kernel components, memory cgroup uses some optimization -to avoid unnecessary cacheline false sharing. usage_in_bytes is affected by the -method and doesn't show 'exact' value of memory(and swap) usage, it's an fuzz -value for efficient access. (Of course, when necessary, it's synchronized.) -If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP) -value in memory.stat(see 5.2). - 6. Hierarchy support The memory controller supports a deep hierarchy and hierarchical accounting. diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index f6a24e8aa11e..492e81df2968 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -460,6 +460,14 @@ Who: Thomas Gleixner ---------------------------- +What: The acpi_sleep=s4_nonvs command line option +When: 2.6.37 +Files: arch/x86/kernel/acpi/sleep.c +Why: superseded by acpi_sleep=nonvs +Who: Rafael J. 
Wysocki + +---------------------------- + What: PCI DMA unmap state API When: August 2012 Why: PCI DMA unmap state API (include/linux/pci-dma.h) was replaced diff --git a/trunk/Documentation/flexible-arrays.txt b/trunk/Documentation/flexible-arrays.txt index df904aec9904..cb8a3a00cc92 100644 --- a/trunk/Documentation/flexible-arrays.txt +++ b/trunk/Documentation/flexible-arrays.txt @@ -66,10 +66,10 @@ trick is to ensure that any needed memory allocations are done before entering atomic context, using: int flex_array_prealloc(struct flex_array *array, unsigned int start, - unsigned int nr_elements, gfp_t flags); + unsigned int end, gfp_t flags); This function will ensure that memory for the elements indexed in the range -defined by start and nr_elements has been allocated. Thereafter, a +defined by start and end has been allocated. Thereafter, a flex_array_put() call on an element in that range is guaranteed not to block. diff --git a/trunk/Documentation/hwmon/adm1021 b/trunk/Documentation/hwmon/adm1021 index 02ad96cf9b2b..03d02bfb3df1 100644 --- a/trunk/Documentation/hwmon/adm1021 +++ b/trunk/Documentation/hwmon/adm1021 @@ -14,6 +14,10 @@ Supported chips: Prefix: 'gl523sm' Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e Datasheet: + * Intel Xeon Processor + Prefix: - any other - may require 'force_adm1021' parameter + Addresses scanned: none + Datasheet: Publicly available at Intel website * Maxim MAX1617 Prefix: 'max1617' Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e @@ -87,27 +91,21 @@ will do no harm, but will return 'old' values. It is possible to make ADM1021-clones do faster measurements, but there is really no good reason for that. +Xeon support +------------ -Netburst-based Xeon support ---------------------------- +Some Xeon processors have real max1617, adm1021, or compatible chips +within them, with two temperature sensors. -Some Xeon processors based on the Netburst (early Pentium 4, from 2001 to -2003) microarchitecture had real MAX1617, ADM1021, or compatible chips -within them, with two temperature sensors. Other Xeon processors of this -era (with 400 MHz FSB) had chips with only one temperature sensor. +Other Xeons have chips with only one sensor. -If you have such an old Xeon, and you get two valid temperatures when -loading the adm1021 module, then things are good. +If you have a Xeon, and the adm1021 module loads, and both temperatures +appear valid, then things are good. -If nothing happens when loading the adm1021 module, and you are certain -that your specific Xeon processor model includes compatible sensors, you -will have to explicitly instantiate the sensor chips from user-space. See -method 4 in Documentation/i2c/instantiating-devices. Possible slave -addresses are 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e. It is likely that -only temp2 will be correct and temp1 will have to be ignored. +If the adm1021 module doesn't load, you should try this: + modprobe adm1021 force_adm1021=BUS,ADDRESS + ADDRESS can only be 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e. -Previous generations of the Xeon processor (based on Pentium II/III) -didn't have these sensors. Next generations of Xeon processors (533 MHz -FSB and faster) lost them, until the Core-based generation which -introduced integrated digital thermal sensors. These are supported by -the coretemp driver. +If you have dual Xeons you may have appear to have two separate +adm1021-compatible chips, or two single-temperature sensors, at distinct +addresses. 
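[ Relating to the flexible-arrays.txt hunk earlier in this patch: flex_array_prealloc() is what allows later flex_array_put() calls to run in atomic context. The sketch below is a rough kernel-side illustration only; the array size, GFP flags and struct are assumptions, and the interpretation of the third prealloc argument (element count versus end index) is exactly what the hunk above changes, so the call shown here should be read as indicative rather than definitive. ]

	#include <linux/flex_array.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	struct sample { int a; int b; };

	static struct flex_array *fa;

	static int sample_setup(void)
	{
		int err;
		struct sample s = { .a = 1, .b = 2 };

		fa = flex_array_alloc(sizeof(struct sample), 128, GFP_KERNEL);
		if (!fa)
			return -ENOMEM;

		/* Back elements 0..15 with memory before entering atomic
		 * context; see the hunk above for the argument semantics. */
		err = flex_array_prealloc(fa, 0, 16, GFP_KERNEL);
		if (err) {
			flex_array_free(fa);
			return err;
		}

		/* Guaranteed not to block within the preallocated range. */
		return flex_array_put(fa, 3, &s, GFP_ATOMIC);
	}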
diff --git a/trunk/Documentation/hwmon/lm90 b/trunk/Documentation/hwmon/lm90 index f3efd18e87f4..fa475c0a48a3 100644 --- a/trunk/Documentation/hwmon/lm90 +++ b/trunk/Documentation/hwmon/lm90 @@ -32,16 +32,6 @@ Supported chips: Addresses scanned: I2C 0x4c and 0x4d Datasheet: Publicly available at the ON Semiconductor website http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461 - * Analog Devices ADT7461A - Prefix: 'adt7461a' - Addresses scanned: I2C 0x4c and 0x4d - Datasheet: Publicly available at the ON Semiconductor website - http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461A - * ON Semiconductor NCT1008 - Prefix: 'nct1008' - Addresses scanned: I2C 0x4c and 0x4d - Datasheet: Publicly available at the ON Semiconductor website - http://www.onsemi.com/PowerSolutions/product.do?id=NCT1008 * Maxim MAX6646 Prefix: 'max6646' Addresses scanned: I2C 0x4d @@ -159,7 +149,7 @@ ADM1032: * ALERT is triggered by open remote sensor. * SMBus PEC support for Write Byte and Receive Byte transactions. -ADT7461, ADT7461A, NCT1008: +ADT7461: * Extended temperature range (breaks compatibility) * Lower resolution for remote temperature @@ -205,9 +195,9 @@ are exported, one for each channel, but these values are of course linked. Only the local hysteresis can be set from user-space, and the same delta applies to the remote hysteresis. -The lm90 driver will not update its values more frequently than configured with -the update_interval attribute; reading them more often will do no harm, but will -return 'old' values. +The lm90 driver will not update its values more frequently than every +other second; reading them more often will do no harm, but will return +'old' values. SMBus Alert Support ------------------- @@ -215,12 +205,11 @@ SMBus Alert Support This driver has basic support for SMBus alert. When an alert is received, the status register is read and the faulty temperature channel is logged. -The Analog Devices chips (ADM1032, ADT7461 and ADT7461A) and ON -Semiconductor chips (NCT1008) do not implement the SMBus alert protocol -properly so additional care is needed: the ALERT output is disabled when -an alert is received, and is re-enabled only when the alarm is gone. -Otherwise the chip would block alerts from other chips in the bus as long -as the alarm is active. +The Analog Devices chips (ADM1032 and ADT7461) do not implement the SMBus +alert protocol properly so additional care is needed: the ALERT output is +disabled when an alert is received, and is re-enabled only when the alarm +is gone. Otherwise the chip would block alerts from other chips in the bus +as long as the alarm is active. PEC Support ----------- diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 259037b873b7..cc85a9278190 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -245,7 +245,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. acpi_sleep= [HW,ACPI] Sleep options Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, - old_ordering, nonvs, sci_force_enable } + old_ordering, s4_nonvs, sci_force_enable } See Documentation/power/video.txt for information on s3_bios and s3_mode. 
s3_beep is for debugging; it makes the PC's speaker beep diff --git a/trunk/Documentation/virtual/kvm/api.txt b/trunk/Documentation/kvm/api.txt similarity index 100% rename from trunk/Documentation/virtual/kvm/api.txt rename to trunk/Documentation/kvm/api.txt diff --git a/trunk/Documentation/virtual/kvm/cpuid.txt b/trunk/Documentation/kvm/cpuid.txt similarity index 100% rename from trunk/Documentation/virtual/kvm/cpuid.txt rename to trunk/Documentation/kvm/cpuid.txt diff --git a/trunk/Documentation/virtual/kvm/locking.txt b/trunk/Documentation/kvm/locking.txt similarity index 100% rename from trunk/Documentation/virtual/kvm/locking.txt rename to trunk/Documentation/kvm/locking.txt diff --git a/trunk/Documentation/virtual/kvm/mmu.txt b/trunk/Documentation/kvm/mmu.txt similarity index 100% rename from trunk/Documentation/virtual/kvm/mmu.txt rename to trunk/Documentation/kvm/mmu.txt diff --git a/trunk/Documentation/virtual/kvm/msr.txt b/trunk/Documentation/kvm/msr.txt similarity index 100% rename from trunk/Documentation/virtual/kvm/msr.txt rename to trunk/Documentation/kvm/msr.txt diff --git a/trunk/Documentation/virtual/kvm/ppc-pv.txt b/trunk/Documentation/kvm/ppc-pv.txt similarity index 100% rename from trunk/Documentation/virtual/kvm/ppc-pv.txt rename to trunk/Documentation/kvm/ppc-pv.txt diff --git a/trunk/Documentation/virtual/kvm/review-checklist.txt b/trunk/Documentation/kvm/review-checklist.txt similarity index 95% rename from trunk/Documentation/virtual/kvm/review-checklist.txt rename to trunk/Documentation/kvm/review-checklist.txt index a850986ed684..730475ae1b8d 100644 --- a/trunk/Documentation/virtual/kvm/review-checklist.txt +++ b/trunk/Documentation/kvm/review-checklist.txt @@ -7,7 +7,7 @@ Review checklist for kvm patches 2. Patches should be against kvm.git master branch. 3. If the patch introduces or modifies a new userspace API: - - the API must be documented in Documentation/virtual/kvm/api.txt + - the API must be documented in Documentation/kvm/api.txt - the API must be discoverable using KVM_CHECK_EXTENSION 4. New state must include support for save/restore. 
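[ The review checklist above requires new APIs to be discoverable through KVM_CHECK_EXTENSION. From user space that check looks roughly like the following; the capability queried here (KVM_CAP_NR_VCPUS) is chosen arbitrarily for illustration. ]

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		if (kvm < 0) {
			perror("open /dev/kvm");
			return 1;
		}

		/* Returns 0 if the capability is absent, non-zero (often a
		 * limit or feature value) if it is present. */
		int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
		printf("KVM_CAP_NR_VCPUS -> %d\n", ret);

		close(kvm);
		return 0;
	}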
diff --git a/trunk/Documentation/virtual/kvm/timekeeping.txt b/trunk/Documentation/kvm/timekeeping.txt similarity index 100% rename from trunk/Documentation/virtual/kvm/timekeeping.txt rename to trunk/Documentation/kvm/timekeeping.txt diff --git a/trunk/Documentation/virtual/lguest/.gitignore b/trunk/Documentation/lguest/.gitignore similarity index 100% rename from trunk/Documentation/virtual/lguest/.gitignore rename to trunk/Documentation/lguest/.gitignore diff --git a/trunk/Documentation/virtual/lguest/Makefile b/trunk/Documentation/lguest/Makefile similarity index 100% rename from trunk/Documentation/virtual/lguest/Makefile rename to trunk/Documentation/lguest/Makefile diff --git a/trunk/Documentation/virtual/lguest/extract b/trunk/Documentation/lguest/extract similarity index 100% rename from trunk/Documentation/virtual/lguest/extract rename to trunk/Documentation/lguest/extract diff --git a/trunk/Documentation/virtual/lguest/lguest.c b/trunk/Documentation/lguest/lguest.c similarity index 100% rename from trunk/Documentation/virtual/lguest/lguest.c rename to trunk/Documentation/lguest/lguest.c diff --git a/trunk/Documentation/virtual/lguest/lguest.txt b/trunk/Documentation/lguest/lguest.txt similarity index 97% rename from trunk/Documentation/virtual/lguest/lguest.txt rename to trunk/Documentation/lguest/lguest.txt index bff0c554485d..dad99978a6a8 100644 --- a/trunk/Documentation/virtual/lguest/lguest.txt +++ b/trunk/Documentation/lguest/lguest.txt @@ -74,8 +74,7 @@ Running Lguest: - Run an lguest as root: - Documentation/virtual/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \ - --block=rootfile root=/dev/vda + Documentation/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 --block=rootfile root=/dev/vda Explanation: 64: the amount of memory to use, in MB. diff --git a/trunk/Documentation/power/devices.txt b/trunk/Documentation/power/devices.txt index 88880839ece4..1971bcf48a60 100644 --- a/trunk/Documentation/power/devices.txt +++ b/trunk/Documentation/power/devices.txt @@ -279,15 +279,11 @@ When the system goes into the standby or memory sleep state, the phases are: time.) Unlike the other suspend-related phases, during the prepare phase the device tree is traversed top-down. - In addition to that, if device drivers need to allocate additional - memory to be able to hadle device suspend correctly, that should be - done in the prepare phase. - - After the prepare callback method returns, no new children may be - registered below the device. The method may also prepare the device or - driver in some way for the upcoming system power transition (for - example, by allocating additional memory required for this purpose), but - it should not put the device into a low-power state. + The prepare phase uses only a bus callback. After the callback method + returns, no new children may be registered below the device. The method + may also prepare the device or driver in some way for the upcoming + system power transition, but it should not put the device into a + low-power state. 2. The suspend methods should quiesce the device to stop it from performing I/O. They also may save the device registers and put it into the diff --git a/trunk/Documentation/power/notifiers.txt b/trunk/Documentation/power/notifiers.txt index c2a4a346c0d9..cf980709122a 100644 --- a/trunk/Documentation/power/notifiers.txt +++ b/trunk/Documentation/power/notifiers.txt @@ -1,41 +1,46 @@ Suspend notifiers - (C) 2007-2011 Rafael J. 
Wysocki , GPL - -There are some operations that subsystems or drivers may want to carry out -before hibernation/suspend or after restore/resume, but they require the system -to be fully functional, so the drivers' and subsystems' .suspend() and .resume() -or even .prepare() and .complete() callbacks are not suitable for this purpose. -For example, device drivers may want to upload firmware to their devices after -resume/restore, but they cannot do it by calling request_firmware() from their -.resume() or .complete() routines (user land processes are frozen at these -points). The solution may be to load the firmware into memory before processes -are frozen and upload it from there in the .resume() routine. -A suspend/hibernation notifier may be used for this purpose. - -The subsystems or drivers having such needs can register suspend notifiers that -will be called upon the following events by the PM core: + (C) 2007 Rafael J. Wysocki , GPL + +There are some operations that device drivers may want to carry out in their +.suspend() routines, but shouldn't, because they can cause the hibernation or +suspend to fail. For example, a driver may want to allocate a substantial amount +of memory (like 50 MB) in .suspend(), but that shouldn't be done after the +swsusp's memory shrinker has run. + +Also, there may be some operations, that subsystems want to carry out before a +hibernation/suspend or after a restore/resume, requiring the system to be fully +functional, so the drivers' .suspend() and .resume() routines are not suitable +for this purpose. For example, device drivers may want to upload firmware to +their devices after a restore from a hibernation image, but they cannot do it by +calling request_firmware() from their .resume() routines (user land processes +are frozen at this point). The solution may be to load the firmware into +memory before processes are frozen and upload it from there in the .resume() +routine. Of course, a hibernation notifier may be used for this purpose. + +The subsystems that have such needs can register suspend notifiers that will be +called upon the following events by the suspend core: PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will be frozen immediately. PM_POST_HIBERNATION The system memory state has been restored from a - hibernation image or an error occurred during - hibernation. Device drivers' restore callbacks have + hibernation image or an error occurred during the + hibernation. Device drivers' .resume() callbacks have been executed and tasks have been thawed. PM_RESTORE_PREPARE The system is going to restore a hibernation image. - If all goes well, the restored kernel will issue a + If all goes well the restored kernel will issue a PM_POST_HIBERNATION notification. -PM_POST_RESTORE An error occurred during restore from hibernation. - Device drivers' restore callbacks have been executed +PM_POST_RESTORE An error occurred during the hibernation restore. + Device drivers' .resume() callbacks have been executed and tasks have been thawed. -PM_SUSPEND_PREPARE The system is preparing for suspend. +PM_SUSPEND_PREPARE The system is preparing for a suspend. PM_POST_SUSPEND The system has just resumed or an error occurred during - suspend. Device drivers' resume callbacks have been - executed and tasks have been thawed. + the suspend. Device drivers' .resume() callbacks have + been executed and tasks have been thawed. 
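[ For reference, subscribing to the events listed above uses a standard notifier block registered with register_pm_notifier(). The sketch below illustrates the firmware-caching pattern the text describes; the function names and the work done in each branch are hypothetical. ]

	#include <linux/suspend.h>
	#include <linux/notifier.h>
	#include <linux/module.h>

	static int my_pm_notify(struct notifier_block *nb, unsigned long event,
				void *unused)
	{
		switch (event) {
		case PM_SUSPEND_PREPARE:
		case PM_HIBERNATION_PREPARE:
			/* Tasks are still running: safe to allocate memory or
			 * pull firmware from user space here. */
			break;
		case PM_POST_SUSPEND:
		case PM_POST_HIBERNATION:
		case PM_POST_RESTORE:
			/* Undo whatever the *_PREPARE branch set up. */
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_pm_nb = {
		.notifier_call = my_pm_notify,
	};

	static int __init my_pm_init(void)
	{
		return register_pm_notifier(&my_pm_nb);
	}

	static void __exit my_pm_exit(void)
	{
		unregister_pm_notifier(&my_pm_nb);
	}

	module_init(my_pm_init);
	module_exit(my_pm_exit);
	MODULE_LICENSE("GPL");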
It is generally assumed that whatever the notifiers do for PM_HIBERNATION_PREPARE, should be undone for PM_POST_HIBERNATION. Analogously, diff --git a/trunk/Documentation/trace/kprobetrace.txt b/trunk/Documentation/trace/kprobetrace.txt index c83bd6b4e6e8..6d27ab8d6e9f 100644 --- a/trunk/Documentation/trace/kprobetrace.txt +++ b/trunk/Documentation/trace/kprobetrace.txt @@ -120,6 +120,7 @@ format: field:unsigned char common_flags; offset:2; size:1; signed:0; field:unsigned char common_preempt_count; offset:3; size:1;signed:0; field:int common_pid; offset:4; size:4; signed:1; + field:int common_lock_depth; offset:8; size:4; signed:1; field:unsigned long __probe_ip; offset:12; size:4; signed:0; field:int __probe_nargs; offset:16; size:4; signed:1; diff --git a/trunk/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/trunk/Documentation/uml/UserModeLinux-HOWTO.txt similarity index 100% rename from trunk/Documentation/virtual/uml/UserModeLinux-HOWTO.txt rename to trunk/Documentation/uml/UserModeLinux-HOWTO.txt diff --git a/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt b/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt index 1e96ce6e2d2f..cb47e723af74 100644 --- a/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt +++ b/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt @@ -37,7 +37,7 @@ Generic scaling / cropping scheme -1'- In the above chart minuses and slashes represent "real" data amounts, points and -accents represent "useful" data, basically, CEU scaled and cropped output, +accents represent "useful" data, basically, CEU scaled amd cropped output, mapped back onto the client's source plane. Such a configuration can be produced by user requests: @@ -65,7 +65,7 @@ Do not touch input rectangle - it is already optimal. 1. Calculate current sensor scales: - scale_s = ((2') - (2)) / ((3') - (3)) + scale_s = ((3') - (3)) / ((2') - (2)) 2. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at current sensor scales onto input window - this is user S_CROP: @@ -80,7 +80,7 @@ window: 4. Calculate sensor output window by applying combined scales to real input window: - width_s_out = ((7') - (7)) = ((2') - (2)) / scale_comb + width_s_out = ((2') - (2)) / scale_comb 5. Apply iterative sensor S_FMT for sensor output window. diff --git a/trunk/Documentation/virtual/00-INDEX b/trunk/Documentation/virtual/00-INDEX deleted file mode 100644 index fe0251c4cfb7..000000000000 --- a/trunk/Documentation/virtual/00-INDEX +++ /dev/null @@ -1,10 +0,0 @@ -Virtualization support in the Linux kernel. - -00-INDEX - - this file. -kvm/ - - Kernel Virtual Machine. See also http://linux-kvm.org -lguest/ - - Extremely simple hypervisor for experimental/educational use. -uml/ - - User Mode Linux, builds/runs Linux kernel as a userspace program. diff --git a/trunk/Documentation/workqueue.txt b/trunk/Documentation/workqueue.txt index a0b577de918f..01c513fac40e 100644 --- a/trunk/Documentation/workqueue.txt +++ b/trunk/Documentation/workqueue.txt @@ -12,7 +12,6 @@ CONTENTS 4. Application Programming Interface (API) 5. Example Execution Scenarios 6. Guidelines -7. Debugging 1. Introduction @@ -380,42 +379,3 @@ If q1 has WQ_CPU_INTENSIVE set, * Unless work items are expected to consume a huge amount of CPU cycles, using a bound wq is usually beneficial due to the increased level of locality in wq operations and work item execution. - - -7. 
Debugging - -Because the work functions are executed by generic worker threads -there are a few tricks needed to shed some light on misbehaving -workqueue users. - -Worker threads show up in the process list as: - -root 5671 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/0:1] -root 5672 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/1:2] -root 5673 0.0 0.0 0 0 ? S 12:12 0:00 [kworker/0:0] -root 5674 0.0 0.0 0 0 ? S 12:13 0:00 [kworker/1:0] - -If kworkers are going crazy (using too much cpu), there are two types -of possible problems: - - 1. Something beeing scheduled in rapid succession - 2. A single work item that consumes lots of cpu cycles - -The first one can be tracked using tracing: - - $ echo workqueue:workqueue_queue_work > /sys/kernel/debug/tracing/set_event - $ cat /sys/kernel/debug/tracing/trace_pipe > out.txt - (wait a few secs) - ^C - -If something is busy looping on work queueing, it would be dominating -the output and the offender can be determined with the work item -function. - -For the second type of problems it should be possible to just check -the stack trace of the offending worker thread. - - $ cat /proc/THE_OFFENDING_KWORKER/stack - -The work item's function should be trivially visible in the stack -trace. diff --git a/trunk/Documentation/x86/x86_64/boot-options.txt b/trunk/Documentation/x86/x86_64/boot-options.txt index c54b4f503e2a..092e596a1301 100644 --- a/trunk/Documentation/x86/x86_64/boot-options.txt +++ b/trunk/Documentation/x86/x86_64/boot-options.txt @@ -206,7 +206,7 @@ IOMMU (input/output memory management unit) (e.g. because you have < 3 GB memory). Kernel boot message: "PCI-DMA: Disabling IOMMU" - 2. : AMD GART based hardware IOMMU. + 2. : AMD GART based hardware IOMMU. Kernel boot message: "PCI-DMA: using GART IOMMU" 3. : Software IOMMU implementation. 
Used diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 8df8d2dfba28..13803127b68f 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -405,8 +405,8 @@ S: Maintained F: sound/oss/aedsp16.c AFFS FILE SYSTEM -L: linux-fsdevel@vger.kernel.org -S: Orphan +M: Roman Zippel +S: Maintained F: Documentation/filesystems/affs.txt F: fs/affs/ @@ -1032,13 +1032,12 @@ W: http://www.fluff.org/ben/linux/ S: Maintained F: arch/arm/mach-s3c64xx/ -ARM/S5P EXYNOS ARM ARCHITECTURES +ARM/S5P ARM ARCHITECTURES M: Kukjin Kim L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-s5p*/ -F: arch/arm/mach-exynos*/ ARM/SAMSUNG MOBILE MACHINE SUPPORT M: Kyungmin Park @@ -2809,23 +2808,42 @@ GPIO SUBSYSTEM M: Grant Likely S: Maintained T: git git://git.secretlab.ca/git/linux-2.6.git -F: Documentation/gpio.txt +F: Documentation/gpio/gpio.txt F: drivers/gpio/ F: include/linux/gpio* -GRE DEMULTIPLEXER DRIVER -M: Dmitry Kozlov -L: netdev@vger.kernel.org -S: Maintained -F: net/ipv4/gre.c -F: include/net/gre.h - GRETH 10/100/1G Ethernet MAC device driver M: Kristoffer Glembo L: netdev@vger.kernel.org S: Maintained F: drivers/net/greth* +HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER +M: Frank Seidel +L: platform-driver-x86@vger.kernel.org +W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ +S: Maintained +F: drivers/platform/x86/hdaps.c + +HWPOISON MEMORY FAILURE HANDLING +M: Andi Kleen +L: linux-mm@kvack.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison +S: Maintained +F: mm/memory-failure.c +F: mm/hwpoison-inject.c + +HYPERVISOR VIRTUAL CONSOLE DRIVER +L: linuxppc-dev@lists.ozlabs.org +S: Odd Fixes +F: drivers/tty/hvc/ + +iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER +M: Peter Jones +M: Konrad Rzeszutek Wilk +S: Maintained +F: drivers/firmware/iscsi_ibft* + GSPCA FINEPIX SUBDRIVER M: Frank Zago L: linux-media@vger.kernel.org @@ -2876,26 +2894,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git S: Maintained F: drivers/media/video/gspca/ -HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER -M: Frank Seidel -L: platform-driver-x86@vger.kernel.org -W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ -S: Maintained -F: drivers/platform/x86/hdaps.c - -HWPOISON MEMORY FAILURE HANDLING -M: Andi Kleen -L: linux-mm@kvack.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison -S: Maintained -F: mm/memory-failure.c -F: mm/hwpoison-inject.c - -HYPERVISOR VIRTUAL CONSOLE DRIVER -L: linuxppc-dev@lists.ozlabs.org -S: Odd Fixes -F: drivers/tty/hvc/ - HARDWARE MONITORING M: Jean Delvare M: Guenter Roeck @@ -2946,8 +2944,8 @@ F: drivers/block/cciss* F: include/linux/cciss_ioctl.h HFS FILESYSTEM -L: linux-fsdevel@vger.kernel.org -S: Orphan +M: Roman Zippel +S: Maintained F: Documentation/filesystems/hfs.txt F: fs/hfs/ @@ -3479,12 +3477,6 @@ F: Documentation/isapnp.txt F: drivers/pnp/isapnp/ F: include/linux/isapnp.h -iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER -M: Peter Jones -M: Konrad Rzeszutek Wilk -S: Maintained -F: drivers/firmware/iscsi_ibft* - ISCSI M: Mike Christie L: open-iscsi@googlegroups.com @@ -3814,7 +3806,7 @@ M: Rusty Russell L: lguest@lists.ozlabs.org W: http://lguest.ozlabs.org/ S: Odd Fixes -F: Documentation/virtual/lguest/ +F: Documentation/lguest/ F: arch/x86/lguest/ F: drivers/lguest/ F: include/linux/lguest*.h @@ -4001,6 +3993,7 @@ F: arch/m32r/ M68K ARCHITECTURE M: Geert 
Uytterhoeven +M: Roman Zippel L: linux-m68k@lists.linux-m68k.org W: http://www.linux-m68k.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git @@ -4995,13 +4988,6 @@ F: Documentation/pps/ F: drivers/pps/ F: include/linux/pps*.h -PPTP DRIVER -M: Dmitry Kozlov -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/pptp.c -W: http://sourceforge.net/projects/accel-pptp - PREEMPTIBLE KERNEL M: Robert Love L: kpreempt-tech@lists.sourceforge.net @@ -6569,7 +6555,7 @@ S: Maintained F: drivers/usb/host/uhci* USB "USBNET" DRIVER FRAMEWORK -M: Oliver Neukum +M: David Brownell L: netdev@vger.kernel.org W: http://www.linux-usb.org/usbnet S: Maintained @@ -6631,7 +6617,7 @@ L: user-mode-linux-devel@lists.sourceforge.net L: user-mode-linux-user@lists.sourceforge.net W: http://user-mode-linux.sourceforge.net S: Maintained -F: Documentation/virtual/uml/ +F: Documentation/uml/ F: arch/um/ F: fs/hostfs/ F: fs/hppfs/ @@ -6935,18 +6921,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86. S: Maintained F: drivers/platform/x86 -XEN HYPERVISOR INTERFACE -M: Jeremy Fitzhardinge -M: Konrad Rzeszutek Wilk -L: xen-devel@lists.xensource.com (moderated for non-subscribers) -L: virtualization@lists.linux-foundation.org -S: Supported -F: arch/x86/xen/ -F: drivers/*/xen-*front.c -F: drivers/xen/ -F: arch/x86/include/asm/xen/ -F: include/xen/ - XEN NETWORK BACKEND DRIVER M: Ian Campbell L: xen-devel@lists.xensource.com (moderated for non-subscribers) @@ -6968,6 +6942,18 @@ S: Supported F: arch/x86/xen/*swiotlb* F: drivers/xen/*swiotlb* +XEN HYPERVISOR INTERFACE +M: Jeremy Fitzhardinge +M: Konrad Rzeszutek Wilk +L: xen-devel@lists.xensource.com (moderated for non-subscribers) +L: virtualization@lists.linux-foundation.org +S: Supported +F: arch/x86/xen/ +F: drivers/*/xen-*front.c +F: drivers/xen/ +F: arch/x86/include/asm/xen/ +F: include/xen/ + XFS FILESYSTEM P: Silicon Graphics Inc M: Alex Elder @@ -7037,6 +7023,20 @@ M: "Maciej W. 
Rozycki" S: Maintained F: drivers/tty/serial/zs.* +GRE DEMULTIPLEXER DRIVER +M: Dmitry Kozlov +L: netdev@vger.kernel.org +S: Maintained +F: net/ipv4/gre.c +F: include/net/gre.h + +PPTP DRIVER +M: Dmitry Kozlov +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/pptp.c +W: http://sourceforge.net/projects/accel-pptp + THE REST M: Linus Torvalds L: linux-kernel@vger.kernel.org diff --git a/trunk/Makefile b/trunk/Makefile index a0344a81a893..b967b967572b 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 39 -EXTRAVERSION = +EXTRAVERSION = -rc4 NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* @@ -1268,7 +1268,6 @@ help: @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' @echo ' make C=2 [targets] Force check of all c source with $$CHECK' @echo ' make W=1 [targets] Enable extra gcc checks' - @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' @echo '' @echo 'Execute "make" or "make all" to build all targets marked with [*] ' @echo 'For further info see the ./README file' diff --git a/trunk/arch/alpha/include/asm/unistd.h b/trunk/arch/alpha/include/asm/unistd.h index b1834166922d..058937bf5a77 100644 --- a/trunk/arch/alpha/include/asm/unistd.h +++ b/trunk/arch/alpha/include/asm/unistd.h @@ -452,14 +452,10 @@ #define __NR_fanotify_init 494 #define __NR_fanotify_mark 495 #define __NR_prlimit64 496 -#define __NR_name_to_handle_at 497 -#define __NR_open_by_handle_at 498 -#define __NR_clock_adjtime 499 -#define __NR_syncfs 500 #ifdef __KERNEL__ -#define NR_SYSCALLS 501 +#define NR_SYSCALLS 497 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/arch/alpha/kernel/smp.c b/trunk/arch/alpha/kernel/smp.c index 5a621c6d22ab..42aa078a5e4d 100644 --- a/trunk/arch/alpha/kernel/smp.c +++ b/trunk/arch/alpha/kernel/smp.c @@ -585,7 +585,8 @@ handle_ipi(struct pt_regs *regs) switch (which) { case IPI_RESCHEDULE: - scheduler_ipi(); + /* Reschedule callback. Everything to be done + is done by the interrupt return path. */ break; case IPI_CALL_FUNC: diff --git a/trunk/arch/alpha/kernel/systbls.S b/trunk/arch/alpha/kernel/systbls.S index 15f999d41c75..a6a1de9db16f 100644 --- a/trunk/arch/alpha/kernel/systbls.S +++ b/trunk/arch/alpha/kernel/systbls.S @@ -498,27 +498,23 @@ sys_call_table: .quad sys_ni_syscall /* sys_timerfd */ .quad sys_eventfd .quad sys_recvmmsg - .quad sys_fallocate /* 480 */ + .quad sys_fallocate /* 480 */ .quad sys_timerfd_create .quad sys_timerfd_settime .quad sys_timerfd_gettime .quad sys_signalfd4 - .quad sys_eventfd2 /* 485 */ + .quad sys_eventfd2 /* 485 */ .quad sys_epoll_create1 .quad sys_dup3 .quad sys_pipe2 .quad sys_inotify_init1 - .quad sys_preadv /* 490 */ + .quad sys_preadv /* 490 */ .quad sys_pwritev .quad sys_rt_tgsigqueueinfo .quad sys_perf_event_open .quad sys_fanotify_init - .quad sys_fanotify_mark /* 495 */ + .quad sys_fanotify_mark /* 495 */ .quad sys_prlimit64 - .quad sys_name_to_handle_at - .quad sys_open_by_handle_at - .quad sys_clock_adjtime - .quad sys_syncfs /* 500 */ .size sys_call_table, . 
- sys_call_table .type sys_call_table, @object diff --git a/trunk/arch/alpha/kernel/time.c b/trunk/arch/alpha/kernel/time.c index 818e74ed45dc..918e8e0b72ff 100644 --- a/trunk/arch/alpha/kernel/time.c +++ b/trunk/arch/alpha/kernel/time.c @@ -375,7 +375,8 @@ static struct clocksource clocksource_rpcc = { static inline void register_rpcc_clocksource(long cycle_freq) { - clocksource_register_hz(&clocksource_rpcc, cycle_freq); + clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); + clocksource_register(&clocksource_rpcc); } #else /* !CONFIG_SMP */ static inline void register_rpcc_clocksource(long cycle_freq) diff --git a/trunk/arch/arm/boot/compressed/Makefile b/trunk/arch/arm/boot/compressed/Makefile index 0c6852d93506..8ebbb511c783 100644 --- a/trunk/arch/arm/boot/compressed/Makefile +++ b/trunk/arch/arm/boot/compressed/Makefile @@ -74,7 +74,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT) ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) else ZTEXTADDR := 0 -ZBSSADDR := ALIGN(8) +ZBSSADDR := ALIGN(4) endif SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ diff --git a/trunk/arch/arm/boot/compressed/head.S b/trunk/arch/arm/boot/compressed/head.S index 49f5b2eaaa87..adf583cd0c35 100644 --- a/trunk/arch/arm/boot/compressed/head.S +++ b/trunk/arch/arm/boot/compressed/head.S @@ -179,14 +179,15 @@ not_angel: bl cache_on restart: adr r0, LC0 - ldmia r0, {r1, r2, r3, r6, r9, r11, r12} - ldr sp, [r0, #28] + ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12} + ldr sp, [r0, #32] /* * We might be running at a different address. We need * to fix up various pointers. */ sub r0, r0, r1 @ calculate the delta offset + add r5, r5, r0 @ _start add r6, r6, r0 @ _edata #ifndef CONFIG_ZBOOT_ROM @@ -205,40 +206,31 @@ restart: adr r0, LC0 /* * Check to see if we will overwrite ourselves. * r4 = final kernel address + * r5 = start of this image * r9 = size of decompressed image * r10 = end of this image, including bss/stack/malloc space if non XIP * We basically want: - * r4 - 16k page directory >= r10 -> OK - * r4 + image length <= current position (pc) -> OK + * r4 >= r10 -> OK + * r4 + image length <= r5 -> OK */ - add r10, r10, #16384 cmp r4, r10 bhs wont_overwrite add r10, r4, r9 - ARM( cmp r10, pc ) - THUMB( mov lr, pc ) - THUMB( cmp r10, lr ) + cmp r10, r5 bls wont_overwrite /* * Relocate ourselves past the end of the decompressed kernel. + * r5 = start of this image * r6 = _edata * r10 = end of the decompressed kernel * Because we always copy ahead, we need to do it from the end and go * backward in case the source and destination overlap. */ - /* - * Bump to the next 256-byte boundary with the size of - * the relocation code added. This avoids overwriting - * ourself when the offset is small. - */ - add r10, r10, #((reloc_code_end - restart + 256) & ~255) + /* Round up to next 256-byte boundary. */ + add r10, r10, #256 bic r10, r10, #255 - /* Get start of code we want to copy and align it down. */ - adr r5, restart - bic r5, r5, #31 - sub r9, r6, r5 @ size to copy add r9, r9, #31 @ rounded up to a multiple bic r9, r9, #31 @ ... of 32 bytes @@ -253,11 +245,6 @@ restart: adr r0, LC0 /* Preserve offset to relocated code. 
*/ sub r6, r9, r6 -#ifndef CONFIG_ZBOOT_ROM - /* cache_clean_flush may use the stack, so relocate it */ - add sp, sp, r6 -#endif - bl cache_clean_flush adr r0, BSYM(restart) @@ -346,6 +333,7 @@ not_relocated: mov r0, #0 LC0: .word LC0 @ r1 .word __bss_start @ r2 .word _end @ r3 + .word _start @ r5 .word _edata @ r6 .word _image_size @ r9 .word _got_start @ r11 @@ -1074,7 +1062,6 @@ memdump: mov r12, r0 #endif .ltorg -reloc_code_end: .align .section ".stack", "aw", %nobits diff --git a/trunk/arch/arm/boot/compressed/vmlinux.lds.in b/trunk/arch/arm/boot/compressed/vmlinux.lds.in index ea80abe78844..5309909d7282 100644 --- a/trunk/arch/arm/boot/compressed/vmlinux.lds.in +++ b/trunk/arch/arm/boot/compressed/vmlinux.lds.in @@ -54,7 +54,6 @@ SECTIONS .bss : { *(.bss) } _end = .; - . = ALIGN(8); /* the stack must be 64-bit aligned */ .stack : { *(.stack) } .stab 0 : { *(.stab) } diff --git a/trunk/arch/arm/common/vic.c b/trunk/arch/arm/common/vic.c index 7aa4262ada7a..113085a77123 100644 --- a/trunk/arch/arm/common/vic.c +++ b/trunk/arch/arm/common/vic.c @@ -22,16 +22,17 @@ #include #include #include -#include +#include #include #include #include #include -#ifdef CONFIG_PM +#if defined(CONFIG_PM) /** * struct vic_device - VIC PM device + * @sysdev: The system device which is registered. * @irq: The IRQ number for the base of the VIC. * @base: The register base for the VIC. * @resume_sources: A bitmask of interrupts for resume. @@ -42,6 +43,8 @@ * @protect: Save for VIC_PROTECT. */ struct vic_device { + struct sys_device sysdev; + void __iomem *base; int irq; u32 resume_sources; @@ -56,6 +59,11 @@ struct vic_device { static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; static int vic_id; + +static inline struct vic_device *to_vic(struct sys_device *sys) +{ + return container_of(sys, struct vic_device, sysdev); +} #endif /* CONFIG_PM */ /** @@ -77,9 +85,10 @@ static void vic_init2(void __iomem *base) writel(32, base + VIC_PL190_DEF_VECT_ADDR); } -#ifdef CONFIG_PM -static void resume_one_vic(struct vic_device *vic) +#if defined(CONFIG_PM) +static int vic_class_resume(struct sys_device *dev) { + struct vic_device *vic = to_vic(dev); void __iomem *base = vic->base; printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); @@ -98,18 +107,13 @@ static void resume_one_vic(struct vic_device *vic) writel(vic->soft_int, base + VIC_INT_SOFT); writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); -} - -static void vic_resume(void) -{ - int id; - for (id = vic_id - 1; id >= 0; id--) - resume_one_vic(vic_devices + id); + return 0; } -static void suspend_one_vic(struct vic_device *vic) +static int vic_class_suspend(struct sys_device *dev, pm_message_t state) { + struct vic_device *vic = to_vic(dev); void __iomem *base = vic->base; printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); @@ -124,21 +128,14 @@ static void suspend_one_vic(struct vic_device *vic) writel(vic->resume_irqs, base + VIC_INT_ENABLE); writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); -} - -static int vic_suspend(void) -{ - int id; - - for (id = 0; id < vic_id; id++) - suspend_one_vic(vic_devices + id); return 0; } -struct syscore_ops vic_syscore_ops = { - .suspend = vic_suspend, - .resume = vic_resume, +struct sysdev_class vic_class = { + .name = "vic", + .suspend = vic_class_suspend, + .resume = vic_class_resume, }; /** @@ -150,8 +147,30 @@ struct syscore_ops vic_syscore_ops = { */ static int __init vic_pm_init(void) { - if (vic_id > 0) - register_syscore_ops(&vic_syscore_ops); + struct vic_device *dev = vic_devices; + 
int err; + int id; + + if (vic_id == 0) + return 0; + + err = sysdev_class_register(&vic_class); + if (err) { + printk(KERN_ERR "%s: cannot register class\n", __func__); + return err; + } + + for (id = 0; id < vic_id; id++, dev++) { + dev->sysdev.id = id; + dev->sysdev.cls = &vic_class; + + err = sysdev_register(&dev->sysdev); + if (err) { + printk(KERN_ERR "%s: failed to register device\n", + __func__); + return err; + } + } return 0; } diff --git a/trunk/arch/arm/configs/at91x40_defconfig b/trunk/arch/arm/configs/at91x40_defconfig deleted file mode 100644 index c55e9212fcbb..000000000000 --- a/trunk/arch/arm/configs/at91x40_defconfig +++ /dev/null @@ -1,48 +0,0 @@ -CONFIG_EXPERIMENTAL=y -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_EMBEDDED=y -# CONFIG_HOTPLUG is not set -# CONFIG_ELF_CORE is not set -# CONFIG_FUTEX is not set -# CONFIG_TIMERFD is not set -# CONFIG_VM_EVENT_COUNTERS is not set -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -# CONFIG_LBDAF is not set -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set -# CONFIG_MMU is not set -CONFIG_ARCH_AT91=y -CONFIG_ARCH_AT91X40=y -CONFIG_MACH_AT91EB01=y -CONFIG_AT91_EARLY_USART0=y -CONFIG_CPU_ARM7TDMI=y -CONFIG_SET_MEM_PARAM=y -CONFIG_DRAM_BASE=0x01000000 -CONFIG_DRAM_SIZE=0x00400000 -CONFIG_FLASH_MEM_BASE=0x01400000 -CONFIG_PROCESSOR_ID=0x14000040 -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_BINFMT_FLAT=y -# CONFIG_SUSPEND is not set -# CONFIG_FW_LOADER is not set -CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_CHAR=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_RAM=y -CONFIG_MTD_ROM=y -CONFIG_BLK_DEV_RAM=y -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set -# CONFIG_DEVKMEM is not set -# CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set -# CONFIG_USB_SUPPORT is not set -CONFIG_EXT2_FS=y -# CONFIG_DNOTIFY is not set -CONFIG_ROMFS_FS=y -# CONFIG_ENABLE_MUST_CHECK is not set diff --git a/trunk/arch/arm/include/asm/i8253.h b/trunk/arch/arm/include/asm/i8253.h deleted file mode 100644 index 70656b69d5ce..000000000000 --- a/trunk/arch/arm/include/asm/i8253.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ASMARM_I8253_H -#define __ASMARM_I8253_H - -/* i8253A PIT registers */ -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 - -#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) - -extern raw_spinlock_t i8253_lock; - -#define outb_pit outb_p -#define inb_pit inb_p - -#endif diff --git a/trunk/arch/arm/include/asm/kprobes.h b/trunk/arch/arm/include/asm/kprobes.h index e46bdd0097eb..bb8a19bd5822 100644 --- a/trunk/arch/arm/include/asm/kprobes.h +++ b/trunk/arch/arm/include/asm/kprobes.h @@ -39,13 +39,10 @@ typedef u32 kprobe_opcode_t; struct kprobe; typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); -typedef unsigned long (kprobe_check_cc)(unsigned long); - /* Architecture specific copy of original instruction. */ struct arch_specific_insn { kprobe_opcode_t *insn; kprobe_insn_handler_t *insn_handler; - kprobe_check_cc *insn_check_cc; }; struct prev_kprobe { diff --git a/trunk/arch/arm/include/asm/mach/time.h b/trunk/arch/arm/include/asm/mach/time.h index d5adaae5ee2c..883f6be5117a 100644 --- a/trunk/arch/arm/include/asm/mach/time.h +++ b/trunk/arch/arm/include/asm/mach/time.h @@ -34,6 +34,7 @@ * timer interrupt which may be pending. 
*/ struct sys_timer { + struct sys_device dev; void (*init)(void); void (*suspend)(void); void (*resume)(void); diff --git a/trunk/arch/arm/include/asm/system.h b/trunk/arch/arm/include/asm/system.h index 832888d0c20c..885be097769d 100644 --- a/trunk/arch/arm/include/asm/system.h +++ b/trunk/arch/arm/include/asm/system.h @@ -159,7 +159,7 @@ extern unsigned int user_debug; #include #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) #define mb() do { dsb(); outer_sync(); } while (0) -#define rmb() dsb() +#define rmb() dmb() #define wmb() mb() #else #include diff --git a/trunk/arch/arm/kernel/kprobes-decode.c b/trunk/arch/arm/kernel/kprobes-decode.c index 15eeff6aea0e..23891317dc4b 100644 --- a/trunk/arch/arm/kernel/kprobes-decode.c +++ b/trunk/arch/arm/kernel/kprobes-decode.c @@ -34,6 +34,9 @@ * * *) If the PC is written to by the instruction, the * instruction must be fully simulated in software. + * If it is a conditional instruction, the handler + * will use insn[0] to copy its condition code to + * set r0 to 1 and insn[1] to "mov pc, lr" to return. * * *) Otherwise, a modified form of the instruction is * directly executed. Its handler calls the @@ -65,17 +68,13 @@ #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) -#define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos)) - -/* - * Test if load/store instructions writeback the address register. - * if P (bit 24) == 0 or W (bit 21) == 1 - */ -#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000) - #define PSR_fs (PSR_f|PSR_s) #define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */ +#define SET_R0_TRUE_INSTRUCTION 0xe3a00001 /* mov r0, #1 */ + +#define truecc_insn(insn) (((insn) & 0xf0000000) | \ + (SET_R0_TRUE_INSTRUCTION & 0x0fffffff)) typedef long (insn_0arg_fn_t)(void); typedef long (insn_1arg_fn_t)(long); @@ -420,10 +419,14 @@ insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr, static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs) { + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; long iaddr = (long)p->addr; int disp = branch_displacement(insn); + if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) + return; + if (insn & (1 << 24)) regs->ARM_lr = iaddr + 4; @@ -443,10 +446,14 @@ static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs) static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) { + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rm = insn & 0xf; long rmv = regs->uregs[rm]; + if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) + return; + if (insn & (1 << 5)) regs->ARM_lr = (long)p->addr + 4; @@ -456,16 +463,9 @@ static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) regs->ARM_cpsr |= PSR_T_BIT; } -static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs) -{ - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - unsigned long mask = 0xf8ff03df; /* Mask out execution state */ - regs->uregs[rd] = regs->ARM_cpsr & mask; -} - static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) { + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rn = (insn >> 16) & 0xf; int lbit = insn & (1 << 20); @@ -476,6 +476,9 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) int reg_bit_vector; int reg_count; + if (!insnslot_1arg_rflags(0, 
regs->ARM_cpsr, i_fn)) + return; + reg_count = 0; reg_bit_vector = insn & 0xffff; while (reg_bit_vector) { @@ -507,6 +510,11 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs) { + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; + + if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) + return; + regs->ARM_pc = (long)p->addr + str_pc_offset; simulate_ldm1stm1(p, regs); regs->ARM_pc = (long)p->addr + 4; @@ -517,16 +525,24 @@ static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs) regs->uregs[12] = regs->uregs[13]; } +static void __kprobes emulate_ldcstc(struct kprobe *p, struct pt_regs *regs) +{ + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; + kprobe_opcode_t insn = p->opcode; + int rn = (insn >> 16) & 0xf; + long rnv = regs->uregs[rn]; + + /* Save Rn in case of writeback. */ + regs->uregs[rn] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); +} + static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs) { insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; - long ppc = (long)p->addr + 8; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; /* rm may be invalid, don't care. */ - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; /* Not following the C calling convention here, so need asm(). */ __asm__ __volatile__ ( @@ -538,36 +554,29 @@ static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs) "str r0, %[rn] \n\t" /* in case of writeback */ "str r2, %[rd0] \n\t" "str r3, %[rd1] \n\t" - : [rn] "+m" (rnv), + : [rn] "+m" (regs->uregs[rn]), [rd0] "=m" (regs->uregs[rd]), [rd1] "=m" (regs->uregs[rd+1]) - : [rm] "m" (rmv), + : [rm] "m" (regs->uregs[rm]), [cpsr] "r" (regs->ARM_cpsr), [i_fn] "r" (i_fn) : "r0", "r1", "r2", "r3", "lr", "cc" ); - if (is_writeback(insn)) - regs->uregs[rn] = rnv; } static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs) { insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; - long ppc = (long)p->addr + 8; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; - /* rm/rmv may be invalid, don't care. */ - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long rnv_wb; + long rnv = regs->uregs[rn]; + long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ - rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], + regs->uregs[rn] = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], regs->uregs[rd+1], regs->ARM_cpsr, i_fn); - if (is_writeback(insn)) - regs->uregs[rn] = rnv_wb; } static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) @@ -621,6 +630,31 @@ static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs) regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. 
*/ } +static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs) +{ + insn_llret_0arg_fn_t *i_fn = (insn_llret_0arg_fn_t *)&p->ainsn.insn[0]; + kprobe_opcode_t insn = p->opcode; + union reg_pair fnr; + int rd = (insn >> 12) & 0xf; + int rn = (insn >> 16) & 0xf; + + fnr.dr = insnslot_llret_0arg_rflags(regs->ARM_cpsr, i_fn); + regs->uregs[rn] = fnr.r0; + regs->uregs[rd] = fnr.r1; +} + +static void __kprobes emulate_mcrr(struct kprobe *p, struct pt_regs *regs) +{ + insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 12) & 0xf; + int rn = (insn >> 16) & 0xf; + long rnv = regs->uregs[rn]; + long rdv = regs->uregs[rd]; + + insnslot_2arg_rflags(rnv, rdv, regs->ARM_cpsr, i_fn); +} + static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; @@ -654,32 +688,32 @@ static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs) insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); } -static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs) +static void __kprobes emulate_rd12(struct kprobe *p, struct pt_regs *regs) { + insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0]; + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 12) & 0xf; + + regs->uregs[rd] = insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); } -static void __kprobes -emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs) +static void __kprobes emulate_ird12(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - long rdv = regs->uregs[rd]; + int ird = (insn >> 12) & 0xf; - regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn); + insnslot_1arg_rflags(regs->uregs[ird], regs->ARM_cpsr, i_fn); } -static void __kprobes -emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs) +static void __kprobes emulate_rn16(struct kprobe *p, struct pt_regs *regs) { - insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rn = insn & 0xf; - long rdv = regs->uregs[rd]; + int rn = (insn >> 16) & 0xf; long rnv = regs->uregs[rn]; - regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn); + insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs) @@ -784,17 +818,6 @@ emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs) regs->uregs[rd] = insnslot_1arg_rwflags(rnv, ®s->ARM_cpsr, i_fn); } -static void __kprobes -emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs) -{ - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rn = (insn >> 16) & 0xf; - long rnv = (rn == 15) ? 
(long)p->addr + 8 : regs->uregs[rn]; - - insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn); -} - static void __kprobes emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs) { @@ -831,34 +854,14 @@ emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs) insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn); } -static void __kprobes -emulate_alu_tests(struct kprobe *p, struct pt_regs *regs) -{ - insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - long ppc = (long)p->addr + 8; - int rn = (insn >> 16) & 0xf; - int rs = (insn >> 8) & 0xf; /* rs/rsv may be invalid, don't care. */ - int rm = insn & 0xf; - long rnv = (rn == 15) ? ppc : regs->uregs[rn]; - long rmv = (rm == 15) ? ppc : regs->uregs[rm]; - long rsv = regs->uregs[rs]; - - insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn); -} - static enum kprobe_insn __kprobes prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - int not_imm = (insn & (1 << 26)) ? (insn & (1 << 25)) - : (~insn & (1 << 22)); - - if (is_writeback(insn) && is_r15(insn, 16)) - return INSN_REJECTED; /* Writeback to PC */ + int ibit = (insn & (1 << 26)) ? 25 : 22; insn &= 0xfff00fff; insn |= 0x00001000; /* Rn = r0, Rd = r1 */ - if (not_imm) { + if (insn & (1 << ibit)) { insn &= ~0xf; insn |= 2; /* Rm = r2 */ } @@ -868,40 +871,20 @@ prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi) } static enum kprobe_insn __kprobes -prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi) -{ - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xffff0fff; /* Rd = r0 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rd12_modify; - return INSN_GOOD; -} - -static enum kprobe_insn __kprobes -prep_emulate_rd12rn0_modify(kprobe_opcode_t insn, - struct arch_specific_insn *asi) +prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xffff0ff0; /* Rd = r0 */ - insn |= 0x00000001; /* Rn = r1 */ + insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ asi->insn[0] = insn; - asi->insn_handler = emulate_rd12rn0_modify; + asi->insn_handler = emulate_rd12rm0; return INSN_GOOD; } static enum kprobe_insn __kprobes -prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi) +prep_emulate_rd12(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - - insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ + insn &= 0xffff0fff; /* Rd = r0 */ asi->insn[0] = insn; - asi->insn_handler = emulate_rd12rm0; + asi->insn_handler = emulate_rd12; return INSN_GOOD; } @@ -909,9 +892,6 @@ static enum kprobe_insn __kprobes prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ insn |= 0x00000001; /* Rm = r1 */ asi->insn[0] = insn; @@ -923,9 +903,6 @@ static enum kprobe_insn __kprobes prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - if (is_r15(insn, 16)) - return INSN_REJECTED; /* Rd is PC */ - insn &= 0xfff0f0f0; /* Rd = r0, Rs = r0 */ insn |= 0x00000001; /* Rm = r1 */ asi->insn[0] = insn; @@ -937,9 +914,6 @@ static enum kprobe_insn __kprobes prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - if (is_r15(insn, 16)) - return INSN_REJECTED; /* Rd is PC */ - insn &= 0xfff000f0; /* Rd = r0, Rn = r0 */
insn |= 0x00000102; /* Rs = r1, Rm = r2 */ asi->insn[0] = insn; @@ -951,9 +925,6 @@ static enum kprobe_insn __kprobes prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - if (is_r15(insn, 16) || is_r15(insn, 12)) - return INSN_REJECTED; /* RdHi or RdLo is PC */ - insn &= 0xfff000f0; /* RdHi = r0, RdLo = r1 */ insn |= 0x00001203; /* Rs = r2, Rm = r3 */ asi->insn[0] = insn; @@ -974,13 +945,20 @@ prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn, static enum kprobe_insn __kprobes space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - /* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */ - /* PLDI : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */ - /* PLDW : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */ - /* PLD : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */ - if ((insn & 0xfe300000) == 0xf4100000) { - asi->insn_handler = emulate_nop; - return INSN_GOOD_NO_SLOT; + /* CPS mmod == 1 : 1111 0001 0000 xx10 xxxx xxxx xx0x xxxx */ + /* RFE : 1111 100x x0x1 xxxx xxxx 1010 xxxx xxxx */ + /* SRS : 1111 100x x1x0 1101 xxxx 0101 xxxx xxxx */ + if ((insn & 0xfff30020) == 0xf1020000 || + (insn & 0xfe500f00) == 0xf8100a00 || + (insn & 0xfe5f0f00) == 0xf84d0500) + return INSN_REJECTED; + + /* PLD : 1111 01x1 x101 xxxx xxxx xxxx xxxx xxxx : */ + if ((insn & 0xfd700000) == 0xf4500000) { + insn &= 0xfff0ffff; /* Rn = r0 */ + asi->insn[0] = insn; + asi->insn_handler = emulate_rn16; + return INSN_GOOD; } /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */ @@ -989,22 +967,41 @@ space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi) return INSN_GOOD_NO_SLOT; } - /* CPS : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */ - /* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ - - /* SRS : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ - /* RFE : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ + /* SETEND : 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ + /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ + if ((insn & 0xffff00f0) == 0xf1010000 || + (insn & 0xff000010) == 0xfe000000) { + asi->insn[0] = insn; + asi->insn_handler = emulate_none; + return INSN_GOOD; + } - /* Coprocessor instructions... */ /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ - /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ - /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ - /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ - /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ - /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ + if ((insn & 0xffe00000) == 0xfc400000) { + insn &= 0xfff00fff; /* Rn = r0 */ + insn |= 0x00001000; /* Rd = r1 */ + asi->insn[0] = insn; + asi->insn_handler = + (insn & (1 << 20)) ? emulate_mrrc : emulate_mcrr; + return INSN_GOOD; + } - return INSN_REJECTED; + /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ + /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ + if ((insn & 0xfe000000) == 0xfc000000) { + insn &= 0xfff0ffff; /* Rn = r0 */ + asi->insn[0] = insn; + asi->insn_handler = emulate_ldcstc; + return INSN_GOOD; + } + + /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ + /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ + insn &= 0xffff0fff; /* Rd = r0 */ + asi->insn[0] = insn; + asi->insn_handler = (insn & (1 << 20)) ? 
emulate_rd12 : emulate_ird12; + return INSN_GOOD; } static enum kprobe_insn __kprobes @@ -1013,18 +1010,19 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */ if ((insn & 0x0f900010) == 0x01000000) { - /* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */ - if ((insn & 0x0ff000f0) == 0x01000000) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ - asi->insn_handler = simulate_mrs; - return INSN_GOOD_NO_SLOT; - } + /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ + /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ + if ((insn & 0x0ff000f0) == 0x01200020 || + (insn & 0x0fb000f0) == 0x01200000) + return INSN_REJECTED; + + /* MRS : cccc 0001 0x00 xxxx xxxx xxxx 0000 xxxx */ + if ((insn & 0x0fb00010) == 0x01000000) + return prep_emulate_rd12(insn, asi); /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ if ((insn & 0x0ff00090) == 0x01400080) - return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, - asi); + return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ @@ -1033,29 +1031,24 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) return prep_emulate_rd16rs8rm0_wflags(insn, asi); /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */ - /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */ - if ((insn & 0x0ff00090) == 0x01000080 || - (insn & 0x0ff000b0) == 0x01200080) - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - - /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ - /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ - /* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */ + /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 0x00 xxxx : Q */ + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - /* Other instruction encodings aren't yet defined */ - return INSN_REJECTED; } /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */ else if ((insn & 0x0f900090) == 0x01000010) { + /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ + if ((insn & 0xfff000f0) == 0xe1200070) + return INSN_REJECTED; + /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ if ((insn & 0x0ff000d0) == 0x01200010) { - if ((insn & 0x0ff000ff) == 0x0120003f) - return INSN_REJECTED; /* BLX pc */ + asi->insn[0] = truecc_insn(insn); asi->insn_handler = simulate_blx2bx; - return INSN_GOOD_NO_SLOT; + return INSN_GOOD; } /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ @@ -1066,27 +1059,17 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */ /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */ /* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */ - if ((insn & 0x0f9000f0) == 0x01000050) - return prep_emulate_rd12rn16rm0_wflags(insn, asi); - - /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ - /* SMC : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */ - - /* Other instruction encodings aren't yet defined */ - return INSN_REJECTED; + return prep_emulate_rd12rn16rm0_wflags(insn, asi); } /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */ - else if ((insn & 0x0f0000f0) == 0x00000090) { + else if ((insn & 0x0f000090) == 0x00000090) { /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */ /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */ /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */ /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */ /* UMAAL : 
cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */ - /* undef : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx : */ - /* MLS : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx : */ - /* undef : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx : */ /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */ /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */ /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */ @@ -1095,15 +1078,13 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */ /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */ /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */ - if ((insn & 0x00d00000) == 0x00500000) - return INSN_REJECTED; - else if ((insn & 0x00e00000) == 0x00000000) - return prep_emulate_rd16rs8rm0_wflags(insn, asi); - else if ((insn & 0x00a00000) == 0x00200000) - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - else - return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, - asi); + if ((insn & 0x0fe000f0) == 0x00000090) { + return prep_emulate_rd16rs8rm0_wflags(insn, asi); + } else if ((insn & 0x0fe000f0) == 0x00200090) { + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); + } else { + return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); + } } /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */ @@ -1111,45 +1092,23 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */ /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */ - /* ??? : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */ - /* ??? : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */ - /* ??? : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */ - /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */ - /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */ - /* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */ - /* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */ - /* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */ - /* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */ - /* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */ - /* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */ - /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */ /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */ + /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */ + /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */ /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */ /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */ /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */ /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */ - if ((insn & 0x0f0000f0) == 0x01000090) { - if ((insn & 0x0fb000f0) == 0x01000090) { - /* SWP/SWPB */ - return prep_emulate_rd12rn16rm0_wflags(insn, - asi); - } else { - /* STREX/LDREX variants and unallocaed space */ - return INSN_REJECTED; - } - + if ((insn & 0x0fb000f0) == 0x01000090) { + /* SWP/SWPB */ + return prep_emulate_rd12rn16rm0_wflags(insn, asi); } else if ((insn & 0x0e1000d0) == 0x00000d0) { /* STRD/LDRD */ - if ((insn & 0x0000e000) == 0x0000e000) - return INSN_REJECTED; /* Rd is LR or PC */ - if (is_writeback(insn) && is_r15(insn, 16)) - return INSN_REJECTED; /* Writeback to PC */ - insn &= 0xfff00fff; insn |= 0x00002000; /* Rn = r0, Rd = r2 */ - if (!(insn & (1 << 22))) { - /* Register index */ + if (insn & (1 << 22)) { + /* I bit */ insn &= ~0xf; insn |= 1; /* Rm = r1 */ } @@ -1159,9 +1118,6 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) return INSN_GOOD; } - /* LDRH/STRH/LDRSB/LDRSH */ - if (is_r15(insn, 12)) - return INSN_REJECTED; 
/* Rd is PC */ return prep_emulate_ldr_str(insn, asi); } @@ -1169,7 +1125,7 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* * ALU op with S bit and Rd == 15 : - * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx + * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */ if ((insn & 0x0e10f000) == 0x0010f000) return INSN_REJECTED; @@ -1198,61 +1154,22 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) insn |= 0x00000200; /* Rs = r2 */ } asi->insn[0] = insn; - - if ((insn & 0x0f900000) == 0x01100000) { - /* - * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx - * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx - * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx - * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx - */ - asi->insn_handler = emulate_alu_tests; - } else { - /* ALU ops which write to Rd */ - asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ + asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ emulate_alu_rwflags : emulate_alu_rflags; - } return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - /* MOVW : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */ - /* MOVT : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */ - if ((insn & 0x0fb00000) == 0x03000000) - return prep_emulate_rd12_modify(insn, asi); - - /* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */ - if ((insn & 0x0fff0000) == 0x03200000) { - unsigned op2 = insn & 0x000000ff; - if (op2 == 0x01 || op2 == 0x04) { - /* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */ - /* SEV : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_none; - return INSN_GOOD; - } else if (op2 <= 0x03) { - /* NOP : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */ - /* WFE : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */ - /* WFI : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */ - /* - * We make WFE and WFI true NOPs to avoid stalls due - * to missing events whilst processing the probe. - */ - asi->insn_handler = emulate_nop; - return INSN_GOOD_NO_SLOT; - } - /* For DBG and unallocated hints it's safest to reject them */ - return INSN_REJECTED; - } - /* * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx + * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx * ALU op with S bit and Rd == 15 : * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */ if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */ + (insn & 0x0ff00000) == 0x03400000 || /* Undef */ (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ return INSN_REJECTED; @@ -1263,22 +1180,10 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) * *S (bit 20) updates condition codes * ADC/SBC/RSC reads the C flag */ - insn &= 0xfff00fff; /* Rn = r0 and Rd = r0 */ + insn &= 0xffff0fff; /* Rd = r0 */ asi->insn[0] = insn; - - if ((insn & 0x0f900000) == 0x03100000) { - /* - * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx - * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx - * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx - * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx - */ - asi->insn_handler = emulate_alu_tests_imm; - } else { - /* ALU ops which write to Rd */ - asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ + asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ emulate_alu_imm_rwflags : emulate_alu_imm_rflags; - } return INSN_GOOD; } @@ -1287,8 +1192,6 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! 
*/ if ((insn & 0x0ff000f0) == 0x068000b0) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ insn |= 0x00000001; /* Rm = r1 */ asi->insn[0] = insn; @@ -1302,8 +1205,6 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */ if ((insn & 0x0fa00030) == 0x06a00010 || (insn & 0x0fb000f0) == 0x06a00030) { - if (is_r15(insn, 12)) - return INSN_REJECTED; /* Rd is PC */ insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ asi->insn[0] = insn; asi->insn_handler = emulate_sat; @@ -1312,101 +1213,57 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */ /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ - /* RBIT : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */ /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ if ((insn & 0x0ff00070) == 0x06b00030 || - (insn & 0x0ff00070) == 0x06f00030) + (insn & 0x0ff000f0) == 0x06f000b0) return prep_emulate_rd12rm0(insn, asi); - /* ??? : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx : */ /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */ /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */ /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */ /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */ /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */ - /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx : */ /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */ /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */ /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */ /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */ /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */ /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */ - /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx : */ /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */ /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */ /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */ /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */ /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */ /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */ - /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx : */ /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */ - /* ??? : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx : */ /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */ /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */ /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */ /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */ /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */ - /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx : */ /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */ /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */ /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */ /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */ /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */ /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */ - /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? 
: cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx : */ /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */ /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */ /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */ /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */ /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */ /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */ - /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx : */ - /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx : */ /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */ - if ((insn & 0x0f800010) == 0x06000010) { - if ((insn & 0x00300000) == 0x00000000 || - (insn & 0x000000e0) == 0x000000a0 || - (insn & 0x000000e0) == 0x000000c0) - return INSN_REJECTED; /* Unallocated space */ - return prep_emulate_rd12rn16rm0_wflags(insn, asi); - } - /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */ /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */ - if ((insn & 0x0ff00030) == 0x06800010) - return prep_emulate_rd12rn16rm0_wflags(insn, asi); - /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */ - /* SXTB16 : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx : */ - /* ??? : cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx : */ + /* SXTB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ - /* SXTB : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx : */ /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */ - /* SXTH : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx : */ /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */ - /* UXTB16 : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx : */ - /* ??? : cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx : */ /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */ - /* UXTB : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx : */ /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */ - /* UXTH : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx : */ - if ((insn & 0x0f8000f0) == 0x06800070) { - if ((insn & 0x00300000) == 0x00100000) - return INSN_REJECTED; /* Unallocated space */ - - if ((insn & 0x000f0000) == 0x000f0000) - return prep_emulate_rd12rm0(insn, asi); - else - return prep_emulate_rd12rn16rm0_wflags(insn, asi); - } - - /* Other instruction encodings aren't yet defined */ - return INSN_REJECTED; + return prep_emulate_rd12rn16rm0_wflags(insn, asi); } static enum kprobe_insn __kprobes @@ -1416,49 +1273,29 @@ space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) if ((insn & 0x0ff000f0) == 0x03f000f0) return INSN_REJECTED; + /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */ + /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */ + if ((insn & 0x0ff000f0) == 0x07800010) + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); + /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */ if ((insn & 0x0ff00090) == 0x07400010) return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */ - /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */ /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */ - /* SMUSD : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx : */ /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */ - /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ - /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx : */ - /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx : */ + /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ if ((insn & 0x0ff00090) == 0x07000010 || (insn & 0x0ff000d0) == 0x07500010 || - (insn & 0x0ff000f0) == 
0x07800010) { - - if ((insn & 0x0000f000) == 0x0000f000) - return prep_emulate_rd16rs8rm0_wflags(insn, asi); - else - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - } - - /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ - if ((insn & 0x0ff000d0) == 0x075000d0) + (insn & 0x0ff000d0) == 0x075000d0) return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - /* SBFX : cccc 0111 101x xxxx xxxx xxxx x101 xxxx : */ - /* UBFX : cccc 0111 111x xxxx xxxx xxxx x101 xxxx : */ - if ((insn & 0x0fa00070) == 0x07a00050) - return prep_emulate_rd12rm0(insn, asi); - - /* BFI : cccc 0111 110x xxxx xxxx xxxx x001 xxxx : */ - /* BFC : cccc 0111 110x xxxx xxxx xxxx x001 1111 : */ - if ((insn & 0x0fe00070) == 0x07c00010) { - - if ((insn & 0x0000000f) == 0x0000000f) - return prep_emulate_rd12_modify(insn, asi); - else - return prep_emulate_rd12rn0_modify(insn, asi); - } - - return INSN_REJECTED; + /* SMUSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx : */ + /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */ + /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ + return prep_emulate_rd16rs8rm0_wflags(insn, asi); } static enum kprobe_insn __kprobes @@ -1472,10 +1309,6 @@ space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */ /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ - - if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12)) - return INSN_REJECTED; /* LDRB into PC */ - return prep_emulate_ldr_str(insn, asi); } @@ -1490,9 +1323,10 @@ space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ + asi->insn[0] = truecc_insn(insn); asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */ simulate_stm1_pc : simulate_ldm1stm1; - return INSN_GOOD_NO_SLOT; + return INSN_GOOD; } static enum kprobe_insn __kprobes @@ -1500,117 +1334,58 @@ space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ + asi->insn[0] = truecc_insn(insn); asi->insn_handler = simulate_bbl; - return INSN_GOOD_NO_SLOT; + return INSN_GOOD; } static enum kprobe_insn __kprobes -space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) +space_cccc_1100_010x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - /* Coprocessor instructions... 
*/ /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ - /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ - /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ - /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ - /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ - /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ - - /* SVC : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ - - return INSN_REJECTED; -} - -static unsigned long __kprobes __check_eq(unsigned long cpsr) -{ - return cpsr & PSR_Z_BIT; -} - -static unsigned long __kprobes __check_ne(unsigned long cpsr) -{ - return (~cpsr) & PSR_Z_BIT; -} - -static unsigned long __kprobes __check_cs(unsigned long cpsr) -{ - return cpsr & PSR_C_BIT; -} - -static unsigned long __kprobes __check_cc(unsigned long cpsr) -{ - return (~cpsr) & PSR_C_BIT; -} - -static unsigned long __kprobes __check_mi(unsigned long cpsr) -{ - return cpsr & PSR_N_BIT; -} - -static unsigned long __kprobes __check_pl(unsigned long cpsr) -{ - return (~cpsr) & PSR_N_BIT; -} - -static unsigned long __kprobes __check_vs(unsigned long cpsr) -{ - return cpsr & PSR_V_BIT; -} - -static unsigned long __kprobes __check_vc(unsigned long cpsr) -{ - return (~cpsr) & PSR_V_BIT; -} - -static unsigned long __kprobes __check_hi(unsigned long cpsr) -{ - cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ - return cpsr & PSR_C_BIT; -} - -static unsigned long __kprobes __check_ls(unsigned long cpsr) -{ - cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ - return (~cpsr) & PSR_C_BIT; -} - -static unsigned long __kprobes __check_ge(unsigned long cpsr) -{ - cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - return (~cpsr) & PSR_N_BIT; + insn &= 0xfff00fff; + insn |= 0x00001000; /* Rn = r0, Rd = r1 */ + asi->insn[0] = insn; + asi->insn_handler = (insn & (1 << 20)) ? 
emulate_mrrc : emulate_mcrr; + return INSN_GOOD; } -static unsigned long __kprobes __check_lt(unsigned long cpsr) +static enum kprobe_insn __kprobes +space_cccc_110x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - return cpsr & PSR_N_BIT; + /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ + /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ + insn &= 0xfff0ffff; /* Rn = r0 */ + asi->insn[0] = insn; + asi->insn_handler = emulate_ldcstc; + return INSN_GOOD; } -static unsigned long __kprobes __check_gt(unsigned long cpsr) +static enum kprobe_insn __kprobes +space_cccc_111x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ - return (~temp) & PSR_N_BIT; -} + /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ + /* SWI : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ + if ((insn & 0xfff000f0) == 0xe1200070 || + (insn & 0x0f000000) == 0x0f000000) + return INSN_REJECTED; -static unsigned long __kprobes __check_le(unsigned long cpsr) -{ - unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ - return temp & PSR_N_BIT; -} + /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ + if ((insn & 0x0f000010) == 0x0e000000) { + asi->insn[0] = insn; + asi->insn_handler = emulate_none; + return INSN_GOOD; + } -static unsigned long __kprobes __check_al(unsigned long cpsr) -{ - return true; + /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ + /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ + insn &= 0xffff0fff; /* Rd = r0 */ + asi->insn[0] = insn; + asi->insn_handler = (insn & (1 << 20)) ? emulate_rd12 : emulate_ird12; + return INSN_GOOD; } -static kprobe_check_cc * const condition_checks[16] = { - &__check_eq, &__check_ne, &__check_cs, &__check_cc, - &__check_mi, &__check_pl, &__check_vs, &__check_vc, - &__check_hi, &__check_ls, &__check_ge, &__check_lt, - &__check_gt, &__check_le, &__check_al, &__check_al -}; - /* Return: * INSN_REJECTED If instruction is one not allowed to kprobe, * INSN_GOOD If instruction is supported and uses instruction slot, @@ -1626,45 +1401,133 @@ static kprobe_check_cc * const condition_checks[16] = { enum kprobe_insn __kprobes arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - asi->insn_check_cc = condition_checks[insn>>28]; asi->insn[1] = KPROBE_RETURN_INSTRUCTION; - if ((insn & 0xf0000000) == 0xf0000000) + if ((insn & 0xf0000000) == 0xf0000000) { return space_1111(insn, asi); - else if ((insn & 0x0e000000) == 0x00000000) + } else if ((insn & 0x0e000000) == 0x00000000) { return space_cccc_000x(insn, asi); - else if ((insn & 0x0e000000) == 0x02000000) + } else if ((insn & 0x0e000000) == 0x02000000) { return space_cccc_001x(insn, asi); - else if ((insn & 0x0f000010) == 0x06000010) + } else if ((insn & 0x0f000010) == 0x06000010) { return space_cccc_0110__1(insn, asi); - else if ((insn & 0x0f000010) == 0x07000010) + } else if ((insn & 0x0f000010) == 0x07000010) { return space_cccc_0111__1(insn, asi); - else if ((insn & 0x0c000000) == 0x04000000) + } else if ((insn & 0x0c000000) == 0x04000000) { return space_cccc_01xx(insn, asi); - else if ((insn & 0x0e000000) == 0x08000000) + } else if ((insn & 0x0e000000) == 0x08000000) { return space_cccc_100x(insn, asi); - else if ((insn & 0x0e000000) == 0x0a000000) + } else if ((insn & 0x0e000000) == 0x0a000000) { return space_cccc_101x(insn, asi); - return 
space_cccc_11xx(insn, asi); + } else if ((insn & 0x0fe00000) == 0x0c400000) { + + return space_cccc_1100_010x(insn, asi); + + } else if ((insn & 0x0e000000) == 0x0c000000) { + + return space_cccc_110x(insn, asi); + + } + + return space_cccc_111x(insn, asi); } void __init arm_kprobe_decode_init(void) { find_str_pc_offset(); } + + +/* + * All ARM instructions listed below. + * + * Instructions and their general purpose registers are given. + * If a particular register may not use R15, it is prefixed with a "!". + * If marked with a "*" means the value returned by reading R15 + * is implementation defined. + * + * ADC/ADD/AND/BIC/CMN/CMP/EOR/MOV/MVN/ORR/RSB/RSC/SBC/SUB/TEQ + * TST: Rd, Rn, Rm, !Rs + * BX: Rm + * BLX(2): !Rm + * BX: Rm (R15 legal, but discouraged) + * BXJ: !Rm, + * CLZ: !Rd, !Rm + * CPY: Rd, Rm + * LDC/2,STC/2 immediate offset & unindex: Rn + * LDC/2,STC/2 immediate pre/post-indexed: !Rn + * LDM(1/3): !Rn, register_list + * LDM(2): !Rn, !register_list + * LDR,STR,PLD immediate offset: Rd, Rn + * LDR,STR,PLD register offset: Rd, Rn, !Rm + * LDR,STR,PLD scaled register offset: Rd, !Rn, !Rm + * LDR,STR immediate pre/post-indexed: Rd, !Rn + * LDR,STR register pre/post-indexed: Rd, !Rn, !Rm + * LDR,STR scaled register pre/post-indexed: Rd, !Rn, !Rm + * LDRB,STRB immediate offset: !Rd, Rn + * LDRB,STRB register offset: !Rd, Rn, !Rm + * LDRB,STRB scaled register offset: !Rd, !Rn, !Rm + * LDRB,STRB immediate pre/post-indexed: !Rd, !Rn + * LDRB,STRB register pre/post-indexed: !Rd, !Rn, !Rm + * LDRB,STRB scaled register pre/post-indexed: !Rd, !Rn, !Rm + * LDRT,LDRBT,STRBT immediate pre/post-indexed: !Rd, !Rn + * LDRT,LDRBT,STRBT register pre/post-indexed: !Rd, !Rn, !Rm + * LDRT,LDRBT,STRBT scaled register pre/post-indexed: !Rd, !Rn, !Rm + * LDRH/SH/SB/D,STRH/SH/SB/D immediate offset: !Rd, Rn + * LDRH/SH/SB/D,STRH/SH/SB/D register offset: !Rd, Rn, !Rm + * LDRH/SH/SB/D,STRH/SH/SB/D immediate pre/post-indexed: !Rd, !Rn + * LDRH/SH/SB/D,STRH/SH/SB/D register pre/post-indexed: !Rd, !Rn, !Rm + * LDREX: !Rd, !Rn + * MCR/2: !Rd + * MCRR/2,MRRC/2: !Rd, !Rn + * MLA: !Rd, !Rn, !Rm, !Rs + * MOV: Rd + * MRC/2: !Rd (if Rd==15, only changes cond codes, not the register) + * MRS,MSR: !Rd + * MUL: !Rd, !Rm, !Rs + * PKH{BT,TB}: !Rd, !Rn, !Rm + * QDADD,[U]QADD/16/8/SUBX: !Rd, !Rm, !Rn + * QDSUB,[U]QSUB/16/8/ADDX: !Rd, !Rm, !Rn + * REV/16/SH: !Rd, !Rm + * RFE: !Rn + * {S,U}[H]ADD{16,8,SUBX},{S,U}[H]SUB{16,8,ADDX}: !Rd, !Rn, !Rm + * SEL: !Rd, !Rn, !Rm + * SMLA,SMLA{D,W},SMLSD,SMML{A,S}: !Rd, !Rn, !Rm, !Rs + * SMLAL,SMLA{D,LD},SMLSLD,SMMULL,SMULW: !RdHi, !RdLo, !Rm, !Rs + * SMMUL,SMUAD,SMUL,SMUSD: !Rd, !Rm, !Rs + * SSAT/16: !Rd, !Rm + * STM(1/2): !Rn, register_list* (R15 in reg list not recommended) + * STRT immediate pre/post-indexed: Rd*, !Rn + * STRT register pre/post-indexed: Rd*, !Rn, !Rm + * STRT scaled register pre/post-indexed: Rd*, !Rn, !Rm + * STREX: !Rd, !Rn, !Rm + * SWP/B: !Rd, !Rn, !Rm + * {S,U}XTA{B,B16,H}: !Rd, !Rn, !Rm + * {S,U}XT{B,B16,H}: !Rd, !Rm + * UM{AA,LA,UL}L: !RdHi, !RdLo, !Rm, !Rs + * USA{D8,A8,T,T16}: !Rd, !Rm, !Rs + * + * May transfer control by writing R15 (possible mode changes or alternate + * mode accesses marked by "*"): + * ALU op (* with s-bit), B, BL, BKPT, BLX(1/2), BX, BXJ, CPS*, CPY, + * LDM(1), LDM(2/3)*, LDR, MOV, RFE*, SWI* + * + * Instructions that do not take general registers, nor transfer control: + * CDP/2, SETEND, SRS* + */ diff --git a/trunk/arch/arm/kernel/kprobes.c b/trunk/arch/arm/kernel/kprobes.c index 1656c87501c0..2ba7deb3072e 100644 --- 
a/trunk/arch/arm/kernel/kprobes.c +++ b/trunk/arch/arm/kernel/kprobes.c @@ -134,8 +134,7 @@ static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { regs->ARM_pc += 4; - if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) - p->ainsn.insn_handler(p, regs); + p->ainsn.insn_handler(p, regs); } /* diff --git a/trunk/arch/arm/kernel/leds.c b/trunk/arch/arm/kernel/leds.c index 0f107dcb0347..31a316c1777b 100644 --- a/trunk/arch/arm/kernel/leds.c +++ b/trunk/arch/arm/kernel/leds.c @@ -10,7 +10,6 @@ #include #include #include -#include #include @@ -70,37 +69,36 @@ static ssize_t leds_store(struct sys_device *dev, static SYSDEV_ATTR(event, 0200, NULL, leds_store); -static struct sysdev_class leds_sysclass = { - .name = "leds", -}; - -static struct sys_device leds_device = { - .id = 0, - .cls = &leds_sysclass, -}; - -static int leds_suspend(void) +static int leds_suspend(struct sys_device *dev, pm_message_t state) { leds_event(led_stop); return 0; } -static void leds_resume(void) +static int leds_resume(struct sys_device *dev) { leds_event(led_start); + return 0; } -static void leds_shutdown(void) +static int leds_shutdown(struct sys_device *dev) { leds_event(led_halted); + return 0; } -static struct syscore_ops leds_syscore_ops = { +static struct sysdev_class leds_sysclass = { + .name = "leds", .shutdown = leds_shutdown, .suspend = leds_suspend, .resume = leds_resume, }; +static struct sys_device leds_device = { + .id = 0, + .cls = &leds_sysclass, +}; + static int __init leds_init(void) { int ret; @@ -109,8 +107,6 @@ static int __init leds_init(void) ret = sysdev_register(&leds_device); if (ret == 0) ret = sysdev_create_file(&leds_device, &attr_event); - if (ret == 0) - register_syscore_ops(&leds_syscore_ops); return ret; } diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c index 139e3c827369..979da3947f42 100644 --- a/trunk/arch/arm/kernel/perf_event.c +++ b/trunk/arch/arm/kernel/perf_event.c @@ -746,8 +746,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) tail = (struct frame_tail __user *)regs->ARM_fp - 1; - while ((entry->nr < PERF_MAX_STACK_DEPTH) && - tail && !((unsigned long)tail & 0x3)) + while (tail && !((unsigned long)tail & 0x3)) tail = user_backtrace(tail, entry); } diff --git a/trunk/arch/arm/kernel/ptrace.c b/trunk/arch/arm/kernel/ptrace.c index 8182f45ca493..2bf27f364d09 100644 --- a/trunk/arch/arm/kernel/ptrace.c +++ b/trunk/arch/arm/kernel/ptrace.c @@ -767,20 +767,12 @@ long arch_ptrace(struct task_struct *child, long request, #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: - if (ptrace_get_breakpoints(child) < 0) - return -ESRCH; - ret = ptrace_gethbpregs(child, addr, (unsigned long __user *)data); - ptrace_put_breakpoints(child); break; case PTRACE_SETHBPREGS: - if (ptrace_get_breakpoints(child) < 0) - return -ESRCH; - ret = ptrace_sethbpregs(child, addr, (unsigned long __user *)data); - ptrace_put_breakpoints(child); break; #endif diff --git a/trunk/arch/arm/kernel/signal.c b/trunk/arch/arm/kernel/signal.c index 0340224cf73c..cb8398317644 100644 --- a/trunk/arch/arm/kernel/signal.c +++ b/trunk/arch/arm/kernel/signal.c @@ -597,19 +597,45 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, return err; } +static inline void setup_syscall_restart(struct pt_regs *regs) +{ + regs->ARM_r0 = regs->ARM_ORIG_r0; + regs->ARM_pc -= thumb_mode(regs) ? 
2 : 4; +} + /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, - struct pt_regs * regs) + struct pt_regs * regs, int syscall) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; int usig = sig; int ret; + /* + * If we were from a system call, check for system call restarting... + */ + if (syscall) { + switch (regs->ARM_r0) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + regs->ARM_r0 = -EINTR; + break; + case -ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->ARM_r0 = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + setup_syscall_restart(regs); + } + } + /* * translate the signal */ @@ -659,7 +685,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, */ static void do_signal(struct pt_regs *regs, int syscall) { - unsigned int retval = 0, continue_addr = 0, restart_addr = 0; struct k_sigaction ka; siginfo_t info; int signr; @@ -673,61 +698,18 @@ static void do_signal(struct pt_regs *regs, int syscall) if (!user_mode(regs)) return; - /* - * If we were from a system call, check for system call restarting... - */ - if (syscall) { - continue_addr = regs->ARM_pc; - restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4); - retval = regs->ARM_r0; - - /* - * Prepare for system call restart. We do this here so that a - * debugger will see the already changed PSW. - */ - switch (retval) { - case -ERESTARTNOHAND: - case -ERESTARTSYS: - case -ERESTARTNOINTR: - regs->ARM_r0 = regs->ARM_ORIG_r0; - regs->ARM_pc = restart_addr; - break; - case -ERESTART_RESTARTBLOCK: - regs->ARM_r0 = -EINTR; - break; - } - } - if (try_to_freeze()) goto no_signal; - /* - * Get the signal to deliver. When running under ptrace, at this - * point the debugger may change all our registers ... - */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { sigset_t *oldset; - /* - * Depending on the signal settings we may need to revert the - * decision to restart the system call. But skip this if a - * debugger has chosen to restart at a different PC. - */ - if (regs->ARM_pc == restart_addr) { - if (retval == -ERESTARTNOHAND - || (retval == -ERESTARTSYS - && !(ka.sa.sa_flags & SA_RESTART))) { - regs->ARM_r0 = -EINTR; - regs->ARM_pc = continue_addr; - } - } - if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; - if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { + if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, @@ -741,14 +723,11 @@ static void do_signal(struct pt_regs *regs, int syscall) } no_signal: + /* + * No signal to deliver to the process - restart the syscall. + */ if (syscall) { - /* - * Handle restarting a different system call. As above, - * if a debugger has chosen to restart at a different PC, - * ignore the restart. - */ - if (retval == -ERESTART_RESTARTBLOCK - && regs->ARM_pc == continue_addr) { + if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { if (thumb_mode(regs)) { regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; regs->ARM_pc -= 2; @@ -771,6 +750,11 @@ static void do_signal(struct pt_regs *regs, int syscall) #endif } } + if (regs->ARM_r0 == -ERESTARTNOHAND || + regs->ARM_r0 == -ERESTARTSYS || + regs->ARM_r0 == -ERESTARTNOINTR) { + setup_syscall_restart(regs); + } /* If there's no signal to deliver, we just put the saved sigmask * back.
diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c index 007a0a950e75..8fe05ad932e4 100644 --- a/trunk/arch/arm/kernel/smp.c +++ b/trunk/arch/arm/kernel/smp.c @@ -479,7 +479,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode, { } -static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) +static void broadcast_timer_setup(struct clock_event_device *evt) { evt->name = "dummy_timer"; evt->features = CLOCK_EVT_FEAT_ONESHOT | @@ -560,7 +560,10 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) break; case IPI_RESCHEDULE: - scheduler_ipi(); + /* + * nothing more to do - eveything is + * done on the interrupt return path + */ break; case IPI_CALL_FUNC: diff --git a/trunk/arch/arm/kernel/sys_oabi-compat.c b/trunk/arch/arm/kernel/sys_oabi-compat.c index af0aaebf4de6..4ad8da15ef2b 100644 --- a/trunk/arch/arm/kernel/sys_oabi-compat.c +++ b/trunk/arch/arm/kernel/sys_oabi-compat.c @@ -311,7 +311,7 @@ asmlinkage long sys_oabi_semtimedop(int semid, long err; int i; - if (nsops < 1 || nsops > SEMOPM) + if (nsops < 1) return -EINVAL; sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); if (!sops) diff --git a/trunk/arch/arm/kernel/time.c b/trunk/arch/arm/kernel/time.c index cb634c3e28e9..1ff46cabc7ef 100644 --- a/trunk/arch/arm/kernel/time.c +++ b/trunk/arch/arm/kernel/time.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -115,37 +115,48 @@ void timer_tick(void) #endif #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) -static int timer_suspend(void) +static int timer_suspend(struct sys_device *dev, pm_message_t state) { - if (system_timer->suspend) - system_timer->suspend(); + struct sys_timer *timer = container_of(dev, struct sys_timer, dev); + + if (timer->suspend != NULL) + timer->suspend(); return 0; } -static void timer_resume(void) +static int timer_resume(struct sys_device *dev) { - if (system_timer->resume) - system_timer->resume(); + struct sys_timer *timer = container_of(dev, struct sys_timer, dev); + + if (timer->resume != NULL) + timer->resume(); + + return 0; } #else #define timer_suspend NULL #define timer_resume NULL #endif -static struct syscore_ops timer_syscore_ops = { +static struct sysdev_class timer_sysclass = { + .name = "timer", .suspend = timer_suspend, .resume = timer_resume, }; -static int __init timer_init_syscore_ops(void) +static int __init timer_init_sysfs(void) { - register_syscore_ops(&timer_syscore_ops); + int ret = sysdev_class_register(&timer_sysclass); + if (ret == 0) { + system_timer->dev.cls = &timer_sysclass; + ret = sysdev_register(&system_timer->dev); + } - return 0; + return ret; } -device_initcall(timer_init_syscore_ops); +device_initcall(timer_init_sysfs); void __init time_init(void) { diff --git a/trunk/arch/arm/mach-at91/Kconfig b/trunk/arch/arm/mach-at91/Kconfig index 2d299bf5d72f..19390231a0e9 100644 --- a/trunk/arch/arm/mach-at91/Kconfig +++ b/trunk/arch/arm/mach-at91/Kconfig @@ -83,7 +83,6 @@ config ARCH_AT91CAP9 select CPU_ARM926T select GENERIC_CLOCKEVENTS select HAVE_FB_ATMEL - select HAVE_NET_MACB config ARCH_AT572D940HF bool "AT572D940HF" diff --git a/trunk/arch/arm/mach-at91/board-eb01.c b/trunk/arch/arm/mach-at91/board-eb01.c index d8df59a3426d..1f9d3cb64c50 100644 --- a/trunk/arch/arm/mach-at91/board-eb01.c +++ b/trunk/arch/arm/mach-at91/board-eb01.c @@ -30,11 +30,6 @@ #include #include "generic.h" -static void __init at91eb01_init_irq(void) -{ - at91x40_init_interrupts(NULL); -} - static void __init 
at91eb01_map_io(void) { at91x40_initialize(40000000); @@ -43,7 +38,7 @@ static void __init at91eb01_map_io(void) MACHINE_START(AT91EB01, "Atmel AT91 EB01") /* Maintainer: Greg Ungerer */ .timer = &at91x40_timer, - .init_irq = at91eb01_init_irq, + .init_irq = at91x40_init_interrupts, .map_io = at91eb01_map_io, MACHINE_END diff --git a/trunk/arch/arm/mach-at91/include/mach/cpu.h b/trunk/arch/arm/mach-at91/include/mach/cpu.h index 0700f2125305..3bef931d0b1c 100644 --- a/trunk/arch/arm/mach-at91/include/mach/cpu.h +++ b/trunk/arch/arm/mach-at91/include/mach/cpu.h @@ -27,7 +27,6 @@ #define ARCH_ID_AT91SAM9G45 0x819b05a0 #define ARCH_ID_AT91SAM9G45MRL 0x819b05a2 /* aka 9G45-ES2 & non ES lots */ #define ARCH_ID_AT91SAM9G45ES 0x819b05a1 /* 9G45-ES (Engineering Sample) */ -#define ARCH_ID_AT91SAM9X5 0x819a05a0 #define ARCH_ID_AT91CAP9 0x039A03A0 #define ARCH_ID_AT91SAM9XE128 0x329973a0 @@ -56,12 +55,6 @@ static inline unsigned long at91_cpu_fully_identify(void) #define ARCH_EXID_AT91SAM9G46 0x00000003 #define ARCH_EXID_AT91SAM9G45 0x00000004 -#define ARCH_EXID_AT91SAM9G15 0x00000000 -#define ARCH_EXID_AT91SAM9G35 0x00000001 -#define ARCH_EXID_AT91SAM9X35 0x00000002 -#define ARCH_EXID_AT91SAM9G25 0x00000003 -#define ARCH_EXID_AT91SAM9X25 0x00000004 - static inline unsigned long at91_exid_identify(void) { return at91_sys_read(AT91_DBGU_EXID); @@ -150,27 +143,6 @@ static inline unsigned long at91cap9_rev_identify(void) #define cpu_is_at91sam9m11() (0) #endif -#ifdef CONFIG_ARCH_AT91SAM9X5 -#define cpu_is_at91sam9x5() (at91_cpu_identify() == ARCH_ID_AT91SAM9X5) -#define cpu_is_at91sam9g15() (cpu_is_at91sam9x5() && \ - (at91_exid_identify() == ARCH_EXID_AT91SAM9G15)) -#define cpu_is_at91sam9g35() (cpu_is_at91sam9x5() && \ - (at91_exid_identify() == ARCH_EXID_AT91SAM9G35)) -#define cpu_is_at91sam9x35() (cpu_is_at91sam9x5() && \ - (at91_exid_identify() == ARCH_EXID_AT91SAM9X35)) -#define cpu_is_at91sam9g25() (cpu_is_at91sam9x5() && \ - (at91_exid_identify() == ARCH_EXID_AT91SAM9G25)) -#define cpu_is_at91sam9x25() (cpu_is_at91sam9x5() && \ - (at91_exid_identify() == ARCH_EXID_AT91SAM9X25)) -#else -#define cpu_is_at91sam9x5() (0) -#define cpu_is_at91sam9g15() (0) -#define cpu_is_at91sam9g35() (0) -#define cpu_is_at91sam9x35() (0) -#define cpu_is_at91sam9g25() (0) -#define cpu_is_at91sam9x25() (0) -#endif - #ifdef CONFIG_ARCH_AT91CAP9 #define cpu_is_at91cap9() (at91_cpu_identify() == ARCH_ID_AT91CAP9) #define cpu_is_at91cap9_revB() (at91cap9_rev_identify() == ARCH_REVISION_CAP9_B) diff --git a/trunk/arch/arm/mach-davinci/Kconfig b/trunk/arch/arm/mach-davinci/Kconfig index c0deacae778d..32f147998cd9 100644 --- a/trunk/arch/arm/mach-davinci/Kconfig +++ b/trunk/arch/arm/mach-davinci/Kconfig @@ -63,7 +63,6 @@ config MACH_DAVINCI_EVM depends on ARCH_DAVINCI_DM644x select MISC_DEVICES select EEPROM_AT24 - select I2C help Configure this option to specify the whether the board used for development is a DM644x EVM @@ -73,7 +72,6 @@ config MACH_SFFSDR depends on ARCH_DAVINCI_DM644x select MISC_DEVICES select EEPROM_AT24 - select I2C help Say Y here to select the Lyrtech Small Form Factor Software Defined Radio (SFFSDR) board. 
@@ -107,7 +105,6 @@ config MACH_DAVINCI_DM6467_EVM select MACH_DAVINCI_DM6467TEVM select MISC_DEVICES select EEPROM_AT24 - select I2C help Configure this option to specify the whether the board used for development is a DM6467 EVM @@ -121,7 +118,6 @@ config MACH_DAVINCI_DM365_EVM depends on ARCH_DAVINCI_DM365 select MISC_DEVICES select EEPROM_AT24 - select I2C help Configure this option to specify whether the board used for development is a DM365 EVM @@ -133,7 +129,6 @@ config MACH_DAVINCI_DA830_EVM select GPIO_PCF857X select MISC_DEVICES select EEPROM_AT24 - select I2C help Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module. @@ -210,7 +205,6 @@ config MACH_MITYOMAPL138 depends on ARCH_DAVINCI_DA850 select MISC_DEVICES select EEPROM_AT24 - select I2C help Say Y here to select the Critical Link MityDSP-L138/MityARM-1808 System on Module. Information on this SoM may be found at diff --git a/trunk/arch/arm/mach-davinci/board-mityomapl138.c b/trunk/arch/arm/mach-davinci/board-mityomapl138.c index 606a6f27ed6c..2aa79c54f98e 100644 --- a/trunk/arch/arm/mach-davinci/board-mityomapl138.c +++ b/trunk/arch/arm/mach-davinci/board-mityomapl138.c @@ -29,7 +29,7 @@ #include #include -#define MITYOMAPL138_PHY_ID "" +#define MITYOMAPL138_PHY_ID "0:03" #define FACTORY_CONFIG_MAGIC 0x012C0138 #define FACTORY_CONFIG_VERSION 0x00010001 @@ -414,7 +414,7 @@ static struct resource mityomapl138_nandflash_resource[] = { static struct platform_device mityomapl138_nandflash_device = { .name = "davinci_nand", - .id = 1, + .id = 0, .dev = { .platform_data = &mityomapl138_nandflash_data, }, diff --git a/trunk/arch/arm/mach-davinci/cpufreq.c b/trunk/arch/arm/mach-davinci/cpufreq.c index 41669ecc1f91..0a95be1512bb 100644 --- a/trunk/arch/arm/mach-davinci/cpufreq.c +++ b/trunk/arch/arm/mach-davinci/cpufreq.c @@ -94,7 +94,9 @@ static int davinci_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return ret; - dev_dbg(&cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); + cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, + dev_driver_string(cpufreq.dev), + "transition: %u --> %u\n", freqs.old, freqs.new); ret = cpufreq_frequency_table_target(policy, pdata->freq_table, freqs.new, relation, &idx); diff --git a/trunk/arch/arm/mach-davinci/devices-da8xx.c b/trunk/arch/arm/mach-davinci/devices-da8xx.c index 58a02dc7b15a..625d4b66718b 100644 --- a/trunk/arch/arm/mach-davinci/devices-da8xx.c +++ b/trunk/arch/arm/mach-davinci/devices-da8xx.c @@ -39,8 +39,7 @@ #define DA8XX_GPIO_BASE 0x01e26000 #define DA8XX_I2C1_BASE 0x01e28000 #define DA8XX_SPI0_BASE 0x01c41000 -#define DA830_SPI1_BASE 0x01e12000 -#define DA850_SPI1_BASE 0x01f0e000 +#define DA8XX_SPI1_BASE 0x01f0e000 #define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 #define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 @@ -763,8 +762,8 @@ static struct resource da8xx_spi0_resources[] = { static struct resource da8xx_spi1_resources[] = { [0] = { - .start = DA830_SPI1_BASE, - .end = DA830_SPI1_BASE + SZ_4K - 1, + .start = DA8XX_SPI1_BASE, + .end = DA8XX_SPI1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -833,10 +832,5 @@ int __init da8xx_register_spi(int instance, struct spi_board_info *info, da8xx_spi_pdata[instance].num_chipselect = len; - if (instance == 1 && cpu_is_davinci_da850()) { - da8xx_spi1_resources[0].start = DA850_SPI1_BASE; - da8xx_spi1_resources[0].end = DA850_SPI1_BASE + SZ_4K - 1; - } - return platform_device_register(&da8xx_spi_device[instance]); } diff --git a/trunk/arch/arm/mach-davinci/dm355.c b/trunk/arch/arm/mach-davinci/dm355.c index 
a3a94e9c9378..f68012239641 100644 --- a/trunk/arch/arm/mach-davinci/dm355.c +++ b/trunk/arch/arm/mach-davinci/dm355.c @@ -314,7 +314,7 @@ static struct clk timer2_clk = { .name = "timer2", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_TIMER2, - .usecount = 1, /* REVISIT: why can't this be disabled? */ + .usecount = 1, /* REVISIT: why can't' this be disabled? */ }; static struct clk timer3_clk = { diff --git a/trunk/arch/arm/mach-davinci/dm644x.c b/trunk/arch/arm/mach-davinci/dm644x.c index 4c82c2716293..5f8a65424184 100644 --- a/trunk/arch/arm/mach-davinci/dm644x.c +++ b/trunk/arch/arm/mach-davinci/dm644x.c @@ -274,7 +274,7 @@ static struct clk timer2_clk = { .name = "timer2", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_TIMER2, - .usecount = 1, /* REVISIT: why can't this be disabled? */ + .usecount = 1, /* REVISIT: why can't' this be disabled? */ }; static struct clk_lookup dm644x_clks[] = { diff --git a/trunk/arch/arm/mach-davinci/include/mach/debug-macro.S b/trunk/arch/arm/mach-davinci/include/mach/debug-macro.S index f8b7ea4f6235..9f1befc5ac38 100644 --- a/trunk/arch/arm/mach-davinci/include/mach/debug-macro.S +++ b/trunk/arch/arm/mach-davinci/include/mach/debug-macro.S @@ -24,9 +24,6 @@ #define UART_SHIFT 2 -#define davinci_uart_v2p(x) ((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET) -#define davinci_uart_p2v(x) ((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET) - .pushsection .data davinci_uart_phys: .word 0 davinci_uart_virt: .word 0 @@ -37,7 +34,7 @@ davinci_uart_virt: .word 0 /* Use davinci_uart_phys/virt if already configured */ 10: mrc p15, 0, \rp, c1, c0 tst \rp, #1 @ MMU enabled? - ldreq \rp, =davinci_uart_v2p(davinci_uart_phys) + ldreq \rp, =__virt_to_phys(davinci_uart_phys) ldrne \rp, =davinci_uart_phys add \rv, \rp, #4 @ davinci_uart_virt ldr \rp, [\rp, #0] @@ -51,18 +48,18 @@ davinci_uart_virt: .word 0 tst \rp, #1 @ MMU enabled? /* Copy uart phys address from decompressor uart info */ - ldreq \rv, =davinci_uart_v2p(davinci_uart_phys) + ldreq \rv, =__virt_to_phys(davinci_uart_phys) ldrne \rv, =davinci_uart_phys ldreq \rp, =DAVINCI_UART_INFO - ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO) + ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO) ldr \rp, [\rp, #0] str \rp, [\rv] /* Copy uart virt address from decompressor uart info */ - ldreq \rv, =davinci_uart_v2p(davinci_uart_virt) + ldreq \rv, =__virt_to_phys(davinci_uart_virt) ldrne \rv, =davinci_uart_virt ldreq \rp, =DAVINCI_UART_INFO - ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO) + ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO) ldr \rp, [\rp, #4] str \rp, [\rv] diff --git a/trunk/arch/arm/mach-davinci/include/mach/serial.h b/trunk/arch/arm/mach-davinci/include/mach/serial.h index c9e6ce185a66..8051110b8ac3 100644 --- a/trunk/arch/arm/mach-davinci/include/mach/serial.h +++ b/trunk/arch/arm/mach-davinci/include/mach/serial.h @@ -22,7 +22,7 @@ * * This area sits just below the page tables (see arch/arm/kernel/head.S). 
*/ -#define DAVINCI_UART_INFO (PLAT_PHYS_OFFSET + 0x3ff8) +#define DAVINCI_UART_INFO (PHYS_OFFSET + 0x3ff8) #define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) #define DAVINCI_UART1_BASE (IO_PHYS + 0x20400) diff --git a/trunk/arch/arm/mach-exynos4/pm.c b/trunk/arch/arm/mach-exynos4/pm.c index 8755ca8dd48d..10d917d9e3ad 100644 --- a/trunk/arch/arm/mach-exynos4/pm.c +++ b/trunk/arch/arm/mach-exynos4/pm.c @@ -16,7 +16,6 @@ #include #include -#include #include #include @@ -373,27 +372,7 @@ void exynos4_scu_enable(void __iomem *scu_base) flush_cache_all(); } -static struct sysdev_driver exynos4_pm_driver = { - .add = exynos4_pm_add, -}; - -static __init int exynos4_pm_drvinit(void) -{ - unsigned int tmp; - - s3c_pm_init(); - - /* All wakeup disable */ - - tmp = __raw_readl(S5P_WAKEUP_MASK); - tmp |= ((0xFF << 8) | (0x1F << 1)); - __raw_writel(tmp, S5P_WAKEUP_MASK); - - return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver); -} -arch_initcall(exynos4_pm_drvinit); - -static void exynos4_pm_resume(void) +static int exynos4_pm_resume(struct sys_device *dev) { /* For release retention */ @@ -415,15 +394,27 @@ static void exynos4_pm_resume(void) /* enable L2X0*/ writel_relaxed(1, S5P_VA_L2CC + L2X0_CTRL); #endif + + return 0; } -static struct syscore_ops exynos4_pm_syscore_ops = { +static struct sysdev_driver exynos4_pm_driver = { + .add = exynos4_pm_add, .resume = exynos4_pm_resume, }; -static __init int exynos4_pm_syscore_init(void) +static __init int exynos4_pm_drvinit(void) { - register_syscore_ops(&exynos4_pm_syscore_ops); - return 0; + unsigned int tmp; + + s3c_pm_init(); + + /* All wakeup disable */ + + tmp = __raw_readl(S5P_WAKEUP_MASK); + tmp |= ((0xFF << 8) | (0x1F << 1)); + __raw_writel(tmp, S5P_WAKEUP_MASK); + + return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver); } -arch_initcall(exynos4_pm_syscore_init); +arch_initcall(exynos4_pm_drvinit); diff --git a/trunk/arch/arm/mach-footbridge/Kconfig b/trunk/arch/arm/mach-footbridge/Kconfig index 46adca068f2c..bdd257921cfb 100644 --- a/trunk/arch/arm/mach-footbridge/Kconfig +++ b/trunk/arch/arm/mach-footbridge/Kconfig @@ -4,7 +4,6 @@ menu "Footbridge Implementations" config ARCH_CATS bool "CATS" - select CLKSRC_I8253 select FOOTBRIDGE_HOST select ISA select ISA_DMA @@ -60,7 +59,6 @@ config ARCH_EBSA285_HOST config ARCH_NETWINDER bool "NetWinder" - select CLKSRC_I8253 select FOOTBRIDGE_HOST select ISA select ISA_DMA diff --git a/trunk/arch/arm/mach-footbridge/isa-timer.c b/trunk/arch/arm/mach-footbridge/isa-timer.c index 7020f1a3feca..441c6ce0d555 100644 --- a/trunk/arch/arm/mach-footbridge/isa-timer.c +++ b/trunk/arch/arm/mach-footbridge/isa-timer.c @@ -10,16 +10,53 @@ #include #include #include -#include #include #include -#include + #include #include "common.h" -DEFINE_RAW_SPINLOCK(i8253_lock); +#define PIT_MODE 0x43 +#define PIT_CH0 0x40 + +#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) + +static cycle_t pit_read(struct clocksource *cs) +{ + unsigned long flags; + static int old_count; + static u32 old_jifs; + int count; + u32 jifs; + + raw_local_irq_save(flags); + + jifs = jiffies; + outb_p(0x00, PIT_MODE); /* latch the count */ + count = inb_p(PIT_CH0); /* read the latched count */ + count |= inb_p(PIT_CH0) << 8; + + if (count > old_count && jifs == old_jifs) + count = old_count; + + old_count = count; + old_jifs = jifs; + + raw_local_irq_restore(flags); + + count = (PIT_LATCH - 1) - count; + + return (cycle_t)(jifs * PIT_LATCH) + count; +} + +static struct clocksource pit_cs = { + .name = "pit", + .rating = 110, 
+ .read = pit_read, + .mask = CLOCKSOURCE_MASK(32), +}; static void pit_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) @@ -84,7 +121,7 @@ static void __init isa_timer_init(void) pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce); pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce); - clocksource_i8253_init(); + clocksource_register_hz(&pit_cs, PIT_TICK_RATE); setup_irq(pit_ce.irq, &pit_timer_irq); clockevents_register_device(&pit_ce); diff --git a/trunk/arch/arm/mach-integrator/integrator_ap.c b/trunk/arch/arm/mach-integrator/integrator_ap.c index d3e96451529c..980803ff348c 100644 --- a/trunk/arch/arm/mach-integrator/integrator_ap.c +++ b/trunk/arch/arm/mach-integrator/integrator_ap.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include @@ -180,13 +180,13 @@ static void __init ap_init_irq(void) #ifdef CONFIG_PM static unsigned long ic_irq_enable; -static int irq_suspend(void) +static int irq_suspend(struct sys_device *dev, pm_message_t state) { ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE); return 0; } -static void irq_resume(void) +static int irq_resume(struct sys_device *dev) { /* disable all irq sources */ writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR); @@ -194,25 +194,33 @@ static void irq_resume(void) writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR); writel(ic_irq_enable, VA_IC_BASE + IRQ_ENABLE_SET); + return 0; } #else #define irq_suspend NULL #define irq_resume NULL #endif -static struct syscore_ops irq_syscore_ops = { +static struct sysdev_class irq_class = { + .name = "irq", .suspend = irq_suspend, .resume = irq_resume, }; -static int __init irq_syscore_init(void) -{ - register_syscore_ops(&irq_syscore_ops); +static struct sys_device irq_device = { + .id = 0, + .cls = &irq_class, +}; - return 0; +static int __init irq_init_sysfs(void) +{ + int ret = sysdev_class_register(&irq_class); + if (ret == 0) + ret = sysdev_register(&irq_device); + return ret; } -device_initcall(irq_syscore_init); +device_initcall(irq_init_sysfs); /* * Flash handling. 
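The isa-timer.c hunk above open-codes an i8253 clocksource (pit_read()/pit_cs) and registers it with clocksource_register_hz() instead of relying on the shared CLKSRC_I8253 helper. As a rough, stand-alone illustration of the arithmetic only — not part of the patch, and assuming the conventional 1,193,182 Hz PIT input clock and HZ = 100 — the numbers work out as follows:

/*
 * Illustration only (not part of the patch).  Mirrors the PIT_LATCH and
 * cycle-count arithmetic used by pit_read() above, with assumed values.
 */
#include <stdio.h>

#define PIT_TICK_RATE 1193182UL                  /* i8253 input clock in Hz (assumed) */
#define HZ            100UL                      /* tick rate assumed for the example */
#define PIT_LATCH     ((PIT_TICK_RATE + HZ / 2) / HZ)

int main(void)
{
	unsigned long jifs = 5;                  /* pretend 5 jiffies have elapsed */
	unsigned long count = 4000;              /* pretend latched down-counter value */
	/* Same conversion as pit_read(): down-counter -> cycles into this tick. */
	unsigned long elapsed = (PIT_LATCH - 1) - count;

	printf("PIT_LATCH = %lu cycles per tick\n", PIT_LATCH);      /* 11932 */
	printf("cycle count = %lu\n", jifs * PIT_LATCH + elapsed);   /* 67591 */
	return 0;
}

clocksource_register_hz(&pit_cs, PIT_TICK_RATE) then lets the clocksource core derive a mult/shift pair for that rate, so pit_read() only has to return a monotonically increasing cycle count.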
diff --git a/trunk/arch/arm/mach-mx3/mach-vpr200.c b/trunk/arch/arm/mach-mx3/mach-vpr200.c index 47a69cbc31a8..2cf390fbd980 100644 --- a/trunk/arch/arm/mach-mx3/mach-vpr200.c +++ b/trunk/arch/arm/mach-mx3/mach-vpr200.c @@ -257,16 +257,11 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = { .workaround = FLS_USB2_WORKAROUND_ENGCM09152, }; -static int vpr200_usbh_init(struct platform_device *pdev) -{ - return mx35_initialize_usb_hw(pdev->id, - MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY); -} - /* USB HOST config */ static const struct mxc_usbh_platform_data usb_host_pdata __initconst = { - .init = vpr200_usbh_init, - .portsc = MXC_EHCI_MODE_SERIAL, + .portsc = MXC_EHCI_MODE_SERIAL, + .flags = MXC_EHCI_INTERFACE_SINGLE_UNI | + MXC_EHCI_INTERNAL_PHY, }; static struct platform_device *devices[] __initdata = { diff --git a/trunk/arch/arm/mach-mx5/board-mx53_loco.c b/trunk/arch/arm/mach-mx5/board-mx53_loco.c index 6206b1191fe8..10a1bea10548 100644 --- a/trunk/arch/arm/mach-mx5/board-mx53_loco.c +++ b/trunk/arch/arm/mach-mx5/board-mx53_loco.c @@ -193,7 +193,7 @@ static iomux_v3_cfg_t mx53_loco_pads[] = { .wakeup = wake, \ } -static struct gpio_keys_button loco_buttons[] = { +static const struct gpio_keys_button loco_buttons[] __initconst = { GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0), GPIO_BUTTON(MX53_LOCO_UI1, KEY_VOLUMEUP, 1, "volume-up", 0), GPIO_BUTTON(MX53_LOCO_UI2, KEY_VOLUMEDOWN, 1, "volume-down", 0), diff --git a/trunk/arch/arm/mach-mxs/clock-mx28.c b/trunk/arch/arm/mach-mxs/clock-mx28.c index 5dcc59d5b9ec..1ad97fed1e94 100644 --- a/trunk/arch/arm/mach-mxs/clock-mx28.c +++ b/trunk/arch/arm/mach-mxs/clock-mx28.c @@ -295,11 +295,11 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ unsigned long diff, parent_rate, calc_rate; \ int i; \ \ + parent_rate = clk_get_rate(clk->parent); \ div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \ bm_busy = BM_CLKCTRL_##dr##_BUSY; \ \ if (clk->parent == &ref_xtal_clk) { \ - parent_rate = clk_get_rate(clk->parent); \ div = DIV_ROUND_UP(parent_rate, rate); \ if (clk == &cpu_clk) { \ div_max = BM_CLKCTRL_CPU_DIV_XTAL >> \ @@ -309,11 +309,6 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ if (div == 0 || div > div_max) \ return -EINVAL; \ } else { \ - /* \ - * hack alert: this block modifies clk->parent, too, \ - * so the base to use it the grand parent. 
\ - */ \ - parent_rate = clk_get_rate(clk->parent->parent); \ rate >>= PARENT_RATE_SHIFT; \ parent_rate >>= PARENT_RATE_SHIFT; \ diff = parent_rate; \ diff --git a/trunk/arch/arm/mach-omap1/pm_bus.c b/trunk/arch/arm/mach-omap1/pm_bus.c index fe31d933f0ed..6588c22b8a64 100644 --- a/trunk/arch/arm/mach-omap1/pm_bus.c +++ b/trunk/arch/arm/mach-omap1/pm_bus.c @@ -24,50 +24,75 @@ #ifdef CONFIG_PM_RUNTIME static int omap1_pm_runtime_suspend(struct device *dev) { - int ret; + struct clk *iclk, *fclk; + int ret = 0; dev_dbg(dev, "%s\n", __func__); ret = pm_generic_runtime_suspend(dev); - if (ret) - return ret; - ret = pm_runtime_clk_suspend(dev); - if (ret) { - pm_generic_runtime_resume(dev); - return ret; + fclk = clk_get(dev, "fck"); + if (!IS_ERR(fclk)) { + clk_disable(fclk); + clk_put(fclk); + } + + iclk = clk_get(dev, "ick"); + if (!IS_ERR(iclk)) { + clk_disable(iclk); + clk_put(iclk); } return 0; -} +}; static int omap1_pm_runtime_resume(struct device *dev) { + struct clk *iclk, *fclk; + dev_dbg(dev, "%s\n", __func__); - pm_runtime_clk_resume(dev); - return pm_generic_runtime_resume(dev); -} + iclk = clk_get(dev, "ick"); + if (!IS_ERR(iclk)) { + clk_enable(iclk); + clk_put(iclk); + } -static struct dev_power_domain default_power_domain = { - .ops = { - .runtime_suspend = omap1_pm_runtime_suspend, - .runtime_resume = omap1_pm_runtime_resume, - USE_PLATFORM_PM_SLEEP_OPS - }, -}; + fclk = clk_get(dev, "fck"); + if (!IS_ERR(fclk)) { + clk_enable(fclk); + clk_put(fclk); + } -static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = &default_power_domain, - .con_ids = { "ick", "fck", NULL, }, + return pm_generic_runtime_resume(dev); }; static int __init omap1_pm_runtime_init(void) { + const struct dev_pm_ops *pm; + struct dev_pm_ops *omap_pm; + if (!cpu_class_is_omap1()) return -ENODEV; - pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + pm = platform_bus_get_pm_ops(); + if (!pm) { + pr_err("%s: unable to get dev_pm_ops from platform_bus\n", + __func__); + return -ENODEV; + } + + omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); + if (!omap_pm) { + pr_err("%s: unable to alloc memory for new dev_pm_ops\n", + __func__); + return -ENOMEM; + } + + omap_pm->runtime_suspend = omap1_pm_runtime_suspend; + omap_pm->runtime_resume = omap1_pm_runtime_resume; + + platform_bus_set_pm_ops(omap_pm); return 0; } diff --git a/trunk/arch/arm/mach-omap2/Makefile b/trunk/arch/arm/mach-omap2/Makefile index 66dfbccacd25..a45cd6409686 100644 --- a/trunk/arch/arm/mach-omap2/Makefile +++ b/trunk/arch/arm/mach-omap2/Makefile @@ -59,16 +59,16 @@ endif # Power Management ifeq ($(CONFIG_PM),y) obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o -obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o +obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o pm_bus.o obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ - cpuidle34xx.o -obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o + cpuidle34xx.o pm_bus.o +obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o pm_bus.o obj-$(CONFIG_PM_DEBUG) += pm-debug.o obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o AFLAGS_sleep24xx.o :=-Wa,-march=armv6 -AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec) +AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a ifeq ($(CONFIG_PM_VERBOSE),y) CFLAGS_pm_bus.o += -DDEBUG diff --git a/trunk/arch/arm/mach-omap2/board-rx51.c b/trunk/arch/arm/mach-omap2/board-rx51.c index f8ba20a14e62..e964895b80e8 100644 --- a/trunk/arch/arm/mach-omap2/board-rx51.c +++ b/trunk/arch/arm/mach-omap2/board-rx51.c @@ -141,19 +141,14 
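/*
 * Illustrative sketch only, not part of the patch.  It shows how a driver
 * reaches the bus-level omap1_pm_runtime_suspend()/_resume() above: once
 * those ops are installed for the platform bus, an ordinary runtime-PM
 * get/put from a platform driver gates the device's "ick"/"fck" clocks.
 * The "foo" driver below is hypothetical; only the pm_runtime_* and
 * platform_driver calls are real kernel APIs of this era.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);          /* allow runtime PM for this device */

	pm_runtime_get_sync(&pdev->dev);        /* -> bus runtime_resume: clocks on */
	/* ... program the hardware here ... */
	pm_runtime_put_sync(&pdev->dev);        /* -> bus runtime_suspend: clocks gated */

	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= { .name = "foo" },
};

static int __init foo_init(void)
{
	return platform_driver_register(&foo_driver);
}
device_initcall(foo_init);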
@@ static void __init rx51_init(void) static void __init rx51_map_io(void) { omap2_set_globals_3xxx(); - omap34xx_map_common_io(); -} - -static void __init rx51_reserve(void) -{ rx51_video_mem_init(); - omap_reserve(); + omap34xx_map_common_io(); } MACHINE_START(NOKIA_RX51, "Nokia RX-51 board") /* Maintainer: Lauri Leukkunen */ .boot_params = 0x80000100, - .reserve = rx51_reserve, + .reserve = omap_reserve, .map_io = rx51_map_io, .init_early = rx51_init_early, .init_irq = omap_init_irq, diff --git a/trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c index d6e34dd9e7e7..b2b1e37bb6bb 100644 --- a/trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c +++ b/trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c @@ -115,7 +115,6 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, 0, 0, 0, 0); - clk->rate = rate; return 0; } diff --git a/trunk/arch/arm/mach-omap2/clock44xx_data.c b/trunk/arch/arm/mach-omap2/clock44xx_data.c index 8c965671b4d4..276992d3b7fb 100644 --- a/trunk/arch/arm/mach-omap2/clock44xx_data.c +++ b/trunk/arch/arm/mach-omap2/clock44xx_data.c @@ -3116,9 +3116,14 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "dsp_fck", &dsp_fck, CK_443X), CLK("omapdss_dss", "sys_clk", &dss_sys_clk, CK_443X), CLK("omapdss_dss", "tv_clk", &dss_tv_clk, CK_443X), + CLK("omapdss_dss", "dss_clk", &dss_dss_clk, CK_443X), CLK("omapdss_dss", "video_clk", &dss_48mhz_clk, CK_443X), - CLK("omapdss_dss", "fck", &dss_dss_clk, CK_443X), - CLK("omapdss_dss", "ick", &dss_fck, CK_443X), + CLK("omapdss_dss", "fck", &dss_fck, CK_443X), + /* + * On OMAP4, DSS ick is a dummy clock; this is needed for compatibility + * with OMAP2/3. + */ + CLK("omapdss_dss", "ick", &dummy_ck, CK_443X), CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X), CLK(NULL, "emif1_fck", &emif1_fck, CK_443X), CLK(NULL, "emif2_fck", &emif2_fck, CK_443X), diff --git a/trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c b/trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c index 38830d8d4783..9d0dec806e92 100644 --- a/trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c +++ b/trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c @@ -247,7 +247,6 @@ struct omap3_cm_regs { u32 per_cm_clksel; u32 emu_cm_clksel; u32 emu_cm_clkstctrl; - u32 pll_cm_autoidle; u32 pll_cm_autoidle2; u32 pll_cm_clksel4; u32 pll_cm_clksel5; @@ -320,15 +319,6 @@ void omap3_cm_save_context(void) omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1); cm_context.emu_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL); - /* - * As per erratum i671, ROM code does not respect the PER DPLL - * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1. - * In this case, even though this register has been saved in - * scratchpad contents, we need to restore AUTO_PERIPH_DPLL - * by ourselves. So, we need to save it anyway. - */ - cm_context.pll_cm_autoidle = - omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE); cm_context.pll_cm_autoidle2 = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2); cm_context.pll_cm_clksel4 = @@ -451,13 +441,6 @@ void omap3_cm_restore_context(void) CM_CLKSEL1); omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL); - /* - * As per erratum i671, ROM code does not respect the PER DPLL - * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1. - * In this case, we need to restore AUTO_PERIPH_DPLL by ourselves. 
- */ - omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle, PLL_MOD, - CM_AUTOIDLE); omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD, CM_AUTOIDLE2); omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD, diff --git a/trunk/arch/arm/mach-omap2/control.c b/trunk/arch/arm/mach-omap2/control.c index da53ba3917ca..695279419020 100644 --- a/trunk/arch/arm/mach-omap2/control.c +++ b/trunk/arch/arm/mach-omap2/control.c @@ -316,14 +316,8 @@ void omap3_save_scratchpad_contents(void) omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL); prcm_block_contents.cm_clken_pll = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN); - /* - * As per erratum i671, ROM code does not respect the PER DPLL - * programming scheme if CM_AUTOIDLE_PLL..AUTO_PERIPH_DPLL == 1. - * Then, in anycase, clear these bits to avoid extra latencies. - */ prcm_block_contents.cm_autoidle_pll = - omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) & - ~OMAP3430_AUTO_PERIPH_DPLL_MASK; + omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_AUTOIDLE_PLL); prcm_block_contents.cm_clksel1_pll = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL); prcm_block_contents.cm_clksel2_pll = diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_2420_data.c index c4d0ae87d62a..8eb3ce1bbfbe 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_2420_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_2420_data.c @@ -1639,7 +1639,6 @@ static struct omap_hwmod_ocp_if *omap2420_gpio1_slaves[] = { static struct omap_hwmod omap2420_gpio1_hwmod = { .name = "gpio1", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap242x_gpio1_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio1_irqs), .main_clk = "gpios_fck", @@ -1670,7 +1669,6 @@ static struct omap_hwmod_ocp_if *omap2420_gpio2_slaves[] = { static struct omap_hwmod omap2420_gpio2_hwmod = { .name = "gpio2", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap242x_gpio2_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio2_irqs), .main_clk = "gpios_fck", @@ -1701,7 +1699,6 @@ static struct omap_hwmod_ocp_if *omap2420_gpio3_slaves[] = { static struct omap_hwmod omap2420_gpio3_hwmod = { .name = "gpio3", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap242x_gpio3_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio3_irqs), .main_clk = "gpios_fck", @@ -1732,7 +1729,6 @@ static struct omap_hwmod_ocp_if *omap2420_gpio4_slaves[] = { static struct omap_hwmod omap2420_gpio4_hwmod = { .name = "gpio4", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap242x_gpio4_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio4_irqs), .main_clk = "gpios_fck", @@ -1786,7 +1782,7 @@ static struct omap_hwmod_irq_info omap2420_dma_system_irqs[] = { static struct omap_hwmod_addr_space omap2420_dma_system_addrs[] = { { .pa_start = 0x48056000, - .pa_end = 0x48056fff, + .pa_end = 0x4a0560ff, .flags = ADDR_TYPE_RT }, }; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_2430_data.c index 9682dd519f8d..e6e3810db77f 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_2430_data.c @@ -1742,7 +1742,6 @@ static struct omap_hwmod_ocp_if *omap2430_gpio1_slaves[] = { static struct omap_hwmod omap2430_gpio1_hwmod = { .name = "gpio1", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio1_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio1_irqs), .main_clk = "gpios_fck", @@ -1773,7 +1772,6 @@ static struct omap_hwmod_ocp_if *omap2430_gpio2_slaves[] = { static struct omap_hwmod omap2430_gpio2_hwmod = { 
.name = "gpio2", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio2_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio2_irqs), .main_clk = "gpios_fck", @@ -1804,7 +1802,6 @@ static struct omap_hwmod_ocp_if *omap2430_gpio3_slaves[] = { static struct omap_hwmod omap2430_gpio3_hwmod = { .name = "gpio3", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio3_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio3_irqs), .main_clk = "gpios_fck", @@ -1835,7 +1832,6 @@ static struct omap_hwmod_ocp_if *omap2430_gpio4_slaves[] = { static struct omap_hwmod omap2430_gpio4_hwmod = { .name = "gpio4", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio4_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio4_irqs), .main_clk = "gpios_fck", @@ -1866,7 +1862,6 @@ static struct omap_hwmod_ocp_if *omap2430_gpio5_slaves[] = { static struct omap_hwmod omap2430_gpio5_hwmod = { .name = "gpio5", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio5_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio5_irqs), .main_clk = "gpio5_fck", @@ -1920,7 +1915,7 @@ static struct omap_hwmod_irq_info omap2430_dma_system_irqs[] = { static struct omap_hwmod_addr_space omap2430_dma_system_addrs[] = { { .pa_start = 0x48056000, - .pa_end = 0x48056fff, + .pa_end = 0x4a0560ff, .flags = ADDR_TYPE_RT }, }; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 909a84de6682..b98e2dfcba28 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -2141,7 +2141,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = { static struct omap_hwmod omap3xxx_gpio1_hwmod = { .name = "gpio1", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio1_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio1_irqs), .main_clk = "gpio1_ick", @@ -2178,7 +2177,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio2_slaves[] = { static struct omap_hwmod omap3xxx_gpio2_hwmod = { .name = "gpio2", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio2_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio2_irqs), .main_clk = "gpio2_ick", @@ -2215,7 +2213,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = { static struct omap_hwmod omap3xxx_gpio3_hwmod = { .name = "gpio3", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio3_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio3_irqs), .main_clk = "gpio3_ick", @@ -2252,7 +2249,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = { static struct omap_hwmod omap3xxx_gpio4_hwmod = { .name = "gpio4", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio4_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio4_irqs), .main_clk = "gpio4_ick", @@ -2289,7 +2285,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio5_slaves[] = { static struct omap_hwmod omap3xxx_gpio5_hwmod = { .name = "gpio5", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio5_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio5_irqs), .main_clk = "gpio5_ick", @@ -2326,7 +2321,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio6_slaves[] = { static struct omap_hwmod omap3xxx_gpio6_hwmod = { .name = "gpio6", - .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio6_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio6_irqs), .main_clk = "gpio6_ick", @@ -2392,7 +2386,7 @@ static struct omap_hwmod_irq_info omap3xxx_dma_system_irqs[] = { static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = { { 
.pa_start = 0x48056000, - .pa_end = 0x48056fff, + .pa_end = 0x4a0560ff, .flags = ADDR_TYPE_RT }, }; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index abc548a0c98d..3e88dd3f8ef3 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -885,7 +885,7 @@ static struct omap_hwmod_ocp_if *omap44xx_dma_system_masters[] = { static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = { { .pa_start = 0x4a056000, - .pa_end = 0x4a056fff, + .pa_end = 0x4a0560ff, .flags = ADDR_TYPE_RT }, }; diff --git a/trunk/arch/arm/mach-omap2/omap_l3_smx.c b/trunk/arch/arm/mach-omap2/omap_l3_smx.c index 4321e7938929..5f2da7565b68 100644 --- a/trunk/arch/arm/mach-omap2/omap_l3_smx.c +++ b/trunk/arch/arm/mach-omap2/omap_l3_smx.c @@ -196,11 +196,11 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) /* No timeout error for debug sources */ } + base = ((l3->rt) + (*(omap3_l3_bases[int_type] + err_source))); + /* identify the error source */ for (err_source = 0; !(status & (1 << err_source)); err_source++) ; - - base = l3->rt + *(omap3_l3_bases[int_type] + err_source); error = omap3_l3_readll(base, L3_ERROR_LOG); if (error) { diff --git a/trunk/arch/arm/mach-omap2/pm.c b/trunk/arch/arm/mach-omap2/pm.c index 49486f522dca..30af3351c2d6 100644 --- a/trunk/arch/arm/mach-omap2/pm.c +++ b/trunk/arch/arm/mach-omap2/pm.c @@ -89,7 +89,6 @@ static void omap2_init_processor_devices(void) if (cpu_is_omap44xx()) { _init_omap_device("l3_main_1", &l3_dev); _init_omap_device("dsp", &dsp_dev); - _init_omap_device("iva", &iva_dev); } else { _init_omap_device("l3_main", &l3_dev); } diff --git a/trunk/arch/arm/mach-omap2/pm_bus.c b/trunk/arch/arm/mach-omap2/pm_bus.c new file mode 100644 index 000000000000..5acd2ab298b1 --- /dev/null +++ b/trunk/arch/arm/mach-omap2/pm_bus.c @@ -0,0 +1,85 @@ +/* + * Runtime PM support code for OMAP + * + * Author: Kevin Hilman, Deep Root Systems, LLC + * + * Copyright (C) 2010 Texas Instruments, Inc. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_PM_RUNTIME +static int omap_pm_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + int r, ret = 0; + + dev_dbg(dev, "%s\n", __func__); + + ret = pm_generic_runtime_suspend(dev); + + if (!ret && dev->parent == &omap_device_parent) { + r = omap_device_idle(pdev); + WARN_ON(r); + } + + return ret; +}; + +static int omap_pm_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + int r; + + dev_dbg(dev, "%s\n", __func__); + + if (dev->parent == &omap_device_parent) { + r = omap_device_enable(pdev); + WARN_ON(r); + } + + return pm_generic_runtime_resume(dev); +}; +#else +#define omap_pm_runtime_suspend NULL +#define omap_pm_runtime_resume NULL +#endif /* CONFIG_PM_RUNTIME */ + +static int __init omap_pm_runtime_init(void) +{ + const struct dev_pm_ops *pm; + struct dev_pm_ops *omap_pm; + + pm = platform_bus_get_pm_ops(); + if (!pm) { + pr_err("%s: unable to get dev_pm_ops from platform_bus\n", + __func__); + return -ENODEV; + } + + omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); + if (!omap_pm) { + pr_err("%s: unable to alloc memory for new dev_pm_ops\n", + __func__); + return -ENOMEM; + } + + omap_pm->runtime_suspend = omap_pm_runtime_suspend; + omap_pm->runtime_resume = omap_pm_runtime_resume; + + platform_bus_set_pm_ops(omap_pm); + + return 0; +} +core_initcall(omap_pm_runtime_init); diff --git a/trunk/arch/arm/mach-omap2/voltage.c b/trunk/arch/arm/mach-omap2/voltage.c index 0c1552d9d995..6fb520999b6e 100644 --- a/trunk/arch/arm/mach-omap2/voltage.c +++ b/trunk/arch/arm/mach-omap2/voltage.c @@ -114,6 +114,7 @@ static int __init _config_common_vdd_data(struct omap_vdd_info *vdd) sys_clk_speed /= 1000; /* Generic voltage parameters */ + vdd->curr_volt = 1200000; vdd->volt_scale = vp_forceupdate_scale_voltage; vdd->vp_enabled = false; diff --git a/trunk/arch/arm/mach-pxa/balloon3.c b/trunk/arch/arm/mach-pxa/balloon3.c index 810a982a66f8..bfbecec6d05f 100644 --- a/trunk/arch/arm/mach-pxa/balloon3.c +++ b/trunk/arch/arm/mach-pxa/balloon3.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/clock-pxa2xx.c b/trunk/arch/arm/mach-pxa/clock-pxa2xx.c index 1d5859d9a0e3..1ce090448493 100644 --- a/trunk/arch/arm/mach-pxa/clock-pxa2xx.c +++ b/trunk/arch/arm/mach-pxa/clock-pxa2xx.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include @@ -33,22 +33,32 @@ const struct clkops clk_pxa2xx_cken_ops = { #ifdef CONFIG_PM static uint32_t saved_cken; -static int pxa2xx_clock_suspend(void) +static int pxa2xx_clock_suspend(struct sys_device *d, pm_message_t state) { saved_cken = CKEN; return 0; } -static void pxa2xx_clock_resume(void) +static int pxa2xx_clock_resume(struct sys_device *d) { CKEN = saved_cken; + return 0; } #else #define pxa2xx_clock_suspend NULL #define pxa2xx_clock_resume NULL #endif -struct syscore_ops pxa2xx_clock_syscore_ops = { +struct sysdev_class pxa2xx_clock_sysclass = { + .name = "pxa2xx-clock", .suspend = pxa2xx_clock_suspend, .resume = pxa2xx_clock_resume, }; + +static int __init pxa2xx_clock_init(void) +{ + if (cpu_is_pxa2xx()) + return sysdev_class_register(&pxa2xx_clock_sysclass); + return 0; +} +postcore_initcall(pxa2xx_clock_init); diff --git a/trunk/arch/arm/mach-pxa/clock-pxa3xx.c b/trunk/arch/arm/mach-pxa/clock-pxa3xx.c index 2a37a9a8f621..3f864cd0bd28 100644 --- a/trunk/arch/arm/mach-pxa/clock-pxa3xx.c 
+++ b/trunk/arch/arm/mach-pxa/clock-pxa3xx.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include @@ -183,7 +182,7 @@ const struct clkops clk_pxa3xx_pout_ops = { static uint32_t cken[2]; static uint32_t accr; -static int pxa3xx_clock_suspend(void) +static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state) { cken[0] = CKENA; cken[1] = CKENB; @@ -191,18 +190,28 @@ static int pxa3xx_clock_suspend(void) return 0; } -static void pxa3xx_clock_resume(void) +static int pxa3xx_clock_resume(struct sys_device *d) { ACCR = accr; CKENA = cken[0]; CKENB = cken[1]; + return 0; } #else #define pxa3xx_clock_suspend NULL #define pxa3xx_clock_resume NULL #endif -struct syscore_ops pxa3xx_clock_syscore_ops = { +struct sysdev_class pxa3xx_clock_sysclass = { + .name = "pxa3xx-clock", .suspend = pxa3xx_clock_suspend, .resume = pxa3xx_clock_resume, }; + +static int __init pxa3xx_clock_init(void) +{ + if (cpu_is_pxa3xx() || cpu_is_pxa95x()) + return sysdev_class_register(&pxa3xx_clock_sysclass); + return 0; +} +postcore_initcall(pxa3xx_clock_init); diff --git a/trunk/arch/arm/mach-pxa/clock.h b/trunk/arch/arm/mach-pxa/clock.h index 1f2fb9c43f06..f9f349a21b54 100644 --- a/trunk/arch/arm/mach-pxa/clock.h +++ b/trunk/arch/arm/mach-pxa/clock.h @@ -1,5 +1,5 @@ #include -#include +#include struct clkops { void (*enable)(struct clk *); @@ -54,7 +54,7 @@ extern const struct clkops clk_pxa2xx_cken_ops; void clk_pxa2xx_cken_enable(struct clk *clk); void clk_pxa2xx_cken_disable(struct clk *clk); -extern struct syscore_ops pxa2xx_clock_syscore_ops; +extern struct sysdev_class pxa2xx_clock_sysclass; #if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) #define DEFINE_PXA3_CKEN(_name, _cken, _rate, _delay) \ @@ -74,6 +74,5 @@ extern const struct clkops clk_pxa3xx_smemc_ops; extern void clk_pxa3xx_cken_enable(struct clk *); extern void clk_pxa3xx_cken_disable(struct clk *); -extern struct syscore_ops pxa3xx_clock_syscore_ops; - +extern struct sysdev_class pxa3xx_clock_sysclass; #endif diff --git a/trunk/arch/arm/mach-pxa/cm-x270.c b/trunk/arch/arm/mach-pxa/cm-x270.c index 13518a705399..b88d601a8090 100644 --- a/trunk/arch/arm/mach-pxa/cm-x270.c +++ b/trunk/arch/arm/mach-pxa/cm-x270.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/cm-x2xx.c b/trunk/arch/arm/mach-pxa/cm-x2xx.c index a10996782476..8225e2e58c6e 100644 --- a/trunk/arch/arm/mach-pxa/cm-x2xx.c +++ b/trunk/arch/arm/mach-pxa/cm-x2xx.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include #include @@ -388,7 +388,7 @@ static inline void cmx2xx_init_display(void) {} #ifdef CONFIG_PM static unsigned long sleep_save_msc[10]; -static int cmx2xx_suspend(void) +static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state) { cmx2xx_pci_suspend(); @@ -412,7 +412,7 @@ static int cmx2xx_suspend(void) return 0; } -static void cmx2xx_resume(void) +static int cmx2xx_resume(struct sys_device *dev) { cmx2xx_pci_resume(); @@ -420,18 +420,27 @@ static void cmx2xx_resume(void) __raw_writel(sleep_save_msc[0], MSC0); __raw_writel(sleep_save_msc[1], MSC1); __raw_writel(sleep_save_msc[2], MSC2); + + return 0; } -static struct syscore_ops cmx2xx_pm_syscore_ops = { +static struct sysdev_class cmx2xx_pm_sysclass = { + .name = "pm", .resume = cmx2xx_resume, .suspend = cmx2xx_suspend, }; +static struct sys_device cmx2xx_pm_device = { + .cls = &cmx2xx_pm_sysclass, +}; + static int __init cmx2xx_pm_init(void) { - register_syscore_ops(&cmx2xx_pm_syscore_ops); - - return 0; + int error; + error = 
sysdev_class_register(&cmx2xx_pm_sysclass); + if (error == 0) + error = sysdev_register(&cmx2xx_pm_device); + return error; } #else static int __init cmx2xx_pm_init(void) { return 0; } diff --git a/trunk/arch/arm/mach-pxa/colibri-evalboard.c b/trunk/arch/arm/mach-pxa/colibri-evalboard.c index d28e802e2448..81c3c433e2d6 100644 --- a/trunk/arch/arm/mach-pxa/colibri-evalboard.c +++ b/trunk/arch/arm/mach-pxa/colibri-evalboard.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/colibri-pxa270-income.c b/trunk/arch/arm/mach-pxa/colibri-pxa270-income.c index 80538b8806ed..44c1b77ece67 100644 --- a/trunk/arch/arm/mach-pxa/colibri-pxa270-income.c +++ b/trunk/arch/arm/mach-pxa/colibri-pxa270-income.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/arm/mach-pxa/colibri-pxa270.c b/trunk/arch/arm/mach-pxa/colibri-pxa270.c index 7545a48ed88b..6fc5d328ba7f 100644 --- a/trunk/arch/arm/mach-pxa/colibri-pxa270.c +++ b/trunk/arch/arm/mach-pxa/colibri-pxa270.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/arm/mach-pxa/generic.h b/trunk/arch/arm/mach-pxa/generic.h index e6c9344a95ae..a079d8baa45a 100644 --- a/trunk/arch/arm/mach-pxa/generic.h +++ b/trunk/arch/arm/mach-pxa/generic.h @@ -61,10 +61,10 @@ extern unsigned pxa3xx_get_clk_frequency_khz(int); #define pxa3xx_get_clk_frequency_khz(x) (0) #endif -extern struct syscore_ops pxa_irq_syscore_ops; -extern struct syscore_ops pxa_gpio_syscore_ops; -extern struct syscore_ops pxa2xx_mfp_syscore_ops; -extern struct syscore_ops pxa3xx_mfp_syscore_ops; +extern struct sysdev_class pxa_irq_sysclass; +extern struct sysdev_class pxa_gpio_sysclass; +extern struct sysdev_class pxa2xx_mfp_sysclass; +extern struct sysdev_class pxa3xx_mfp_sysclass; void __init pxa_set_ffuart_info(void *info); void __init pxa_set_btuart_info(void *info); diff --git a/trunk/arch/arm/mach-pxa/hx4700.c b/trunk/arch/arm/mach-pxa/hx4700.c index 9cdcca597924..6de0ad0eea65 100644 --- a/trunk/arch/arm/mach-pxa/hx4700.c +++ b/trunk/arch/arm/mach-pxa/hx4700.c @@ -711,7 +711,7 @@ static struct regulator_consumer_supply bq24022_consumers[] = { static struct regulator_init_data bq24022_init_data = { .constraints = { .max_uA = 500000, - .valid_ops_mask = REGULATOR_CHANGE_CURRENT|REGULATOR_CHANGE_STATUS, + .valid_ops_mask = REGULATOR_CHANGE_CURRENT, }, .num_consumer_supplies = ARRAY_SIZE(bq24022_consumers), .consumer_supplies = bq24022_consumers, diff --git a/trunk/arch/arm/mach-pxa/irq.c b/trunk/arch/arm/mach-pxa/irq.c index 32ed551bf9c5..6251e3f5c62c 100644 --- a/trunk/arch/arm/mach-pxa/irq.c +++ b/trunk/arch/arm/mach-pxa/irq.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -183,7 +183,7 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn) static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; -static int pxa_irq_suspend(void) +static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state) { int i; @@ -202,7 +202,7 @@ static int pxa_irq_suspend(void) return 0; } -static void pxa_irq_resume(void) +static int pxa_irq_resume(struct sys_device *dev) { int i; @@ -218,13 +218,22 @@ static void pxa_irq_resume(void) __raw_writel(saved_ipr[i], IRQ_BASE + IPR(i)); __raw_writel(1, IRQ_BASE + ICCR); + return 0; } #else #define pxa_irq_suspend NULL #define pxa_irq_resume NULL #endif -struct syscore_ops pxa_irq_syscore_ops = { +struct sysdev_class 
pxa_irq_sysclass = { + .name = "irq", .suspend = pxa_irq_suspend, .resume = pxa_irq_resume, }; + +static int __init pxa_irq_init(void) +{ + return sysdev_class_register(&pxa_irq_sysclass); +} + +core_initcall(pxa_irq_init); diff --git a/trunk/arch/arm/mach-pxa/lpd270.c b/trunk/arch/arm/mach-pxa/lpd270.c index 6cf8180bf5bd..f5de541725b1 100644 --- a/trunk/arch/arm/mach-pxa/lpd270.c +++ b/trunk/arch/arm/mach-pxa/lpd270.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include @@ -159,22 +159,30 @@ static void __init lpd270_init_irq(void) #ifdef CONFIG_PM -static void lpd270_irq_resume(void) +static int lpd270_irq_resume(struct sys_device *dev) { __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK); + return 0; } -static struct syscore_ops lpd270_irq_syscore_ops = { +static struct sysdev_class lpd270_irq_sysclass = { + .name = "cpld_irq", .resume = lpd270_irq_resume, }; +static struct sys_device lpd270_irq_device = { + .cls = &lpd270_irq_sysclass, +}; + static int __init lpd270_irq_device_init(void) { + int ret = -ENODEV; if (machine_is_logicpd_pxa270()) { - register_syscore_ops(&lpd270_irq_syscore_ops); - return 0; + ret = sysdev_class_register(&lpd270_irq_sysclass); + if (ret == 0) + ret = sysdev_register(&lpd270_irq_device); } - return -ENODEV; + return ret; } device_initcall(lpd270_irq_device_init); diff --git a/trunk/arch/arm/mach-pxa/lubbock.c b/trunk/arch/arm/mach-pxa/lubbock.c index e10ddb827147..3ede978c83d9 100644 --- a/trunk/arch/arm/mach-pxa/lubbock.c +++ b/trunk/arch/arm/mach-pxa/lubbock.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -176,22 +176,31 @@ static void __init lubbock_init_irq(void) #ifdef CONFIG_PM -static void lubbock_irq_resume(void) +static int lubbock_irq_resume(struct sys_device *dev) { LUB_IRQ_MASK_EN = lubbock_irq_enabled; + return 0; } -static struct syscore_ops lubbock_irq_syscore_ops = { +static struct sysdev_class lubbock_irq_sysclass = { + .name = "cpld_irq", .resume = lubbock_irq_resume, }; +static struct sys_device lubbock_irq_device = { + .cls = &lubbock_irq_sysclass, +}; + static int __init lubbock_irq_device_init(void) { + int ret = -ENODEV; + if (machine_is_lubbock()) { - register_syscore_ops(&lubbock_irq_syscore_ops); - return 0; + ret = sysdev_class_register(&lubbock_irq_sysclass); + if (ret == 0) + ret = sysdev_register(&lubbock_irq_device); } - return -ENODEV; + return ret; } device_initcall(lubbock_irq_device_init); diff --git a/trunk/arch/arm/mach-pxa/magician.c b/trunk/arch/arm/mach-pxa/magician.c index 9984ef70bd79..a72993dde2b3 100644 --- a/trunk/arch/arm/mach-pxa/magician.c +++ b/trunk/arch/arm/mach-pxa/magician.c @@ -599,7 +599,7 @@ static struct regulator_consumer_supply bq24022_consumers[] = { static struct regulator_init_data bq24022_init_data = { .constraints = { .max_uA = 500000, - .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS, + .valid_ops_mask = REGULATOR_CHANGE_CURRENT, }, .num_consumer_supplies = ARRAY_SIZE(bq24022_consumers), .consumer_supplies = bq24022_consumers, diff --git a/trunk/arch/arm/mach-pxa/mainstone.c b/trunk/arch/arm/mach-pxa/mainstone.c index 3479e2b3b511..95163baca29e 100644 --- a/trunk/arch/arm/mach-pxa/mainstone.c +++ b/trunk/arch/arm/mach-pxa/mainstone.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include @@ -185,21 +185,31 @@ static void __init mainstone_init_irq(void) #ifdef CONFIG_PM -static void mainstone_irq_resume(void) +static int mainstone_irq_resume(struct sys_device *dev) { 
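/*
 * Illustrative contrast only, not part of the patch.  Condensed from the
 * PXA/board conversions above: the "-" side of these hunks registers one
 * global struct syscore_ops, while the "+" side goes back to a sysdev
 * class plus a sys_device instance.  "bar" is a hypothetical name; the
 * structure layouts and registration calls are the ones used in the hunks.
 */
#include <linux/syscore_ops.h>
#include <linux/sysdev.h>

/* syscore model: no per-device argument, resume cannot fail. */
static int bar_suspend(void)  { /* save state */    return 0; }
static void bar_resume(void)  { /* restore state */          }

static struct syscore_ops bar_syscore_ops = {
	.suspend = bar_suspend,
	.resume  = bar_resume,
};
/* registered with: register_syscore_ops(&bar_syscore_ops); */

/* sysdev model: callbacks take a sys_device and return int. */
static int bar_sd_suspend(struct sys_device *sd, pm_message_t state) { return 0; }
static int bar_sd_resume(struct sys_device *sd)                      { return 0; }

static struct sysdev_class bar_sysclass = {
	.name    = "bar",
	.suspend = bar_sd_suspend,
	.resume  = bar_sd_resume,
};

static struct sys_device bar_sysdev = {
	.cls = &bar_sysclass,
};
/*
 * registered with: sysdev_class_register(&bar_sysclass);
 *                  sysdev_register(&bar_sysdev);
 */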
MST_INTMSKENA = mainstone_irq_enabled; + return 0; } -static struct syscore_ops mainstone_irq_syscore_ops = { +static struct sysdev_class mainstone_irq_sysclass = { + .name = "cpld_irq", .resume = mainstone_irq_resume, }; +static struct sys_device mainstone_irq_device = { + .cls = &mainstone_irq_sysclass, +}; + static int __init mainstone_irq_device_init(void) { - if (machine_is_mainstone()) - register_syscore_ops(&mainstone_irq_syscore_ops); + int ret = -ENODEV; - return 0; + if (machine_is_mainstone()) { + ret = sysdev_class_register(&mainstone_irq_sysclass); + if (ret == 0) + ret = sysdev_register(&mainstone_irq_device); + } + return ret; } device_initcall(mainstone_irq_device_init); diff --git a/trunk/arch/arm/mach-pxa/mfp-pxa2xx.c b/trunk/arch/arm/mach-pxa/mfp-pxa2xx.c index 87ae3129f4f7..1d1419b73457 100644 --- a/trunk/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/trunk/arch/arm/mach-pxa/mfp-pxa2xx.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include @@ -338,7 +338,7 @@ static unsigned long saved_gafr[2][4]; static unsigned long saved_gpdr[4]; static unsigned long saved_pgsr[4]; -static int pxa2xx_mfp_suspend(void) +static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) { int i; @@ -365,7 +365,7 @@ static int pxa2xx_mfp_suspend(void) return 0; } -static void pxa2xx_mfp_resume(void) +static int pxa2xx_mfp_resume(struct sys_device *d) { int i; @@ -376,13 +376,15 @@ static void pxa2xx_mfp_resume(void) PGSR(i) = saved_pgsr[i]; } PSSR = PSSR_RDH | PSSR_PH; + return 0; } #else #define pxa2xx_mfp_suspend NULL #define pxa2xx_mfp_resume NULL #endif -struct syscore_ops pxa2xx_mfp_syscore_ops = { +struct sysdev_class pxa2xx_mfp_sysclass = { + .name = "mfp", .suspend = pxa2xx_mfp_suspend, .resume = pxa2xx_mfp_resume, }; @@ -407,6 +409,6 @@ static int __init pxa2xx_mfp_init(void) for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) gpdr_lpm[i] = GPDR(i * 32); - return 0; + return sysdev_class_register(&pxa2xx_mfp_sysclass); } postcore_initcall(pxa2xx_mfp_init); diff --git a/trunk/arch/arm/mach-pxa/mfp-pxa3xx.c b/trunk/arch/arm/mach-pxa/mfp-pxa3xx.c index 89863a01ecd7..7a270eecd480 100644 --- a/trunk/arch/arm/mach-pxa/mfp-pxa3xx.c +++ b/trunk/arch/arm/mach-pxa/mfp-pxa3xx.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include @@ -31,13 +31,13 @@ * a pull-down mode if they're an active low chip select, and we're * just entering standby. 
*/ -static int pxa3xx_mfp_suspend(void) +static int pxa3xx_mfp_suspend(struct sys_device *d, pm_message_t state) { mfp_config_lpm(); return 0; } -static void pxa3xx_mfp_resume(void) +static int pxa3xx_mfp_resume(struct sys_device *d) { mfp_config_run(); @@ -47,13 +47,24 @@ static void pxa3xx_mfp_resume(void) * preserve them here in case they will be referenced later */ ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); + return 0; } #else #define pxa3xx_mfp_suspend NULL #define pxa3xx_mfp_resume NULL #endif -struct syscore_ops pxa3xx_mfp_syscore_ops = { +struct sysdev_class pxa3xx_mfp_sysclass = { + .name = "mfp", .suspend = pxa3xx_mfp_suspend, - .resume = pxa3xx_mfp_resume, + .resume = pxa3xx_mfp_resume, }; + +static int __init mfp_init_devicefs(void) +{ + if (cpu_is_pxa3xx()) + return sysdev_class_register(&pxa3xx_mfp_sysclass); + + return 0; +} +postcore_initcall(mfp_init_devicefs); diff --git a/trunk/arch/arm/mach-pxa/mioa701.c b/trunk/arch/arm/mach-pxa/mioa701.c index e3470137c934..23925db8ff74 100644 --- a/trunk/arch/arm/mach-pxa/mioa701.c +++ b/trunk/arch/arm/mach-pxa/mioa701.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -488,7 +488,7 @@ static void install_bootstrap(void) } -static int mioa701_sys_suspend(void) +static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) { int i = 0, is_bt_on; u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); @@ -514,7 +514,7 @@ static int mioa701_sys_suspend(void) return 0; } -static void mioa701_sys_resume(void) +static int mioa701_sys_resume(struct sys_device *sysdev) { int i = 0; u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); @@ -527,18 +527,43 @@ static void mioa701_sys_resume(void) *mem_resume_enabler = save_buffer[i++]; *mem_resume_bt = save_buffer[i++]; *mem_resume_unknown = save_buffer[i++]; + + return 0; } -static struct syscore_ops mioa701_syscore_ops = { - .suspend = mioa701_sys_suspend, - .resume = mioa701_sys_resume, +static struct sysdev_class mioa701_sysclass = { + .name = "mioa701", +}; + +static struct sys_device sysdev_bootstrap = { + .cls = &mioa701_sysclass, +}; + +static struct sysdev_driver driver_bootstrap = { + .suspend = &mioa701_sys_suspend, + .resume = &mioa701_sys_resume, }; static int __init bootstrap_init(void) { + int rc; int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3); - register_syscore_ops(&mioa701_syscore_ops); + rc = sysdev_class_register(&mioa701_sysclass); + if (rc) { + printk(KERN_ERR "Failed registering mioa701 sys class\n"); + return -ENODEV; + } + rc = sysdev_register(&sysdev_bootstrap); + if (rc) { + printk(KERN_ERR "Failed registering mioa701 sys device\n"); + return -ENODEV; + } + rc = sysdev_driver_register(&mioa701_sysclass, &driver_bootstrap); + if (rc) { + printk(KERN_ERR "Failed registering PMU sys driver\n"); + return -ENODEV; + } save_buffer = kmalloc(save_size, GFP_KERNEL); if (!save_buffer) @@ -551,7 +576,9 @@ static int __init bootstrap_init(void) static void bootstrap_exit(void) { kfree(save_buffer); - unregister_syscore_ops(&mioa701_syscore_ops); + sysdev_driver_unregister(&mioa701_sysclass, &driver_bootstrap); + sysdev_unregister(&sysdev_bootstrap); + sysdev_class_unregister(&mioa701_sysclass); printk(KERN_CRIT "Unregistering mioa701 suspend will hang next" "resume !!!\n"); diff --git a/trunk/arch/arm/mach-pxa/palmld.c b/trunk/arch/arm/mach-pxa/palmld.c index 4061ecddee70..a6f898cbfac9 100644 --- a/trunk/arch/arm/mach-pxa/palmld.c +++ b/trunk/arch/arm/mach-pxa/palmld.c @@ -24,6 +24,7 @@ 
#include #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/palmtreo.c b/trunk/arch/arm/mach-pxa/palmtreo.c index 20d1b18b1733..8aadad55fbe4 100644 --- a/trunk/arch/arm/mach-pxa/palmtreo.c +++ b/trunk/arch/arm/mach-pxa/palmtreo.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/arm/mach-pxa/palmz72.c b/trunk/arch/arm/mach-pxa/palmz72.c index 65f24f0b77e8..3b8a4f37dbbe 100644 --- a/trunk/arch/arm/mach-pxa/palmz72.c +++ b/trunk/arch/arm/mach-pxa/palmz72.c @@ -19,7 +19,7 @@ */ #include -#include +#include #include #include #include @@ -233,9 +233,9 @@ static struct palmz72_resume_info palmz72_resume_info = { static unsigned long store_ptr; -/* syscore_ops for Palm Zire 72 PM */ +/* sys_device for Palm Zire 72 PM */ -static int palmz72_pm_suspend(void) +static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg) { /* setup the resume_info struct for the original bootloader */ palmz72_resume_info.resume_addr = (u32) cpu_resume; @@ -249,23 +249,31 @@ static int palmz72_pm_suspend(void) return 0; } -static void palmz72_pm_resume(void) +static int palmz72_pm_resume(struct sys_device *dev) { *PALMZ72_SAVE_DWORD = store_ptr; + return 0; } -static struct syscore_ops palmz72_pm_syscore_ops = { +static struct sysdev_class palmz72_pm_sysclass = { + .name = "palmz72_pm", .suspend = palmz72_pm_suspend, .resume = palmz72_pm_resume, }; +static struct sys_device palmz72_pm_device = { + .cls = &palmz72_pm_sysclass, +}; + static int __init palmz72_pm_init(void) { + int ret = -ENODEV; if (machine_is_palmz72()) { - register_syscore_ops(&palmz72_pm_syscore_ops); - return 0; + ret = sysdev_class_register(&palmz72_pm_sysclass); + if (ret == 0) + ret = sysdev_register(&palmz72_pm_device); } - return -ENODEV; + return ret; } device_initcall(palmz72_pm_init); diff --git a/trunk/arch/arm/mach-pxa/pxa25x.c b/trunk/arch/arm/mach-pxa/pxa25x.c index fed363cec9c6..a4af8c52d7ee 100644 --- a/trunk/arch/arm/mach-pxa/pxa25x.c +++ b/trunk/arch/arm/mach-pxa/pxa25x.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -350,9 +350,21 @@ static struct platform_device *pxa25x_devices[] __initdata = { &pxa_device_asoc_platform, }; +static struct sys_device pxa25x_sysdev[] = { + { + .cls = &pxa_irq_sysclass, + }, { + .cls = &pxa2xx_mfp_sysclass, + }, { + .cls = &pxa_gpio_sysclass, + }, { + .cls = &pxa2xx_clock_sysclass, + } +}; + static int __init pxa25x_init(void) { - int ret = 0; + int i, ret = 0; if (cpu_is_pxa25x()) { @@ -365,10 +377,11 @@ static int __init pxa25x_init(void) pxa25x_init_pm(); - register_syscore_ops(&pxa_irq_syscore_ops); - register_syscore_ops(&pxa2xx_mfp_syscore_ops); - register_syscore_ops(&pxa_gpio_syscore_ops); - register_syscore_ops(&pxa2xx_clock_syscore_ops); + for (i = 0; i < ARRAY_SIZE(pxa25x_sysdev); i++) { + ret = sysdev_register(&pxa25x_sysdev[i]); + if (ret) + pr_err("failed to register sysdev[%d]\n", i); + } ret = platform_add_devices(pxa25x_devices, ARRAY_SIZE(pxa25x_devices)); diff --git a/trunk/arch/arm/mach-pxa/pxa27x.c b/trunk/arch/arm/mach-pxa/pxa27x.c index 2fecbec58d88..909756eaf4b7 100644 --- a/trunk/arch/arm/mach-pxa/pxa27x.c +++ b/trunk/arch/arm/mach-pxa/pxa27x.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -428,9 +428,21 @@ static struct platform_device *devices[] __initdata = { &pxa27x_device_pwm1, }; +static struct sys_device pxa27x_sysdev[] = { + { + .cls = &pxa_irq_sysclass, + }, { + .cls = 
&pxa2xx_mfp_sysclass, + }, { + .cls = &pxa_gpio_sysclass, + }, { + .cls = &pxa2xx_clock_sysclass, + } +}; + static int __init pxa27x_init(void) { - int ret = 0; + int i, ret = 0; if (cpu_is_pxa27x()) { @@ -443,10 +455,11 @@ static int __init pxa27x_init(void) pxa27x_init_pm(); - register_syscore_ops(&pxa_irq_syscore_ops); - register_syscore_ops(&pxa2xx_mfp_syscore_ops); - register_syscore_ops(&pxa_gpio_syscore_ops); - register_syscore_ops(&pxa2xx_clock_syscore_ops); + for (i = 0; i < ARRAY_SIZE(pxa27x_sysdev); i++) { + ret = sysdev_register(&pxa27x_sysdev[i]); + if (ret) + pr_err("failed to register sysdev[%d]\n", i); + } ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/trunk/arch/arm/mach-pxa/pxa3xx.c b/trunk/arch/arm/mach-pxa/pxa3xx.c index 8521d7d6f1da..8dd107391157 100644 --- a/trunk/arch/arm/mach-pxa/pxa3xx.c +++ b/trunk/arch/arm/mach-pxa/pxa3xx.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -427,9 +427,21 @@ static struct platform_device *devices[] __initdata = { &pxa27x_device_pwm1, }; +static struct sys_device pxa3xx_sysdev[] = { + { + .cls = &pxa_irq_sysclass, + }, { + .cls = &pxa3xx_mfp_sysclass, + }, { + .cls = &pxa_gpio_sysclass, + }, { + .cls = &pxa3xx_clock_sysclass, + } +}; + static int __init pxa3xx_init(void) { - int ret = 0; + int i, ret = 0; if (cpu_is_pxa3xx()) { @@ -450,10 +462,11 @@ static int __init pxa3xx_init(void) pxa3xx_init_pm(); - register_syscore_ops(&pxa_irq_syscore_ops); - register_syscore_ops(&pxa3xx_mfp_syscore_ops); - register_syscore_ops(&pxa_gpio_syscore_ops); - register_syscore_ops(&pxa3xx_clock_syscore_ops); + for (i = 0; i < ARRAY_SIZE(pxa3xx_sysdev); i++) { + ret = sysdev_register(&pxa3xx_sysdev[i]); + if (ret) + pr_err("failed to register sysdev[%d]\n", i); + } ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/trunk/arch/arm/mach-pxa/pxa95x.c b/trunk/arch/arm/mach-pxa/pxa95x.c index ecc82a330fad..23b229bd06e9 100644 --- a/trunk/arch/arm/mach-pxa/pxa95x.c +++ b/trunk/arch/arm/mach-pxa/pxa95x.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -260,6 +260,16 @@ static struct platform_device *devices[] __initdata = { &pxa27x_device_pwm1, }; +static struct sys_device pxa95x_sysdev[] = { + { + .cls = &pxa_irq_sysclass, + }, { + .cls = &pxa_gpio_sysclass, + }, { + .cls = &pxa3xx_clock_sysclass, + } +}; + static int __init pxa95x_init(void) { int ret = 0, i; @@ -283,9 +293,11 @@ static int __init pxa95x_init(void) if ((ret = pxa_init_dma(IRQ_DMA, 32))) return ret; - register_syscore_ops(&pxa_irq_syscore_ops); - register_syscore_ops(&pxa_gpio_syscore_ops); - register_syscore_ops(&pxa3xx_clock_syscore_ops); + for (i = 0; i < ARRAY_SIZE(pxa95x_sysdev); i++) { + ret = sysdev_register(&pxa95x_sysdev[i]); + if (ret) + pr_err("failed to register sysdev[%d]\n", i); + } ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/trunk/arch/arm/mach-pxa/raumfeld.c b/trunk/arch/arm/mach-pxa/raumfeld.c index d130f77b6d11..cd1861351f75 100644 --- a/trunk/arch/arm/mach-pxa/raumfeld.c +++ b/trunk/arch/arm/mach-pxa/raumfeld.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/smemc.c b/trunk/arch/arm/mach-pxa/smemc.c index 79923058d10f..232b7316ec08 100644 --- a/trunk/arch/arm/mach-pxa/smemc.c +++ b/trunk/arch/arm/mach-pxa/smemc.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include @@ -16,7 +16,7 @@ static unsigned long msc[2]; static unsigned long 
sxcnfg, memclkcfg; static unsigned long csadrcfg[4]; -static int pxa3xx_smemc_suspend(void) +static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state) { msc[0] = __raw_readl(MSC0); msc[1] = __raw_readl(MSC1); @@ -30,7 +30,7 @@ static int pxa3xx_smemc_suspend(void) return 0; } -static void pxa3xx_smemc_resume(void) +static int pxa3xx_smemc_resume(struct sys_device *dev) { __raw_writel(msc[0], MSC0); __raw_writel(msc[1], MSC1); @@ -40,19 +40,34 @@ static void pxa3xx_smemc_resume(void) __raw_writel(csadrcfg[1], CSADRCFG1); __raw_writel(csadrcfg[2], CSADRCFG2); __raw_writel(csadrcfg[3], CSADRCFG3); + + return 0; } -static struct syscore_ops smemc_syscore_ops = { +static struct sysdev_class smemc_sysclass = { + .name = "smemc", .suspend = pxa3xx_smemc_suspend, .resume = pxa3xx_smemc_resume, }; +static struct sys_device smemc_sysdev = { + .id = 0, + .cls = &smemc_sysclass, +}; + static int __init smemc_init(void) { - if (cpu_is_pxa3xx()) - register_syscore_ops(&smemc_syscore_ops); + int ret = 0; - return 0; + if (cpu_is_pxa3xx()) { + ret = sysdev_class_register(&smemc_sysclass); + if (ret) + return ret; + + ret = sysdev_register(&smemc_sysdev); + } + + return ret; } subsys_initcall(smemc_init); #endif diff --git a/trunk/arch/arm/mach-pxa/trizeps4.c b/trunk/arch/arm/mach-pxa/trizeps4.c index 687417a93698..b9cfbebdfe9c 100644 --- a/trunk/arch/arm/mach-pxa/trizeps4.c +++ b/trunk/arch/arm/mach-pxa/trizeps4.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/viper.c b/trunk/arch/arm/mach-pxa/viper.c index 903218eab56d..b523f119e0f0 100644 --- a/trunk/arch/arm/mach-pxa/viper.c +++ b/trunk/arch/arm/mach-pxa/viper.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include @@ -131,19 +130,20 @@ static u8 viper_hw_version(void) return v1; } -/* CPU system core operations. */ -static int viper_cpu_suspend(void) +/* CPU sysdev */ +static int viper_cpu_suspend(struct sys_device *sysdev, pm_message_t state) { viper_icr_set_bit(VIPER_ICR_R_DIS); return 0; } -static void viper_cpu_resume(void) +static int viper_cpu_resume(struct sys_device *sysdev) { viper_icr_clear_bit(VIPER_ICR_R_DIS); + return 0; } -static struct syscore_ops viper_cpu_syscore_ops = { +static struct sysdev_driver viper_cpu_sysdev_driver = { .suspend = viper_cpu_suspend, .resume = viper_cpu_resume, }; @@ -945,7 +945,7 @@ static void __init viper_init(void) viper_init_vcore_gpios(); viper_init_cpufreq(); - register_syscore_ops(&viper_cpu_syscore_ops); + sysdev_driver_register(&cpu_sysdev_class, &viper_cpu_sysdev_driver); if (version) { pr_info("viper: hardware v%di%d detected. " diff --git a/trunk/arch/arm/mach-pxa/vpac270.c b/trunk/arch/arm/mach-pxa/vpac270.c index 67bd41488bf8..f71d377c8640 100644 --- a/trunk/arch/arm/mach-pxa/vpac270.c +++ b/trunk/arch/arm/mach-pxa/vpac270.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-realview/include/mach/barriers.h b/trunk/arch/arm/mach-realview/include/mach/barriers.h index 9a732195aa1c..0c5d749d7b5f 100644 --- a/trunk/arch/arm/mach-realview/include/mach/barriers.h +++ b/trunk/arch/arm/mach-realview/include/mach/barriers.h @@ -4,5 +4,5 @@ * operation to deadlock the system. 
*/ #define mb() dsb() -#define rmb() dsb() +#define rmb() dmb() #define wmb() mb() diff --git a/trunk/arch/arm/mach-s3c2410/irq.c b/trunk/arch/arm/mach-s3c2410/irq.c index 2854129f8cc7..5e2f35332056 100644 --- a/trunk/arch/arm/mach-s3c2410/irq.c +++ b/trunk/arch/arm/mach-s3c2410/irq.c @@ -23,12 +23,38 @@ #include #include #include -#include +#include #include #include -struct syscore_ops s3c24xx_irq_syscore_ops = { +static int s3c2410_irq_add(struct sys_device *sysdev) +{ + return 0; +} + +static struct sysdev_driver s3c2410_irq_driver = { + .add = s3c2410_irq_add, .suspend = s3c24xx_irq_suspend, .resume = s3c24xx_irq_resume, }; + +static int __init s3c2410_irq_init(void) +{ + return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_irq_driver); +} + +arch_initcall(s3c2410_irq_init); + +static struct sysdev_driver s3c2410a_irq_driver = { + .add = s3c2410_irq_add, + .suspend = s3c24xx_irq_suspend, + .resume = s3c24xx_irq_resume, +}; + +static int __init s3c2410a_irq_init(void) +{ + return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_irq_driver); +} + +arch_initcall(s3c2410a_irq_init); diff --git a/trunk/arch/arm/mach-s3c2410/mach-bast.c b/trunk/arch/arm/mach-s3c2410/mach-bast.c index 1e2d536adda9..2970ea9f7c2b 100644 --- a/trunk/arch/arm/mach-s3c2410/mach-bast.c +++ b/trunk/arch/arm/mach-s3c2410/mach-bast.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -214,16 +214,17 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = { /* NAND Flash on BAST board */ #ifdef CONFIG_PM -static int bast_pm_suspend(void) +static int bast_pm_suspend(struct sys_device *sd, pm_message_t state) { /* ensure that an nRESET is not generated on resume. */ gpio_direction_output(S3C2410_GPA(21), 1); return 0; } -static void bast_pm_resume(void) +static int bast_pm_resume(struct sys_device *sd) { s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); + return 0; } #else @@ -231,11 +232,16 @@ static void bast_pm_resume(void) #define bast_pm_resume NULL #endif -static struct syscore_ops bast_pm_syscore_ops = { +static struct sysdev_class bast_pm_sysclass = { + .name = "mach-bast", .suspend = bast_pm_suspend, .resume = bast_pm_resume, }; +static struct sys_device bast_pm_sysdev = { + .cls = &bast_pm_sysclass, +}; + static int smartmedia_map[] = { 0 }; static int chip0_map[] = { 1 }; static int chip1_map[] = { 2 }; @@ -636,7 +642,8 @@ static void __init bast_map_io(void) static void __init bast_init(void) { - register_syscore_ops(&bast_pm_syscore_ops); + sysdev_class_register(&bast_pm_sysclass); + sysdev_register(&bast_pm_sysdev); s3c_i2c0_set_platdata(&bast_i2c_info); s3c_nand_set_platdata(&bast_nand_info); diff --git a/trunk/arch/arm/mach-s3c2410/pm.c b/trunk/arch/arm/mach-s3c2410/pm.c index 4728f9aa7df1..725636fc4dc3 100644 --- a/trunk/arch/arm/mach-s3c2410/pm.c +++ b/trunk/arch/arm/mach-s3c2410/pm.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include @@ -93,7 +92,7 @@ static void s3c2410_pm_prepare(void) } } -static void s3c2410_pm_resume(void) +static int s3c2410_pm_resume(struct sys_device *dev) { unsigned long tmp; @@ -105,11 +104,9 @@ static void s3c2410_pm_resume(void) if ( machine_is_aml_m5900() ) s3c2410_gpio_setpin(S3C2410_GPF(2), 0); -} -struct syscore_ops s3c2410_pm_syscore_ops = { - .resume = s3c2410_pm_resume, -}; + return 0; +} static int s3c2410_pm_add(struct sys_device *dev) { @@ -122,6 +119,7 @@ static int s3c2410_pm_add(struct sys_device *dev) #if defined(CONFIG_CPU_S3C2410) static struct sysdev_driver 
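/*
 * Illustrative sketch only, not part of the patch.  The S3C hunks below use
 * a third variant of the sysdev model: rather than creating a new class and
 * device, they attach a sysdev_driver to an already-registered CPU sysdev
 * class, so the suspend/resume callbacks run for the existing sys_device.
 * "baz" is a hypothetical class name standing in for s3c2410_sysclass etc.
 */
#include <linux/init.h>
#include <linux/sysdev.h>

extern struct sysdev_class baz_sysclass;	/* assumed registered elsewhere */

static int baz_pm_add(struct sys_device *sd)
{
	/* runs once for each sys_device already attached to baz_sysclass */
	return 0;
}

static int baz_pm_suspend(struct sys_device *sd, pm_message_t state)
{
	/* save state */
	return 0;
}

static int baz_pm_resume(struct sys_device *sd)
{
	/* restore state */
	return 0;
}

static struct sysdev_driver baz_pm_driver = {
	.add     = baz_pm_add,
	.suspend = baz_pm_suspend,
	.resume  = baz_pm_resume,
};

static int __init baz_pm_init(void)
{
	return sysdev_driver_register(&baz_sysclass, &baz_pm_driver);
}
arch_initcall(baz_pm_init);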
s3c2410_pm_driver = { .add = s3c2410_pm_add, + .resume = s3c2410_pm_resume, }; /* register ourselves */ @@ -135,6 +133,7 @@ arch_initcall(s3c2410_pm_drvinit); static struct sysdev_driver s3c2410a_pm_driver = { .add = s3c2410_pm_add, + .resume = s3c2410_pm_resume, }; static int __init s3c2410a_pm_drvinit(void) @@ -148,6 +147,7 @@ arch_initcall(s3c2410a_pm_drvinit); #if defined(CONFIG_CPU_S3C2440) static struct sysdev_driver s3c2440_pm_driver = { .add = s3c2410_pm_add, + .resume = s3c2410_pm_resume, }; static int __init s3c2440_pm_drvinit(void) @@ -161,6 +161,7 @@ arch_initcall(s3c2440_pm_drvinit); #if defined(CONFIG_CPU_S3C2442) static struct sysdev_driver s3c2442_pm_driver = { .add = s3c2410_pm_add, + .resume = s3c2410_pm_resume, }; static int __init s3c2442_pm_drvinit(void) diff --git a/trunk/arch/arm/mach-s3c2410/s3c2410.c b/trunk/arch/arm/mach-s3c2410/s3c2410.c index f1d3bd8f6f17..adc90a3c5890 100644 --- a/trunk/arch/arm/mach-s3c2410/s3c2410.c +++ b/trunk/arch/arm/mach-s3c2410/s3c2410.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -41,7 +40,6 @@ #include #include #include -#include #include #include @@ -170,9 +168,6 @@ int __init s3c2410_init(void) { printk("S3C2410: Initialising architecture\n"); - register_syscore_ops(&s3c2410_pm_syscore_ops); - register_syscore_ops(&s3c24xx_irq_syscore_ops); - return sysdev_register(&s3c2410_sysdev); } diff --git a/trunk/arch/arm/mach-s3c2412/irq.c b/trunk/arch/arm/mach-s3c2412/irq.c index 1a1aa220972b..f3355d2ec634 100644 --- a/trunk/arch/arm/mach-s3c2412/irq.c +++ b/trunk/arch/arm/mach-s3c2412/irq.c @@ -202,6 +202,8 @@ static int s3c2412_irq_add(struct sys_device *sysdev) static struct sysdev_driver s3c2412_irq_driver = { .add = s3c2412_irq_add, + .suspend = s3c24xx_irq_suspend, + .resume = s3c24xx_irq_resume, }; static int s3c2412_irq_init(void) diff --git a/trunk/arch/arm/mach-s3c2412/mach-jive.c b/trunk/arch/arm/mach-s3c2412/mach-jive.c index 85dcaeb9e62f..923e01bdf017 100644 --- a/trunk/arch/arm/mach-s3c2412/mach-jive.c +++ b/trunk/arch/arm/mach-s3c2412/mach-jive.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -486,7 +486,7 @@ static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = { /* Jive power management device */ #ifdef CONFIG_PM -static int jive_pm_suspend(void) +static int jive_pm_suspend(struct sys_device *sd, pm_message_t state) { /* Write the magic value u-boot uses to check for resume into * the INFORM0 register, and ensure INFORM1 is set to the @@ -498,9 +498,10 @@ static int jive_pm_suspend(void) return 0; } -static void jive_pm_resume(void) +static int jive_pm_resume(struct sys_device *sd) { __raw_writel(0x0, S3C2412_INFORM0); + return 0; } #else @@ -508,11 +509,16 @@ static void jive_pm_resume(void) #define jive_pm_resume NULL #endif -static struct syscore_ops jive_pm_syscore_ops = { +static struct sysdev_class jive_pm_sysclass = { + .name = "jive-pm", .suspend = jive_pm_suspend, .resume = jive_pm_resume, }; +static struct sys_device jive_pm_sysdev = { + .cls = &jive_pm_sysclass, +}; + static void __init jive_map_io(void) { s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc)); @@ -530,9 +536,10 @@ static void jive_power_off(void) static void __init jive_machine_init(void) { - /* register system core operations for managing low level suspend */ + /* register system devices for managing low level suspend */ - register_syscore_ops(&jive_pm_syscore_ops); + sysdev_class_register(&jive_pm_sysclass); + sysdev_register(&jive_pm_sysdev); /* 
write our sleep configurations for the IO. Pull down all unused * IO, ensure that we have turned off all peripherals we do not diff --git a/trunk/arch/arm/mach-s3c2412/pm.c b/trunk/arch/arm/mach-s3c2412/pm.c index 752b13a7b3db..a7417c479ffe 100644 --- a/trunk/arch/arm/mach-s3c2412/pm.c +++ b/trunk/arch/arm/mach-s3c2412/pm.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -87,24 +86,13 @@ static struct sleep_save s3c2412_sleep[] = { SAVE_ITEM(S3C2413_GPJSLPCON), }; -static struct sysdev_driver s3c2412_pm_driver = { - .add = s3c2412_pm_add, -}; - -static __init int s3c2412_pm_init(void) -{ - return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver); -} - -arch_initcall(s3c2412_pm_init); - -static int s3c2412_pm_suspend(void) +static int s3c2412_pm_suspend(struct sys_device *dev, pm_message_t state) { s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); return 0; } -static void s3c2412_pm_resume(void) +static int s3c2412_pm_resume(struct sys_device *dev) { unsigned long tmp; @@ -114,9 +102,18 @@ static void s3c2412_pm_resume(void) __raw_writel(tmp, S3C2412_PWRCFG); s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); + return 0; } -struct syscore_ops s3c2412_pm_syscore_ops = { +static struct sysdev_driver s3c2412_pm_driver = { + .add = s3c2412_pm_add, .suspend = s3c2412_pm_suspend, .resume = s3c2412_pm_resume, }; + +static __init int s3c2412_pm_init(void) +{ + return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver); +} + +arch_initcall(s3c2412_pm_init); diff --git a/trunk/arch/arm/mach-s3c2412/s3c2412.c b/trunk/arch/arm/mach-s3c2412/s3c2412.c index ef0958d3e5c6..4c6df51ddf33 100644 --- a/trunk/arch/arm/mach-s3c2412/s3c2412.c +++ b/trunk/arch/arm/mach-s3c2412/s3c2412.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -245,8 +244,5 @@ int __init s3c2412_init(void) { printk("S3C2412: Initialising architecture\n"); - register_syscore_ops(&s3c2412_pm_syscore_ops); - register_syscore_ops(&s3c24xx_irq_syscore_ops); - return sysdev_register(&s3c2412_sysdev); } diff --git a/trunk/arch/arm/mach-s3c2416/irq.c b/trunk/arch/arm/mach-s3c2416/irq.c index 28ad20d42445..77b38f2381c1 100644 --- a/trunk/arch/arm/mach-s3c2416/irq.c +++ b/trunk/arch/arm/mach-s3c2416/irq.c @@ -236,6 +236,8 @@ static int __init s3c2416_irq_add(struct sys_device *sysdev) static struct sysdev_driver s3c2416_irq_driver = { .add = s3c2416_irq_add, + .suspend = s3c24xx_irq_suspend, + .resume = s3c24xx_irq_resume, }; static int __init s3c2416_irq_init(void) diff --git a/trunk/arch/arm/mach-s3c2416/pm.c b/trunk/arch/arm/mach-s3c2416/pm.c index 41db2b21e213..4a04205b04d5 100644 --- a/trunk/arch/arm/mach-s3c2416/pm.c +++ b/trunk/arch/arm/mach-s3c2416/pm.c @@ -11,7 +11,6 @@ */ #include -#include #include #include @@ -56,26 +55,30 @@ static int s3c2416_pm_add(struct sys_device *sysdev) return 0; } -static struct sysdev_driver s3c2416_pm_driver = { - .add = s3c2416_pm_add, -}; - -static __init int s3c2416_pm_init(void) +static int s3c2416_pm_suspend(struct sys_device *dev, pm_message_t state) { - return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver); + return 0; } -arch_initcall(s3c2416_pm_init); - - -static void s3c2416_pm_resume(void) +static int s3c2416_pm_resume(struct sys_device *dev) { /* unset the return-from-sleep amd inform flags */ __raw_writel(0x0, S3C2443_PWRMODE); __raw_writel(0x0, S3C2412_INFORM0); __raw_writel(0x0, S3C2412_INFORM1); + + return 0; } -struct syscore_ops s3c2416_pm_syscore_ops = { +static struct 
sysdev_driver s3c2416_pm_driver = { + .add = s3c2416_pm_add, + .suspend = s3c2416_pm_suspend, .resume = s3c2416_pm_resume, }; + +static __init int s3c2416_pm_init(void) +{ + return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver); +} + +arch_initcall(s3c2416_pm_init); diff --git a/trunk/arch/arm/mach-s3c2416/s3c2416.c b/trunk/arch/arm/mach-s3c2416/s3c2416.c index 494ce913dc95..ba7fd8737434 100644 --- a/trunk/arch/arm/mach-s3c2416/s3c2416.c +++ b/trunk/arch/arm/mach-s3c2416/s3c2416.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include @@ -55,7 +54,6 @@ #include #include #include -#include #include #include @@ -97,9 +95,6 @@ int __init s3c2416_init(void) s3c_fb_setname("s3c2443-fb"); - register_syscore_ops(&s3c2416_pm_syscore_ops); - register_syscore_ops(&s3c24xx_irq_syscore_ops); - return sysdev_register(&s3c2416_sysdev); } diff --git a/trunk/arch/arm/mach-s3c2440/mach-osiris.c b/trunk/arch/arm/mach-s3c2440/mach-osiris.c index d88536393310..14dc67897757 100644 --- a/trunk/arch/arm/mach-s3c2440/mach-osiris.c +++ b/trunk/arch/arm/mach-s3c2440/mach-osiris.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -284,7 +284,7 @@ static struct platform_device osiris_pcmcia = { #ifdef CONFIG_PM static unsigned char pm_osiris_ctrl0; -static int osiris_pm_suspend(void) +static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) { unsigned int tmp; @@ -304,7 +304,7 @@ static int osiris_pm_suspend(void) return 0; } -static void osiris_pm_resume(void) +static int osiris_pm_resume(struct sys_device *sd) { if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); @@ -312,6 +312,8 @@ static void osiris_pm_resume(void) __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0); s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); + + return 0; } #else @@ -319,11 +321,16 @@ static void osiris_pm_resume(void) #define osiris_pm_resume NULL #endif -static struct syscore_ops osiris_pm_syscore_ops = { +static struct sysdev_class osiris_pm_sysclass = { + .name = "mach-osiris", .suspend = osiris_pm_suspend, .resume = osiris_pm_resume, }; +static struct sys_device osiris_pm_sysdev = { + .cls = &osiris_pm_sysclass, +}; + /* Link for DVS driver to TPS65011 */ static void osiris_tps_release(struct device *dev) @@ -432,7 +439,8 @@ static void __init osiris_map_io(void) static void __init osiris_init(void) { - register_syscore_ops(&osiris_pm_syscore_ops); + sysdev_class_register(&osiris_pm_sysclass); + sysdev_register(&osiris_pm_sysdev); s3c_i2c0_set_platdata(NULL); s3c_nand_set_platdata(&osiris_nand_info); diff --git a/trunk/arch/arm/mach-s3c2440/s3c2440.c b/trunk/arch/arm/mach-s3c2440/s3c2440.c index ce99ff72838d..f7663f731ea0 100644 --- a/trunk/arch/arm/mach-s3c2440/s3c2440.c +++ b/trunk/arch/arm/mach-s3c2440/s3c2440.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -34,7 +33,6 @@ #include #include #include -#include #include #include @@ -53,12 +51,6 @@ int __init s3c2440_init(void) s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; - /* register suspend/resume handlers */ - - register_syscore_ops(&s3c2410_pm_syscore_ops); - register_syscore_ops(&s3c244x_pm_syscore_ops); - register_syscore_ops(&s3c24xx_irq_syscore_ops); - /* register our system device for everything else */ return sysdev_register(&s3c2440_sysdev); diff --git a/trunk/arch/arm/mach-s3c2440/s3c2442.c b/trunk/arch/arm/mach-s3c2440/s3c2442.c index 
6224bad4d604..ecf813546554 100644 --- a/trunk/arch/arm/mach-s3c2440/s3c2442.c +++ b/trunk/arch/arm/mach-s3c2440/s3c2442.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -46,7 +45,6 @@ #include #include #include -#include #include #include @@ -169,10 +167,6 @@ int __init s3c2442_init(void) { printk("S3C2442: Initialising architecture\n"); - register_syscore_ops(&s3c2410_pm_syscore_ops); - register_syscore_ops(&s3c244x_pm_syscore_ops); - register_syscore_ops(&s3c24xx_irq_syscore_ops); - return sysdev_register(&s3c2442_sysdev); } diff --git a/trunk/arch/arm/mach-s3c2440/s3c244x-irq.c b/trunk/arch/arm/mach-s3c2440/s3c244x-irq.c index c63e8f26d901..de07c2feaa32 100644 --- a/trunk/arch/arm/mach-s3c2440/s3c244x-irq.c +++ b/trunk/arch/arm/mach-s3c2440/s3c244x-irq.c @@ -116,6 +116,8 @@ static int s3c244x_irq_add(struct sys_device *sysdev) static struct sysdev_driver s3c2440_irq_driver = { .add = s3c244x_irq_add, + .suspend = s3c24xx_irq_suspend, + .resume = s3c24xx_irq_resume, }; static int s3c2440_irq_init(void) @@ -127,6 +129,8 @@ arch_initcall(s3c2440_irq_init); static struct sysdev_driver s3c2442_irq_driver = { .add = s3c244x_irq_add, + .suspend = s3c24xx_irq_suspend, + .resume = s3c24xx_irq_resume, }; diff --git a/trunk/arch/arm/mach-s3c2440/s3c244x.c b/trunk/arch/arm/mach-s3c2440/s3c244x.c index 7e8a23d2098a..90c1707b9c95 100644 --- a/trunk/arch/arm/mach-s3c2440/s3c244x.c +++ b/trunk/arch/arm/mach-s3c2440/s3c244x.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include @@ -135,14 +134,45 @@ void __init s3c244x_init_clocks(int xtal) s3c2410_baseclk_add(); } +#ifdef CONFIG_PM + +static struct sleep_save s3c244x_sleep[] = { + SAVE_ITEM(S3C2440_DSC0), + SAVE_ITEM(S3C2440_DSC1), + SAVE_ITEM(S3C2440_GPJDAT), + SAVE_ITEM(S3C2440_GPJCON), + SAVE_ITEM(S3C2440_GPJUP) +}; + +static int s3c244x_suspend(struct sys_device *dev, pm_message_t state) +{ + s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); + return 0; +} + +static int s3c244x_resume(struct sys_device *dev) +{ + s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); + return 0; +} + +#else +#define s3c244x_suspend NULL +#define s3c244x_resume NULL +#endif + /* Since the S3C2442 and S3C2440 share items, put both sysclasses here */ struct sysdev_class s3c2440_sysclass = { .name = "s3c2440-core", + .suspend = s3c244x_suspend, + .resume = s3c244x_resume }; struct sysdev_class s3c2442_sysclass = { .name = "s3c2442-core", + .suspend = s3c244x_suspend, + .resume = s3c244x_resume }; /* need to register class before we actually register the device, and @@ -164,33 +194,3 @@ static int __init s3c2442_core_init(void) } core_initcall(s3c2442_core_init); - - -#ifdef CONFIG_PM -static struct sleep_save s3c244x_sleep[] = { - SAVE_ITEM(S3C2440_DSC0), - SAVE_ITEM(S3C2440_DSC1), - SAVE_ITEM(S3C2440_GPJDAT), - SAVE_ITEM(S3C2440_GPJCON), - SAVE_ITEM(S3C2440_GPJUP) -}; - -static int s3c244x_suspend(void) -{ - s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); - return 0; -} - -static void s3c244x_resume(void) -{ - s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); -} -#else -#define s3c244x_suspend NULL -#define s3c244x_resume NULL -#endif - -struct syscore_ops s3c244x_pm_syscore_ops = { - .suspend = s3c244x_suspend, - .resume = s3c244x_resume, -}; diff --git a/trunk/arch/arm/mach-s3c64xx/irq-pm.c b/trunk/arch/arm/mach-s3c64xx/irq-pm.c index 8bec61e242c7..da1bec64b9da 100644 --- a/trunk/arch/arm/mach-s3c64xx/irq-pm.c +++ b/trunk/arch/arm/mach-s3c64xx/irq-pm.c @@ -13,7 +13,7 @@ */ 
#include -#include +#include #include #include #include @@ -54,7 +54,7 @@ static struct irq_grp_save { static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS]; -static int s3c64xx_irq_pm_suspend(void) +static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state) { struct irq_grp_save *grp = eint_grp_save; int i; @@ -75,7 +75,7 @@ static int s3c64xx_irq_pm_suspend(void) return 0; } -static void s3c64xx_irq_pm_resume(void) +static int s3c64xx_irq_pm_resume(struct sys_device *dev) { struct irq_grp_save *grp = eint_grp_save; int i; @@ -94,18 +94,18 @@ static void s3c64xx_irq_pm_resume(void) } S3C_PMDBG("%s: IRQ configuration restored\n", __func__); + return 0; } -struct syscore_ops s3c64xx_irq_syscore_ops = { +static struct sysdev_driver s3c64xx_irq_driver = { .suspend = s3c64xx_irq_pm_suspend, .resume = s3c64xx_irq_pm_resume, }; -static __init int s3c64xx_syscore_init(void) +static int __init s3c64xx_irq_pm_init(void) { - register_syscore_ops(&s3c64xx_irq_syscore_ops); - - return 0; + return sysdev_driver_register(&s3c64xx_sysclass, &s3c64xx_irq_driver); } -core_initcall(s3c64xx_syscore_init); +arch_initcall(s3c64xx_irq_pm_init); + diff --git a/trunk/arch/arm/mach-s5pv210/pm.c b/trunk/arch/arm/mach-s5pv210/pm.c index 24febae3d4c0..549d7924fd4c 100644 --- a/trunk/arch/arm/mach-s5pv210/pm.c +++ b/trunk/arch/arm/mach-s5pv210/pm.c @@ -16,7 +16,6 @@ #include #include -#include #include #include @@ -141,17 +140,7 @@ static int s5pv210_pm_add(struct sys_device *sysdev) return 0; } -static struct sysdev_driver s5pv210_pm_driver = { - .add = s5pv210_pm_add, -}; - -static __init int s5pv210_pm_drvinit(void) -{ - return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver); -} -arch_initcall(s5pv210_pm_drvinit); - -static void s5pv210_pm_resume(void) +static int s5pv210_pm_resume(struct sys_device *dev) { u32 tmp; @@ -161,15 +150,17 @@ static void s5pv210_pm_resume(void) __raw_writel(tmp , S5P_OTHERS); s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); + + return 0; } -static struct syscore_ops s5pv210_pm_syscore_ops = { +static struct sysdev_driver s5pv210_pm_driver = { + .add = s5pv210_pm_add, .resume = s5pv210_pm_resume, }; -static __init int s5pv210_pm_syscore_init(void) +static __init int s5pv210_pm_drvinit(void) { - register_syscore_ops(&s5pv210_pm_syscore_ops); - return 0; + return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver); } -arch_initcall(s5pv210_pm_syscore_init); +arch_initcall(s5pv210_pm_drvinit); diff --git a/trunk/arch/arm/mach-sa1100/irq.c b/trunk/arch/arm/mach-sa1100/irq.c index dfbf824a69fa..423ddb3d65e9 100644 --- a/trunk/arch/arm/mach-sa1100/irq.c +++ b/trunk/arch/arm/mach-sa1100/irq.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include @@ -234,7 +234,7 @@ static struct sa1100irq_state { unsigned int iccr; } sa1100irq_state; -static int sa1100irq_suspend(void) +static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state) { struct sa1100irq_state *st = &sa1100irq_state; @@ -264,7 +264,7 @@ static int sa1100irq_suspend(void) return 0; } -static void sa1100irq_resume(void) +static int sa1100irq_resume(struct sys_device *dev) { struct sa1100irq_state *st = &sa1100irq_state; @@ -277,17 +277,24 @@ static void sa1100irq_resume(void) ICMR = st->icmr; } + return 0; } -static struct syscore_ops sa1100irq_syscore_ops = { +static struct sysdev_class sa1100irq_sysclass = { + .name = "sa11x0-irq", .suspend = sa1100irq_suspend, .resume = sa1100irq_resume, }; +static struct sys_device 
sa1100irq_device = { + .id = 0, + .cls = &sa1100irq_sysclass, +}; + static int __init sa1100irq_init_devicefs(void) { - register_syscore_ops(&sa1100irq_syscore_ops); - return 0; + sysdev_class_register(&sa1100irq_sysclass); + return sysdev_register(&sa1100irq_device); } device_initcall(sa1100irq_init_devicefs); diff --git a/trunk/arch/arm/mach-shmobile/pm_runtime.c b/trunk/arch/arm/mach-shmobile/pm_runtime.c index 2d1b67a59e4a..94912d3944d3 100644 --- a/trunk/arch/arm/mach-shmobile/pm_runtime.c +++ b/trunk/arch/arm/mach-shmobile/pm_runtime.c @@ -18,41 +18,152 @@ #include #include #include -#include #ifdef CONFIG_PM_RUNTIME +#define BIT_ONCE 0 +#define BIT_ACTIVE 1 +#define BIT_CLK_ENABLED 2 -static int default_platform_runtime_idle(struct device *dev) +struct pm_runtime_data { + unsigned long flags; + struct clk *clk; +}; + +static void __devres_release(struct device *dev, void *res) +{ + struct pm_runtime_data *prd = res; + + dev_dbg(dev, "__devres_release()\n"); + + if (test_bit(BIT_CLK_ENABLED, &prd->flags)) + clk_disable(prd->clk); + + if (test_bit(BIT_ACTIVE, &prd->flags)) + clk_put(prd->clk); +} + +static struct pm_runtime_data *__to_prd(struct device *dev) +{ + return devres_find(dev, __devres_release, NULL, NULL); +} + +static void platform_pm_runtime_init(struct device *dev, + struct pm_runtime_data *prd) +{ + if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) { + prd->clk = clk_get(dev, NULL); + if (!IS_ERR(prd->clk)) { + set_bit(BIT_ACTIVE, &prd->flags); + dev_info(dev, "clocks managed by runtime pm\n"); + } + } +} + +static void platform_pm_runtime_bug(struct device *dev, + struct pm_runtime_data *prd) +{ + if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) + dev_err(dev, "runtime pm suspend before resume\n"); +} + +int platform_pm_runtime_suspend(struct device *dev) +{ + struct pm_runtime_data *prd = __to_prd(dev); + + dev_dbg(dev, "platform_pm_runtime_suspend()\n"); + + platform_pm_runtime_bug(dev, prd); + + if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { + clk_disable(prd->clk); + clear_bit(BIT_CLK_ENABLED, &prd->flags); + } + + return 0; +} + +int platform_pm_runtime_resume(struct device *dev) +{ + struct pm_runtime_data *prd = __to_prd(dev); + + dev_dbg(dev, "platform_pm_runtime_resume()\n"); + + platform_pm_runtime_init(dev, prd); + + if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { + clk_enable(prd->clk); + set_bit(BIT_CLK_ENABLED, &prd->flags); + } + + return 0; +} + +int platform_pm_runtime_idle(struct device *dev) { /* suspend synchronously to disable clocks immediately */ return pm_runtime_suspend(dev); } -static struct dev_power_domain default_power_domain = { - .ops = { - .runtime_suspend = pm_runtime_clk_suspend, - .runtime_resume = pm_runtime_clk_resume, - .runtime_idle = default_platform_runtime_idle, - USE_PLATFORM_PM_SLEEP_OPS - }, -}; +static int platform_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pm_runtime_data *prd; + + dev_dbg(dev, "platform_bus_notify() %ld !\n", action); + + if (action == BUS_NOTIFY_BIND_DRIVER) { + prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL); + if (prd) + devres_add(dev, prd); + else + dev_err(dev, "unable to alloc memory for runtime pm\n"); + } -#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) + return 0; +} -#else +#else /* CONFIG_PM_RUNTIME */ -#define DEFAULT_PWR_DOMAIN_PTR NULL +static int platform_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct clk *clk; + + 
dev_dbg(dev, "platform_bus_notify() %ld !\n", action); + + switch (action) { + case BUS_NOTIFY_BIND_DRIVER: + clk = clk_get(dev, NULL); + if (!IS_ERR(clk)) { + clk_enable(clk); + clk_put(clk); + dev_info(dev, "runtime pm disabled, clock forced on\n"); + } + break; + case BUS_NOTIFY_UNBOUND_DRIVER: + clk = clk_get(dev, NULL); + if (!IS_ERR(clk)) { + clk_disable(clk); + clk_put(clk); + dev_info(dev, "runtime pm disabled, clock forced off\n"); + } + break; + } + + return 0; +} #endif /* CONFIG_PM_RUNTIME */ -static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, - .con_ids = { NULL, }, +static struct notifier_block platform_bus_notifier = { + .notifier_call = platform_bus_notify }; static int __init sh_pm_runtime_init(void) { - pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + bus_register_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } core_initcall(sh_pm_runtime_init); diff --git a/trunk/arch/arm/mach-tegra/include/mach/barriers.h b/trunk/arch/arm/mach-tegra/include/mach/barriers.h index 425b42e91ef6..cc115174899b 100644 --- a/trunk/arch/arm/mach-tegra/include/mach/barriers.h +++ b/trunk/arch/arm/mach-tegra/include/mach/barriers.h @@ -23,7 +23,7 @@ #include -#define rmb() dsb() +#define rmb() dmb() #define wmb() do { dsb(); outer_sync(); } while (0) #define mb() wmb() diff --git a/trunk/arch/arm/mm/init.c b/trunk/arch/arm/mm/init.c index e591513bb53e..e5f6fc428348 100644 --- a/trunk/arch/arm/mm/init.c +++ b/trunk/arch/arm/mm/init.c @@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) * Convert start_pfn/end_pfn to a struct page pointer. */ start_pg = pfn_to_page(start_pfn - 1) + 1; - end_pg = pfn_to_page(end_pfn - 1) + 1; + end_pg = pfn_to_page(end_pfn); /* * Convert to physical addresses, and @@ -426,14 +426,6 @@ static void __init free_unused_memmap(struct meminfo *mi) bank_start = bank_pfn_start(bank); -#ifdef CONFIG_SPARSEMEM - /* - * Take care not to free memmap entries that don't exist - * due to SPARSEMEM sections which aren't present. - */ - bank_start = min(bank_start, - ALIGN(prev_bank_end, PAGES_PER_SECTION)); -#endif /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. @@ -448,12 +440,6 @@ static void __init free_unused_memmap(struct meminfo *mi) */ prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); } - -#ifdef CONFIG_SPARSEMEM - if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) - free_memmap(prev_bank_end, - ALIGN(prev_bank_end, PAGES_PER_SECTION)); -#endif } static void __init free_highpages(void) diff --git a/trunk/arch/arm/mm/proc-xscale.S b/trunk/arch/arm/mm/proc-xscale.S index 42af97664c9d..ce233bcbf506 100644 --- a/trunk/arch/arm/mm/proc-xscale.S +++ b/trunk/arch/arm/mm/proc-xscale.S @@ -395,7 +395,7 @@ ENTRY(xscale_dma_a0_map_area) teq r2, #DMA_TO_DEVICE beq xscale_dma_clean_range b xscale_dma_flush_range -ENDPROC(xscale_dma_a0_map_area) +ENDPROC(xscsale_dma_a0_map_area) /* * dma_unmap_area(start, size, dir) diff --git a/trunk/arch/arm/plat-mxc/gpio.c b/trunk/arch/arm/plat-mxc/gpio.c index 6cd6d7f686f6..7a107246fd98 100644 --- a/trunk/arch/arm/plat-mxc/gpio.c +++ b/trunk/arch/arm/plat-mxc/gpio.c @@ -295,12 +295,6 @@ static int mxc_gpio_direction_output(struct gpio_chip *chip, return 0; } -/* - * This lock class tells lockdep that GPIO irqs are in a different - * category than their parents, so it won't report false recursion. 
- */ -static struct lock_class_key gpio_lock_class; - int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) { int i, j; @@ -317,7 +311,6 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) __raw_writel(~0, port[i].base + GPIO_ISR); for (j = port[i].virtual_irq_start; j < port[i].virtual_irq_start + 32; j++) { - irq_set_lockdep_class(j, &gpio_lock_class); irq_set_chip_and_handler(j, &gpio_irq_chip, handle_level_irq); set_irq_flags(j, IRQF_VALID); diff --git a/trunk/arch/arm/plat-mxc/ssi-fiq.S b/trunk/arch/arm/plat-mxc/ssi-fiq.S index 8397a2dd19f2..4ddce565b353 100644 --- a/trunk/arch/arm/plat-mxc/ssi-fiq.S +++ b/trunk/arch/arm/plat-mxc/ssi-fiq.S @@ -124,8 +124,6 @@ imx_ssi_fiq_start: 1: @ return from FIQ subs pc, lr, #4 - - .align imx_ssi_fiq_base: .word 0x0 imx_ssi_fiq_rx_buffer: diff --git a/trunk/arch/arm/plat-omap/gpio.c b/trunk/arch/arm/plat-omap/gpio.c index bd9e32187eab..d2adcdda23cf 100644 --- a/trunk/arch/arm/plat-omap/gpio.c +++ b/trunk/arch/arm/plat-omap/gpio.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -1372,7 +1372,9 @@ static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { .resume_noirq = omap_mpuio_resume_noirq, }; -/* use platform_driver for this. */ +/* use platform_driver for this, now that there's no longer any + * point to sys_device (other than not disturbing old code). + */ static struct platform_driver omap_mpuio_driver = { .driver = { .name = "mpuio", @@ -1743,7 +1745,7 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev) } #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) -static int omap_gpio_suspend(void) +static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) { int i; @@ -1793,12 +1795,12 @@ static int omap_gpio_suspend(void) return 0; } -static void omap_gpio_resume(void) +static int omap_gpio_resume(struct sys_device *dev) { int i; if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) - return; + return 0; for (i = 0; i < gpio_bank_count; i++) { struct gpio_bank *bank = &gpio_bank[i]; @@ -1834,13 +1836,21 @@ static void omap_gpio_resume(void) __raw_writel(bank->saved_wakeup, wake_set); spin_unlock_irqrestore(&bank->lock, flags); } + + return 0; } -static struct syscore_ops omap_gpio_syscore_ops = { +static struct sysdev_class omap_gpio_sysclass = { + .name = "gpio", .suspend = omap_gpio_suspend, .resume = omap_gpio_resume, }; +static struct sys_device omap_gpio_device = { + .id = 0, + .cls = &omap_gpio_sysclass, +}; + #endif #ifdef CONFIG_ARCH_OMAP2PLUS @@ -2098,14 +2108,21 @@ postcore_initcall(omap_gpio_drv_reg); static int __init omap_gpio_sysinit(void) { + int ret = 0; + mpuio_init(); #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) - if (cpu_is_omap16xx() || cpu_class_is_omap2()) - register_syscore_ops(&omap_gpio_syscore_ops); + if (cpu_is_omap16xx() || cpu_class_is_omap2()) { + if (ret == 0) { + ret = sysdev_class_register(&omap_gpio_sysclass); + if (ret == 0) + ret = sysdev_register(&omap_gpio_device); + } + } #endif - return 0; + return ret; } arch_initcall(omap_gpio_sysinit); diff --git a/trunk/arch/arm/plat-omap/iommu.c b/trunk/arch/arm/plat-omap/iommu.c index 34fc31ee9081..8a51fd58f656 100644 --- a/trunk/arch/arm/plat-omap/iommu.c +++ b/trunk/arch/arm/plat-omap/iommu.c @@ -793,8 +793,6 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) clk_enable(obj->clk); errs = iommu_report_fault(obj, &da); clk_disable(obj->clk); - if (errs == 0) - return IRQ_HANDLED; /* Fault callback or TLB/PTE Dynamic loading */ 
if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) diff --git a/trunk/arch/arm/plat-omap/omap_device.c b/trunk/arch/arm/plat-omap/omap_device.c index a37b8eb65b76..9bbda9acb73b 100644 --- a/trunk/arch/arm/plat-omap/omap_device.c +++ b/trunk/arch/arm/plat-omap/omap_device.c @@ -536,28 +536,6 @@ int omap_early_device_register(struct omap_device *od) return 0; } -static int _od_runtime_suspend(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - - return omap_device_idle(pdev); -} - -static int _od_runtime_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - - return omap_device_enable(pdev); -} - -static struct dev_power_domain omap_device_power_domain = { - .ops = { - .runtime_suspend = _od_runtime_suspend, - .runtime_resume = _od_runtime_resume, - USE_PLATFORM_PM_SLEEP_OPS - } -}; - /** * omap_device_register - register an omap_device with one omap_hwmod * @od: struct omap_device * to register @@ -571,7 +549,6 @@ int omap_device_register(struct omap_device *od) pr_debug("omap_device: %s: registering\n", od->pdev.name); od->pdev.dev.parent = &omap_device_parent; - od->pdev.dev.pwr_domain = &omap_device_power_domain; return platform_device_register(&od->pdev); } diff --git a/trunk/arch/arm/plat-pxa/gpio.c b/trunk/arch/arm/plat-pxa/gpio.c index 48ebb9479b61..dce088f45678 100644 --- a/trunk/arch/arm/plat-pxa/gpio.c +++ b/trunk/arch/arm/plat-pxa/gpio.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -295,7 +295,7 @@ void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn) } #ifdef CONFIG_PM -static int pxa_gpio_suspend(void) +static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state) { struct pxa_gpio_chip *c; int gpio; @@ -312,7 +312,7 @@ static int pxa_gpio_suspend(void) return 0; } -static void pxa_gpio_resume(void) +static int pxa_gpio_resume(struct sys_device *dev) { struct pxa_gpio_chip *c; int gpio; @@ -326,13 +326,22 @@ static void pxa_gpio_resume(void) __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET); __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET); } + return 0; } #else #define pxa_gpio_suspend NULL #define pxa_gpio_resume NULL #endif -struct syscore_ops pxa_gpio_syscore_ops = { +struct sysdev_class pxa_gpio_sysclass = { + .name = "gpio", .suspend = pxa_gpio_suspend, .resume = pxa_gpio_resume, }; + +static int __init pxa_gpio_init(void) +{ + return sysdev_class_register(&pxa_gpio_sysclass); +} + +core_initcall(pxa_gpio_init); diff --git a/trunk/arch/arm/plat-pxa/mfp.c b/trunk/arch/arm/plat-pxa/mfp.c index be12eadcce20..a9aa5ad3f4eb 100644 --- a/trunk/arch/arm/plat-pxa/mfp.c +++ b/trunk/arch/arm/plat-pxa/mfp.c @@ -17,6 +17,7 @@ #include #include #include +#include #include diff --git a/trunk/arch/arm/plat-s3c24xx/dma.c b/trunk/arch/arm/plat-s3c24xx/dma.c index c10d10c56e2e..27ea852e3370 100644 --- a/trunk/arch/arm/plat-s3c24xx/dma.c +++ b/trunk/arch/arm/plat-s3c24xx/dma.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -1195,12 +1195,19 @@ int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *d EXPORT_SYMBOL(s3c2410_dma_getposition); -/* system core operations */ +static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) +{ + return container_of(dev, struct s3c2410_dma_chan, dev); +} + +/* system device class */ #ifdef CONFIG_PM -static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp) +static int s3c2410_dma_suspend(struct sys_device *dev, 
pm_message_t state) { + struct s3c2410_dma_chan *cp = to_dma_chan(dev); + printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { @@ -1215,21 +1222,13 @@ static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp) s3c2410_dma_dostop(cp); } -} - -static int s3c2410_dma_suspend(void) -{ - struct s3c2410_dma_chan *cp = s3c2410_chans; - int channel; - - for (channel = 0; channel < dma_channels; cp++, channel++) - s3c2410_dma_suspend_chan(cp); return 0; } -static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) +static int s3c2410_dma_resume(struct sys_device *dev) { + struct s3c2410_dma_chan *cp = to_dma_chan(dev); unsigned int no = cp->number | DMACH_LOW_LEVEL; /* restore channel's hardware configuration */ @@ -1250,21 +1249,13 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) return 0; } -static void s3c2410_dma_resume(void) -{ - struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; - int channel; - - for (channel = dma_channels - 1; channel >= 0; cp++, channel--) - s3c2410_dma_resume_chan(cp); -} - #else #define s3c2410_dma_suspend NULL #define s3c2410_dma_resume NULL #endif /* CONFIG_PM */ -struct syscore_ops dma_syscore_ops = { +struct sysdev_class dma_sysclass = { + .name = "s3c24xx-dma", .suspend = s3c2410_dma_suspend, .resume = s3c2410_dma_resume, }; @@ -1278,14 +1269,39 @@ static void s3c2410_dma_cache_ctor(void *p) /* initialisation code */ -static int __init s3c24xx_dma_syscore_init(void) +static int __init s3c24xx_dma_sysclass_init(void) { - register_syscore_ops(&dma_syscore_ops); + int ret = sysdev_class_register(&dma_sysclass); + + if (ret != 0) + printk(KERN_ERR "dma sysclass registration failed\n"); + + return ret; +} + +core_initcall(s3c24xx_dma_sysclass_init); + +static int __init s3c24xx_dma_sysdev_register(void) +{ + struct s3c2410_dma_chan *cp = s3c2410_chans; + int channel, ret; + + for (channel = 0; channel < dma_channels; cp++, channel++) { + cp->dev.cls = &dma_sysclass; + cp->dev.id = channel; + ret = sysdev_register(&cp->dev); + + if (ret) { + printk(KERN_ERR "error registering dev for dma %d\n", + channel); + return ret; + } + } return 0; } -late_initcall(s3c24xx_dma_syscore_init); +late_initcall(s3c24xx_dma_sysdev_register); int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, unsigned int stride) diff --git a/trunk/arch/arm/plat-s3c24xx/irq-pm.c b/trunk/arch/arm/plat-s3c24xx/irq-pm.c index 0efb2e2848c8..c3624d898630 100644 --- a/trunk/arch/arm/plat-s3c24xx/irq-pm.c +++ b/trunk/arch/arm/plat-s3c24xx/irq-pm.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -64,7 +65,7 @@ static unsigned long save_extint[3]; static unsigned long save_eintflt[4]; static unsigned long save_eintmask; -int s3c24xx_irq_suspend(void) +int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) { unsigned int i; @@ -80,7 +81,7 @@ int s3c24xx_irq_suspend(void) return 0; } -void s3c24xx_irq_resume(void) +int s3c24xx_irq_resume(struct sys_device *dev) { unsigned int i; @@ -92,4 +93,6 @@ void s3c24xx_irq_resume(void) s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save)); __raw_writel(save_eintmask, S3C24XX_EINTMASK); + + return 0; } diff --git a/trunk/arch/arm/plat-s5p/irq-pm.c b/trunk/arch/arm/plat-s5p/irq-pm.c index 327acb3a4464..5259ad458bc8 100644 --- a/trunk/arch/arm/plat-s5p/irq-pm.c +++ b/trunk/arch/arm/plat-s5p/irq-pm.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -76,15 +77,17 @@ static struct 
sleep_save eint_save[] = { SAVE_ITEM(S5P_EINT_MASK(3)), }; -int s3c24xx_irq_suspend(void) +int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) { s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save)); return 0; } -void s3c24xx_irq_resume(void) +int s3c24xx_irq_resume(struct sys_device *dev) { s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save)); + + return 0; } diff --git a/trunk/arch/arm/plat-samsung/include/plat/cpu.h b/trunk/arch/arm/plat-samsung/include/plat/cpu.h index 3aedac0034ba..cedfff51c82b 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/cpu.h +++ b/trunk/arch/arm/plat-samsung/include/plat/cpu.h @@ -68,12 +68,6 @@ extern void s3c24xx_init_uartdevs(char *name, struct sys_timer; extern struct sys_timer s3c24xx_timer; -extern struct syscore_ops s3c2410_pm_syscore_ops; -extern struct syscore_ops s3c2412_pm_syscore_ops; -extern struct syscore_ops s3c2416_pm_syscore_ops; -extern struct syscore_ops s3c244x_pm_syscore_ops; -extern struct syscore_ops s3c64xx_irq_syscore_ops; - /* system device classes */ extern struct sysdev_class s3c2410_sysclass; diff --git a/trunk/arch/arm/plat-samsung/include/plat/pm.h b/trunk/arch/arm/plat-samsung/include/plat/pm.h index 7fb6f6be8c81..937cc2ace517 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/pm.h +++ b/trunk/arch/arm/plat-samsung/include/plat/pm.h @@ -103,16 +103,14 @@ extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count); #ifdef CONFIG_PM extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); -extern int s3c24xx_irq_suspend(void); -extern void s3c24xx_irq_resume(void); +extern int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state); +extern int s3c24xx_irq_resume(struct sys_device *dev); #else #define s3c_irqext_wake NULL #define s3c24xx_irq_suspend NULL #define s3c24xx_irq_resume NULL #endif -extern struct syscore_ops s3c24xx_irq_syscore_ops; - /* PM debug functions */ #ifdef CONFIG_SAMSUNG_PM_DEBUG diff --git a/trunk/arch/arm/vfp/vfpmodule.c b/trunk/arch/arm/vfp/vfpmodule.c index f25e7ec89416..f74695075e64 100644 --- a/trunk/arch/arm/vfp/vfpmodule.c +++ b/trunk/arch/arm/vfp/vfpmodule.c @@ -398,9 +398,9 @@ static void vfp_enable(void *unused) } #ifdef CONFIG_PM -#include +#include -static int vfp_pm_suspend(void) +static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) { struct thread_info *ti = current_thread_info(); u32 fpexc = fmrx(FPEXC); @@ -420,25 +420,34 @@ static int vfp_pm_suspend(void) return 0; } -static void vfp_pm_resume(void) +static int vfp_pm_resume(struct sys_device *dev) { /* ensure we have access to the vfp */ vfp_enable(NULL); /* and disable it to ensure the next usage restores the state */ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + + return 0; } -static struct syscore_ops vfp_pm_syscore_ops = { +static struct sysdev_class vfp_pm_sysclass = { + .name = "vfp", .suspend = vfp_pm_suspend, .resume = vfp_pm_resume, }; +static struct sys_device vfp_pm_sysdev = { + .cls = &vfp_pm_sysclass, +}; + static void vfp_pm_init(void) { - register_syscore_ops(&vfp_pm_syscore_ops); + sysdev_class_register(&vfp_pm_sysclass); + sysdev_register(&vfp_pm_sysdev); } + #else static inline void vfp_pm_init(void) { } #endif /* CONFIG_PM */ diff --git a/trunk/arch/avr32/mach-at32ap/intc.c b/trunk/arch/avr32/mach-at32ap/intc.c index 3e3646186c9f..21ce35f33aa5 100644 --- a/trunk/arch/avr32/mach-at32ap/intc.c +++ b/trunk/arch/avr32/mach-at32ap/intc.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include @@ -21,6 +21,7 @@ struct intc { void __iomem *regs; struct 
irq_chip chip; + struct sys_device sysdev; #ifdef CONFIG_PM unsigned long suspend_ipr; unsigned long saved_ipr[64]; @@ -145,8 +146,9 @@ void intc_set_suspend_handler(unsigned long offset) intc0.suspend_ipr = offset; } -static int intc_suspend(void) +static int intc_suspend(struct sys_device *sdev, pm_message_t state) { + struct intc *intc = container_of(sdev, struct intc, sysdev); int i; if (unlikely(!irqs_disabled())) { @@ -154,25 +156,28 @@ static int intc_suspend(void) return -EINVAL; } - if (unlikely(!intc0.suspend_ipr)) { + if (unlikely(!intc->suspend_ipr)) { pr_err("intc_suspend: suspend_ipr not initialized\n"); return -EINVAL; } for (i = 0; i < 64; i++) { - intc0.saved_ipr[i] = intc_readl(&intc0, INTPR0 + 4 * i); - intc_writel(&intc0, INTPR0 + 4 * i, intc0.suspend_ipr); + intc->saved_ipr[i] = intc_readl(intc, INTPR0 + 4 * i); + intc_writel(intc, INTPR0 + 4 * i, intc->suspend_ipr); } return 0; } -static int intc_resume(void) +static int intc_resume(struct sys_device *sdev) { + struct intc *intc = container_of(sdev, struct intc, sysdev); int i; + WARN_ON(!irqs_disabled()); + for (i = 0; i < 64; i++) - intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]); + intc_writel(intc, INTPR0 + 4 * i, intc->saved_ipr[i]); return 0; } @@ -181,18 +186,27 @@ static int intc_resume(void) #define intc_resume NULL #endif -static struct syscore_ops intc_syscore_ops = { +static struct sysdev_class intc_class = { + .name = "intc", .suspend = intc_suspend, .resume = intc_resume, }; -static int __init intc_init_syscore(void) +static int __init intc_init_sysdev(void) { - register_syscore_ops(&intc_syscore_ops); + int ret; - return 0; + ret = sysdev_class_register(&intc_class); + if (ret) + return ret; + + intc0.sysdev.id = 0; + intc0.sysdev.cls = &intc_class; + ret = sysdev_register(&intc0.sysdev); + + return ret; } -device_initcall(intc_init_syscore); +device_initcall(intc_init_sysdev); unsigned long intc_get_pending(unsigned int group) { diff --git a/trunk/arch/blackfin/kernel/nmi.c b/trunk/arch/blackfin/kernel/nmi.c index 401eb1d8e3b4..0b5f72f17fd0 100644 --- a/trunk/arch/blackfin/kernel/nmi.c +++ b/trunk/arch/blackfin/kernel/nmi.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include #include @@ -196,31 +196,43 @@ void touch_nmi_watchdog(void) /* Suspend/resume support */ #ifdef CONFIG_PM -static int nmi_wdt_suspend(void) +static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) { nmi_wdt_stop(); return 0; } -static void nmi_wdt_resume(void) +static int nmi_wdt_resume(struct sys_device *dev) { if (nmi_active) nmi_wdt_start(); + return 0; } -static struct syscore_ops nmi_syscore_ops = { +static struct sysdev_class nmi_sysclass = { + .name = DRV_NAME, .resume = nmi_wdt_resume, .suspend = nmi_wdt_suspend, }; -static int __init init_nmi_wdt_syscore(void) +static struct sys_device device_nmi_wdt = { + .id = 0, + .cls = &nmi_sysclass, +}; + +static int __init init_nmi_wdt_sysfs(void) { - if (nmi_active) - register_syscore_ops(&nmi_syscore_ops); + int error; - return 0; + if (!nmi_active) + return 0; + + error = sysdev_class_register(&nmi_sysclass); + if (!error) + error = sysdev_register(&device_nmi_wdt); + return error; } -late_initcall(init_nmi_wdt_syscore); +late_initcall(init_nmi_wdt_sysfs); #endif /* CONFIG_PM */ diff --git a/trunk/arch/blackfin/kernel/time-ts.c b/trunk/arch/blackfin/kernel/time-ts.c index 9e9b60d969dc..cdb4beb6bc8f 100644 --- a/trunk/arch/blackfin/kernel/time-ts.c +++ b/trunk/arch/blackfin/kernel/time-ts.c @@ -23,6 +23,29 @@ #include #include +/* 
Accelerators for sched_clock() + * convert from cycles(64bits) => nanoseconds (64bits) + * basic equation: + * ns = cycles / (freq / ns_per_sec) + * ns = cycles * (ns_per_sec / freq) + * ns = cycles * (10^9 / (cpu_khz * 10^3)) + * ns = cycles * (10^6 / cpu_khz) + * + * Then we use scaling math (suggested by george@mvista.com) to get: + * ns = cycles * (10^6 * SC / cpu_khz) / SC + * ns = cycles * cyc2ns_scale / SC + * + * And since SC is a constant power of two, we can convert the div + * into a shift. + * + * We can use khz divisor instead of mhz to keep a better precision, since + * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. + * (mathieu.desnoyers@polymtl.ca) + * + * -johnstul@us.ibm.com "math is hard, lets go shopping!" + */ + +#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ #if defined(CONFIG_CYCLES_CLOCKSOURCE) @@ -40,6 +63,7 @@ static struct clocksource bfin_cs_cycles = { .rating = 400, .read = bfin_read_cycles, .mask = CLOCKSOURCE_MASK(64), + .shift = CYC2NS_SCALE_FACTOR, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -51,7 +75,10 @@ static inline unsigned long long bfin_cs_cycles_sched_clock(void) static int __init bfin_cs_cycles_init(void) { - if (clocksource_register_hz(&bfin_cs_cycles, get_cclk())) + bfin_cs_cycles.mult = \ + clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift); + + if (clocksource_register(&bfin_cs_cycles)) panic("failed to register clocksource"); return 0; @@ -84,6 +111,7 @@ static struct clocksource bfin_cs_gptimer0 = { .rating = 350, .read = bfin_read_gptimer0, .mask = CLOCKSOURCE_MASK(32), + .shift = CYC2NS_SCALE_FACTOR, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -97,7 +125,10 @@ static int __init bfin_cs_gptimer0_init(void) { setup_gptimer0(); - if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk())) + bfin_cs_gptimer0.mult = \ + clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift); + + if (clocksource_register(&bfin_cs_gptimer0)) panic("failed to register clocksource"); return 0; diff --git a/trunk/arch/blackfin/mach-common/dpmc.c b/trunk/arch/blackfin/mach-common/dpmc.c index 5e4112e518a9..382099fd5561 100644 --- a/trunk/arch/blackfin/mach-common/dpmc.c +++ b/trunk/arch/blackfin/mach-common/dpmc.c @@ -19,6 +19,9 @@ #define DRIVER_NAME "bfin dpmc" +#define dprintk(msg...) 
\ + cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg) + struct bfin_dpmc_platform_data *pdata; /** diff --git a/trunk/arch/blackfin/mach-common/smp.c b/trunk/arch/blackfin/mach-common/smp.c index 1fbd94c44457..8bce5ed031e4 100644 --- a/trunk/arch/blackfin/mach-common/smp.c +++ b/trunk/arch/blackfin/mach-common/smp.c @@ -177,9 +177,6 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) while (msg_queue->count) { msg = &msg_queue->ipi_message[msg_queue->head]; switch (msg->type) { - case BFIN_IPI_RESCHEDULE: - scheduler_ipi(); - break; case BFIN_IPI_CALL_FUNC: spin_unlock_irqrestore(&msg_queue->lock, flags); ipi_call_function(cpu, msg); diff --git a/trunk/arch/cris/arch-v32/kernel/smp.c b/trunk/arch/cris/arch-v32/kernel/smp.c index 66cc75657e2f..4c9e3e1ba5d1 100644 --- a/trunk/arch/cris/arch-v32/kernel/smp.c +++ b/trunk/arch/cris/arch-v32/kernel/smp.c @@ -342,18 +342,15 @@ irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id) ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi); - if (ipi.vector & IPI_SCHEDULE) { - scheduler_ipi(); - } if (ipi.vector & IPI_CALL) { - func(info); + func(info); } if (ipi.vector & IPI_FLUSH_TLB) { - if (flush_mm == FLUSH_ALL) - __flush_tlb_all(); - else if (flush_vma == FLUSH_ALL) + if (flush_mm == FLUSH_ALL) + __flush_tlb_all(); + else if (flush_vma == FLUSH_ALL) __flush_tlb_mm(flush_mm); - else + else __flush_tlb_page(flush_vma, flush_addr); } diff --git a/trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index f09b174244d5..22f61526a8e1 100644 --- a/trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c @@ -23,6 +23,8 @@ #include #include +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) + MODULE_AUTHOR("Venkatesh Pallipadi"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); @@ -45,12 +47,12 @@ processor_set_pstate ( { s64 retval; - pr_debug("processor_set_pstate\n"); + dprintk("processor_set_pstate\n"); retval = ia64_pal_set_pstate((u64)value); if (retval) { - pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", + dprintk("Failed to set freq to 0x%x, with error 0x%lx\n", value, retval); return -ENODEV; } @@ -65,14 +67,14 @@ processor_get_pstate ( u64 pstate_index = 0; s64 retval; - pr_debug("processor_get_pstate\n"); + dprintk("processor_get_pstate\n"); retval = ia64_pal_get_pstate(&pstate_index, PAL_GET_PSTATE_TYPE_INSTANT); *value = (u32) pstate_index; if (retval) - pr_debug("Failed to get current freq with " + dprintk("Failed to get current freq with " "error 0x%lx, idx 0x%x\n", retval, *value); return (int)retval; @@ -88,7 +90,7 @@ extract_clock ( { unsigned long i; - pr_debug("extract_clock\n"); + dprintk("extract_clock\n"); for (i = 0; i < data->acpi_data.state_count; i++) { if (value == data->acpi_data.states[i].status) @@ -108,7 +110,7 @@ processor_get_freq ( cpumask_t saved_mask; unsigned long clock_freq; - pr_debug("processor_get_freq\n"); + dprintk("processor_get_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -146,7 +148,7 @@ processor_set_freq ( cpumask_t saved_mask; int retval; - pr_debug("processor_set_freq\n"); + dprintk("processor_set_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -157,16 +159,16 @@ processor_set_freq ( if (state == data->acpi_data.state) { if (unlikely(data->resume)) { - pr_debug("Called after resume, resetting to P%d\n", state); + 
dprintk("Called after resume, resetting to P%d\n", state); data->resume = 0; } else { - pr_debug("Already at target state (P%d)\n", state); + dprintk("Already at target state (P%d)\n", state); retval = 0; goto migrate_end; } } - pr_debug("Transitioning from P%d to P%d\n", + dprintk("Transitioning from P%d to P%d\n", data->acpi_data.state, state); /* cpufreq frequency struct */ @@ -184,7 +186,7 @@ processor_set_freq ( value = (u32) data->acpi_data.states[state].control; - pr_debug("Transitioning to state: 0x%08x\n", value); + dprintk("Transitioning to state: 0x%08x\n", value); ret = processor_set_pstate(value); if (ret) { @@ -217,7 +219,7 @@ acpi_cpufreq_get ( { struct cpufreq_acpi_io *data = acpi_io_data[cpu]; - pr_debug("acpi_cpufreq_get\n"); + dprintk("acpi_cpufreq_get\n"); return processor_get_freq(data, cpu); } @@ -233,7 +235,7 @@ acpi_cpufreq_target ( unsigned int next_state = 0; unsigned int result = 0; - pr_debug("acpi_cpufreq_setpolicy\n"); + dprintk("acpi_cpufreq_setpolicy\n"); result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); @@ -253,7 +255,7 @@ acpi_cpufreq_verify ( unsigned int result = 0; struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - pr_debug("acpi_cpufreq_verify\n"); + dprintk("acpi_cpufreq_verify\n"); result = cpufreq_frequency_table_verify(policy, data->freq_table); @@ -271,7 +273,7 @@ acpi_cpufreq_cpu_init ( struct cpufreq_acpi_io *data; unsigned int result = 0; - pr_debug("acpi_cpufreq_cpu_init\n"); + dprintk("acpi_cpufreq_cpu_init\n"); data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); if (!data) @@ -286,7 +288,7 @@ acpi_cpufreq_cpu_init ( /* capability check */ if (data->acpi_data.state_count <= 1) { - pr_debug("No P-States\n"); + dprintk("No P-States\n"); result = -ENODEV; goto err_unreg; } @@ -295,7 +297,7 @@ acpi_cpufreq_cpu_init ( ACPI_ADR_SPACE_FIXED_HARDWARE) || (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { - pr_debug("Unsupported address space [%d, %d]\n", + dprintk("Unsupported address space [%d, %d]\n", (u32) (data->acpi_data.control_register.space_id), (u32) (data->acpi_data.status_register.space_id)); result = -ENODEV; @@ -346,7 +348,7 @@ acpi_cpufreq_cpu_init ( "activated.\n", cpu); for (i = 0; i < data->acpi_data.state_count; i++) - pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", + dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", (i == data->acpi_data.state?'*':' '), i, (u32) data->acpi_data.states[i].core_frequency, (u32) data->acpi_data.states[i].power, @@ -381,7 +383,7 @@ acpi_cpufreq_cpu_exit ( { struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - pr_debug("acpi_cpufreq_cpu_exit\n"); + dprintk("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); @@ -416,7 +418,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = { static int __init acpi_cpufreq_init (void) { - pr_debug("acpi_cpufreq_init\n"); + dprintk("acpi_cpufreq_init\n"); return cpufreq_register_driver(&acpi_cpufreq_driver); } @@ -425,7 +427,7 @@ acpi_cpufreq_init (void) static void __exit acpi_cpufreq_exit (void) { - pr_debug("acpi_cpufreq_exit\n"); + dprintk("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); return; diff --git a/trunk/arch/ia64/kernel/cyclone.c b/trunk/arch/ia64/kernel/cyclone.c index f64097b5118a..1b811c61bdc6 100644 --- a/trunk/arch/ia64/kernel/cyclone.c +++ b/trunk/arch/ia64/kernel/cyclone.c @@ -31,6 +31,8 @@ static struct clocksource clocksource_cyclone = { .rating = 300, .read = 
read_cyclone, .mask = (1LL << 40) - 1, + .mult = 0, /*to be calculated*/ + .shift = 16, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -116,7 +118,9 @@ int __init init_cyclone_clock(void) /* initialize last tick */ cyclone_mc = cyclone_timer; clocksource_cyclone.fsys_mmio = cyclone_timer; - clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ); + clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, + clocksource_cyclone.shift); + clocksource_register(&clocksource_cyclone); return 0; } diff --git a/trunk/arch/ia64/kernel/irq_ia64.c b/trunk/arch/ia64/kernel/irq_ia64.c index 782c3a357f24..5b704740f160 100644 --- a/trunk/arch/ia64/kernel/irq_ia64.c +++ b/trunk/arch/ia64/kernel/irq_ia64.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include @@ -497,7 +496,6 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) smp_local_flush_tlb(); kstat_incr_irqs_this_cpu(irq, desc); } else if (unlikely(IS_RESCHEDULE(vector))) { - scheduler_ipi(); kstat_incr_irqs_this_cpu(irq, desc); } else { ia64_setreg(_IA64_REG_CR_TPR, vector); diff --git a/trunk/arch/ia64/kernel/time.c b/trunk/arch/ia64/kernel/time.c index 04440cc09b40..156ad803d5b7 100644 --- a/trunk/arch/ia64/kernel/time.c +++ b/trunk/arch/ia64/kernel/time.c @@ -73,6 +73,8 @@ static struct clocksource clocksource_itc = { .rating = 350, .read = itc_get_cycles, .mask = CLOCKSOURCE_MASK(64), + .mult = 0, /*to be calculated*/ + .shift = 16, .flags = CLOCK_SOURCE_IS_CONTINUOUS, #ifdef CONFIG_PARAVIRT .resume = paravirt_clocksource_resume, @@ -363,8 +365,11 @@ ia64_init_itm (void) ia64_cpu_local_tick(); if (!itc_clocksource) { - clocksource_register_hz(&clocksource_itc, - local_cpu_data->itc_freq); + /* Sort out mult/shift values: */ + clocksource_itc.mult = + clocksource_hz2mult(local_cpu_data->itc_freq, + clocksource_itc.shift); + clocksource_register(&clocksource_itc); itc_clocksource = &clocksource_itc; } } diff --git a/trunk/arch/ia64/sn/kernel/sn2/timer.c b/trunk/arch/ia64/sn/kernel/sn2/timer.c index c34efda122e1..21d6f09e3447 100644 --- a/trunk/arch/ia64/sn/kernel/sn2/timer.c +++ b/trunk/arch/ia64/sn/kernel/sn2/timer.c @@ -33,6 +33,8 @@ static struct clocksource clocksource_sn2 = { .rating = 450, .read = read_sn2, .mask = (1LL << 55) - 1, + .mult = 0, + .shift = 10, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -55,7 +57,9 @@ ia64_sn_udelay (unsigned long usecs) void __init sn_timer_init(void) { clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR; - clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second); + clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, + clocksource_sn2.shift); + clocksource_register(&clocksource_sn2); ia64_udelay = &ia64_sn_udelay; } diff --git a/trunk/arch/ia64/xen/irq_xen.c b/trunk/arch/ia64/xen/irq_xen.c index b279e142c633..108bb858acf2 100644 --- a/trunk/arch/ia64/xen/irq_xen.c +++ b/trunk/arch/ia64/xen/irq_xen.c @@ -92,8 +92,6 @@ static unsigned short saved_irq_cnt; static int xen_slab_ready; #ifdef CONFIG_SMP -#include - /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, * it ends up to issue several memory accesses upon percpu data and * thus adds unnecessary traffic to other paths. 
@@ -101,13 +99,7 @@ static int xen_slab_ready; static irqreturn_t xen_dummy_handler(int irq, void *dev_id) { - return IRQ_HANDLED; -} -static irqreturn_t -xen_resched_handler(int irq, void *dev_id) -{ - scheduler_ipi(); return IRQ_HANDLED; } @@ -118,7 +110,7 @@ static struct irqaction xen_ipi_irqaction = { }; static struct irqaction xen_resched_irqaction = { - .handler = xen_resched_handler, + .handler = xen_dummy_handler, .flags = IRQF_DISABLED, .name = "resched" }; diff --git a/trunk/arch/m32r/kernel/smp.c b/trunk/arch/m32r/kernel/smp.c index fc10b39893d4..31cef20b2996 100644 --- a/trunk/arch/m32r/kernel/smp.c +++ b/trunk/arch/m32r/kernel/smp.c @@ -122,6 +122,8 @@ void smp_send_reschedule(int cpu_id) * * Description: This routine executes on CPU which received * 'RESCHEDULE_IPI'. + * Rescheduling is processed at the exit of interrupt + * operation. * * Born on Date: 2002.02.05 * @@ -136,7 +138,7 @@ void smp_send_reschedule(int cpu_id) *==========================================================================*/ void smp_reschedule_interrupt(void) { - scheduler_ipi(); + /* nothing to do */ } /*==========================================================================* diff --git a/trunk/arch/m68k/atari/atakeyb.c b/trunk/arch/m68k/atari/atakeyb.c index 95022b04b62d..b995513d527f 100644 --- a/trunk/arch/m68k/atari/atakeyb.c +++ b/trunk/arch/m68k/atari/atakeyb.c @@ -36,10 +36,13 @@ /* Hook for MIDI serial driver */ void (*atari_MIDI_interrupt_hook) (void); +/* Hook for mouse driver */ +void (*atari_mouse_interrupt_hook) (char *); /* Hook for keyboard inputdev driver */ void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); /* Hook for mouse inputdev driver */ void (*atari_input_mouse_interrupt_hook) (char *); +EXPORT_SYMBOL(atari_mouse_interrupt_hook); EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook); EXPORT_SYMBOL(atari_input_mouse_interrupt_hook); @@ -260,8 +263,8 @@ static irqreturn_t atari_keyboard_interrupt(int irq, void *dummy) kb_state.buf[kb_state.len++] = scancode; if (kb_state.len == 3) { kb_state.state = KEYBOARD; - if (atari_input_mouse_interrupt_hook) - atari_input_mouse_interrupt_hook(kb_state.buf); + if (atari_mouse_interrupt_hook) + atari_mouse_interrupt_hook(kb_state.buf); } break; @@ -572,7 +575,7 @@ int atari_keyb_init(void) kb_state.len = 0; error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, - IRQ_TYPE_SLOW, "keyboard,mouse,MIDI", + IRQ_TYPE_SLOW, "keyboard/mouse/MIDI", atari_keyboard_interrupt); if (error) return error; diff --git a/trunk/arch/m68k/atari/stdma.c b/trunk/arch/m68k/atari/stdma.c index ddbf43ca8858..604329fafbb8 100644 --- a/trunk/arch/m68k/atari/stdma.c +++ b/trunk/arch/m68k/atari/stdma.c @@ -180,7 +180,7 @@ void __init stdma_init(void) { stdma_isr = NULL; if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, - "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) + "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int)) pr_err("Couldn't register ST-DMA interrupt\n"); } diff --git a/trunk/arch/m68k/include/asm/atarikb.h b/trunk/arch/m68k/include/asm/atarikb.h index 68f3622bf591..546e7da5804f 100644 --- a/trunk/arch/m68k/include/asm/atarikb.h +++ b/trunk/arch/m68k/include/asm/atarikb.h @@ -34,6 +34,8 @@ void ikbd_joystick_disable(void); /* Hook for MIDI serial driver */ extern void (*atari_MIDI_interrupt_hook) (void); +/* Hook for mouse driver */ +extern void (*atari_mouse_interrupt_hook) (char *); /* Hook for keyboard inputdev driver */ extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); /* Hook for mouse 
inputdev driver */ diff --git a/trunk/arch/m68k/include/asm/bitops_mm.h b/trunk/arch/m68k/include/asm/bitops_mm.h index e9020f88a748..9d69f6e62365 100644 --- a/trunk/arch/m68k/include/asm/bitops_mm.h +++ b/trunk/arch/m68k/include/asm/bitops_mm.h @@ -181,15 +181,14 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, { const unsigned long *p = vaddr; int res = 32; - unsigned int words; unsigned long num; if (!size) return 0; - words = (size + 31) >> 5; + size = (size + 31) >> 5; while (!(num = ~*p++)) { - if (!--words) + if (!--size) goto out; } @@ -197,8 +196,7 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, : "=d" (res) : "d" (num & -num)); res ^= 31; out: - res += ((long)p - (long)vaddr - 4) * 8; - return res < size ? res : size; + return ((long)p - (long)vaddr - 4) * 8 + res; } static inline int find_next_zero_bit(const unsigned long *vaddr, int size, @@ -217,32 +215,27 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size, /* Look for zero in first longword */ __asm__ __volatile__ ("bfffo %1{#0,#0},%0" : "=d" (res) : "d" (num & -num)); - if (res < 32) { - offset += res ^ 31; - return offset < size ? offset : size; - } + if (res < 32) + return offset + (res ^ 31); offset += 32; - - if (offset >= size) - return size; } /* No zero yet, search remaining full bytes for a zero */ - return offset + find_first_zero_bit(p, size - offset); + res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); + return offset + res; } static inline int find_first_bit(const unsigned long *vaddr, unsigned size) { const unsigned long *p = vaddr; int res = 32; - unsigned int words; unsigned long num; if (!size) return 0; - words = (size + 31) >> 5; + size = (size + 31) >> 5; while (!(num = *p++)) { - if (!--words) + if (!--size) goto out; } @@ -250,8 +243,7 @@ static inline int find_first_bit(const unsigned long *vaddr, unsigned size) : "=d" (res) : "d" (num & -num)); res ^= 31; out: - res += ((long)p - (long)vaddr - 4) * 8; - return res < size ? res : size; + return ((long)p - (long)vaddr - 4) * 8 + res; } static inline int find_next_bit(const unsigned long *vaddr, int size, @@ -270,17 +262,13 @@ static inline int find_next_bit(const unsigned long *vaddr, int size, /* Look for one in first longword */ __asm__ __volatile__ ("bfffo %1{#0,#0},%0" : "=d" (res) : "d" (num & -num)); - if (res < 32) { - offset += res ^ 31; - return offset < size ? offset : size; - } + if (res < 32) + return offset + (res ^ 31); offset += 32; - - if (offset >= size) - return size; } /* No one yet, search remaining full bytes for a one */ - return offset + find_first_bit(p, size - offset); + res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); + return offset + res; } /* @@ -378,25 +366,23 @@ static inline int test_bit_le(int nr, const void *vaddr) static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) { const unsigned long *p = vaddr, *addr = vaddr; - int res = 0; - unsigned int words; + int res; if (!size) return 0; - words = (size >> 5) + ((size & 31) > 0); - while (*p++ == ~0UL) { - if (--words == 0) - goto out; + size = (size >> 5) + ((size & 31) > 0); + while (*p++ == ~0UL) + { + if (--size == 0) + return (p - addr) << 5; } --p; for (res = 0; res < 32; res++) if (!test_bit_le(res, p)) break; -out: - res += (p - addr) * 32; - return res < size ? 
res : size; + return (p - addr) * 32 + res; } static inline unsigned long find_next_zero_bit_le(const void *addr, @@ -414,15 +400,10 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, offset -= bit; /* Look for zero in first longword */ for (res = bit; res < 32; res++) - if (!test_bit_le(res, p)) { - offset += res; - return offset < size ? offset : size; - } + if (!test_bit_le(res, p)) + return offset + res; p++; offset += 32; - - if (offset >= size) - return size; } /* No zero yet, search remaining full bytes for a zero */ return offset + find_first_zero_bit_le(p, size - offset); @@ -431,25 +412,22 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, static inline int find_first_bit_le(const void *vaddr, unsigned size) { const unsigned long *p = vaddr, *addr = vaddr; - int res = 0; - unsigned int words; + int res; if (!size) return 0; - words = (size >> 5) + ((size & 31) > 0); + size = (size >> 5) + ((size & 31) > 0); while (*p++ == 0UL) { - if (--words == 0) - goto out; + if (--size == 0) + return (p - addr) << 5; } --p; for (res = 0; res < 32; res++) if (test_bit_le(res, p)) break; -out: - res += (p - addr) * 32; - return res < size ? res : size; + return (p - addr) * 32 + res; } static inline unsigned long find_next_bit_le(const void *addr, @@ -467,15 +445,10 @@ static inline unsigned long find_next_bit_le(const void *addr, offset -= bit; /* Look for one in first longword */ for (res = bit; res < 32; res++) - if (test_bit_le(res, p)) { - offset += res; - return offset < size ? offset : size; - } + if (test_bit_le(res, p)) + return offset + res; p++; offset += 32; - - if (offset >= size) - return size; } /* No set bit yet, search remaining full bytes for a set bit */ return offset + find_first_bit_le(p, size - offset); diff --git a/trunk/arch/m68k/include/asm/unistd.h b/trunk/arch/m68k/include/asm/unistd.h index f3b649de2a1b..29e17907d9f2 100644 --- a/trunk/arch/m68k/include/asm/unistd.h +++ b/trunk/arch/m68k/include/asm/unistd.h @@ -22,7 +22,7 @@ #define __NR_mknod 14 #define __NR_chmod 15 #define __NR_chown 16 -/*#define __NR_break 17*/ +#define __NR_break 17 #define __NR_oldstat 18 #define __NR_lseek 19 #define __NR_getpid 20 @@ -36,11 +36,11 @@ #define __NR_oldfstat 28 #define __NR_pause 29 #define __NR_utime 30 -/*#define __NR_stty 31*/ -/*#define __NR_gtty 32*/ +#define __NR_stty 31 +#define __NR_gtty 32 #define __NR_access 33 #define __NR_nice 34 -/*#define __NR_ftime 35*/ +#define __NR_ftime 35 #define __NR_sync 36 #define __NR_kill 37 #define __NR_rename 38 @@ -49,7 +49,7 @@ #define __NR_dup 41 #define __NR_pipe 42 #define __NR_times 43 -/*#define __NR_prof 44*/ +#define __NR_prof 44 #define __NR_brk 45 #define __NR_setgid 46 #define __NR_getgid 47 @@ -58,13 +58,13 @@ #define __NR_getegid 50 #define __NR_acct 51 #define __NR_umount2 52 -/*#define __NR_lock 53*/ +#define __NR_lock 53 #define __NR_ioctl 54 #define __NR_fcntl 55 -/*#define __NR_mpx 56*/ +#define __NR_mpx 56 #define __NR_setpgid 57 -/*#define __NR_ulimit 58*/ -/*#define __NR_oldolduname 59*/ +#define __NR_ulimit 58 +#define __NR_oldolduname 59 #define __NR_umask 60 #define __NR_chroot 61 #define __NR_ustat 62 @@ -103,10 +103,10 @@ #define __NR_fchown 95 #define __NR_getpriority 96 #define __NR_setpriority 97 -/*#define __NR_profil 98*/ +#define __NR_profil 98 #define __NR_statfs 99 #define __NR_fstatfs 100 -/*#define __NR_ioperm 101*/ +#define __NR_ioperm 101 #define __NR_socketcall 102 #define __NR_syslog 103 #define __NR_setitimer 104 @@ -114,11 +114,11 @@ #define __NR_stat 
106 #define __NR_lstat 107 #define __NR_fstat 108 -/*#define __NR_olduname 109*/ -/*#define __NR_iopl 110*/ /* not supported */ +#define __NR_olduname 109 +#define __NR_iopl /* 110 */ not supported #define __NR_vhangup 111 -/*#define __NR_idle 112*/ /* Obsolete */ -/*#define __NR_vm86 113*/ /* not supported */ +#define __NR_idle /* 112 */ Obsolete +#define __NR_vm86 /* 113 */ not supported #define __NR_wait4 114 #define __NR_swapoff 115 #define __NR_sysinfo 116 @@ -132,17 +132,17 @@ #define __NR_adjtimex 124 #define __NR_mprotect 125 #define __NR_sigprocmask 126 -/*#define __NR_create_module 127*/ +#define __NR_create_module 127 #define __NR_init_module 128 #define __NR_delete_module 129 -/*#define __NR_get_kernel_syms 130*/ +#define __NR_get_kernel_syms 130 #define __NR_quotactl 131 #define __NR_getpgid 132 #define __NR_fchdir 133 #define __NR_bdflush 134 #define __NR_sysfs 135 #define __NR_personality 136 -/*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */ +#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ #define __NR_setfsuid 138 #define __NR_setfsgid 139 #define __NR__llseek 140 @@ -172,7 +172,7 @@ #define __NR_setresuid 164 #define __NR_getresuid 165 #define __NR_getpagesize 166 -/*#define __NR_query_module 167*/ +#define __NR_query_module 167 #define __NR_poll 168 #define __NR_nfsservctl 169 #define __NR_setresgid 170 @@ -193,8 +193,8 @@ #define __NR_capset 185 #define __NR_sigaltstack 186 #define __NR_sendfile 187 -/*#define __NR_getpmsg 188*/ /* some people actually want streams */ -/*#define __NR_putpmsg 189*/ /* some people actually want streams */ +#define __NR_getpmsg 188 /* some people actually want streams */ +#define __NR_putpmsg 189 /* some people actually want streams */ #define __NR_vfork 190 #define __NR_ugetrlimit 191 #define __NR_mmap2 192 @@ -223,8 +223,6 @@ #define __NR_setfsuid32 215 #define __NR_setfsgid32 216 #define __NR_pivot_root 217 -/* 218*/ -/* 219*/ #define __NR_getdents64 220 #define __NR_gettid 221 #define __NR_tkill 222 @@ -283,7 +281,7 @@ #define __NR_mq_notify 275 #define __NR_mq_getsetattr 276 #define __NR_waitid 277 -/*#define __NR_vserver 278*/ +#define __NR_vserver 278 #define __NR_add_key 279 #define __NR_request_key 280 #define __NR_keyctl 281 diff --git a/trunk/arch/m68k/kernel/Makefile_mm b/trunk/arch/m68k/kernel/Makefile_mm index aced67804579..55d5d6b680a2 100644 --- a/trunk/arch/m68k/kernel/Makefile_mm +++ b/trunk/arch/m68k/kernel/Makefile_mm @@ -10,7 +10,7 @@ endif extra-y += vmlinux.lds obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ - sys_m68k.o time.o setup.o m68k_ksyms.o devres.o syscalltable.o + sys_m68k.o time.o setup.o m68k_ksyms.o devres.o devres-y = ../../../kernel/irq/devres.o diff --git a/trunk/arch/m68k/kernel/entry_mm.S b/trunk/arch/m68k/kernel/entry_mm.S index bd0ec05263b2..1359ee659574 100644 --- a/trunk/arch/m68k/kernel/entry_mm.S +++ b/trunk/arch/m68k/kernel/entry_mm.S @@ -407,3 +407,351 @@ resume: rts +.data +ALIGN +sys_call_table: + .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ + .long sys_exit + .long sys_fork + .long sys_read + .long sys_write + .long sys_open /* 5 */ + .long sys_close + .long sys_waitpid + .long sys_creat + .long sys_link + .long sys_unlink /* 10 */ + .long sys_execve + .long sys_chdir + .long sys_time + .long sys_mknod + .long sys_chmod /* 15 */ + .long sys_chown16 + .long sys_ni_syscall /* old break syscall holder */ + .long sys_stat + .long sys_lseek + .long sys_getpid /* 20 */ + .long sys_mount + .long 
sys_oldumount + .long sys_setuid16 + .long sys_getuid16 + .long sys_stime /* 25 */ + .long sys_ptrace + .long sys_alarm + .long sys_fstat + .long sys_pause + .long sys_utime /* 30 */ + .long sys_ni_syscall /* old stty syscall holder */ + .long sys_ni_syscall /* old gtty syscall holder */ + .long sys_access + .long sys_nice + .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ + .long sys_sync + .long sys_kill + .long sys_rename + .long sys_mkdir + .long sys_rmdir /* 40 */ + .long sys_dup + .long sys_pipe + .long sys_times + .long sys_ni_syscall /* old prof syscall holder */ + .long sys_brk /* 45 */ + .long sys_setgid16 + .long sys_getgid16 + .long sys_signal + .long sys_geteuid16 + .long sys_getegid16 /* 50 */ + .long sys_acct + .long sys_umount /* recycled never used phys() */ + .long sys_ni_syscall /* old lock syscall holder */ + .long sys_ioctl + .long sys_fcntl /* 55 */ + .long sys_ni_syscall /* old mpx syscall holder */ + .long sys_setpgid + .long sys_ni_syscall /* old ulimit syscall holder */ + .long sys_ni_syscall + .long sys_umask /* 60 */ + .long sys_chroot + .long sys_ustat + .long sys_dup2 + .long sys_getppid + .long sys_getpgrp /* 65 */ + .long sys_setsid + .long sys_sigaction + .long sys_sgetmask + .long sys_ssetmask + .long sys_setreuid16 /* 70 */ + .long sys_setregid16 + .long sys_sigsuspend + .long sys_sigpending + .long sys_sethostname + .long sys_setrlimit /* 75 */ + .long sys_old_getrlimit + .long sys_getrusage + .long sys_gettimeofday + .long sys_settimeofday + .long sys_getgroups16 /* 80 */ + .long sys_setgroups16 + .long sys_old_select + .long sys_symlink + .long sys_lstat + .long sys_readlink /* 85 */ + .long sys_uselib + .long sys_swapon + .long sys_reboot + .long sys_old_readdir + .long sys_old_mmap /* 90 */ + .long sys_munmap + .long sys_truncate + .long sys_ftruncate + .long sys_fchmod + .long sys_fchown16 /* 95 */ + .long sys_getpriority + .long sys_setpriority + .long sys_ni_syscall /* old profil syscall holder */ + .long sys_statfs + .long sys_fstatfs /* 100 */ + .long sys_ni_syscall /* ioperm for i386 */ + .long sys_socketcall + .long sys_syslog + .long sys_setitimer + .long sys_getitimer /* 105 */ + .long sys_newstat + .long sys_newlstat + .long sys_newfstat + .long sys_ni_syscall + .long sys_ni_syscall /* 110 */ /* iopl for i386 */ + .long sys_vhangup + .long sys_ni_syscall /* obsolete idle() syscall */ + .long sys_ni_syscall /* vm86old for i386 */ + .long sys_wait4 + .long sys_swapoff /* 115 */ + .long sys_sysinfo + .long sys_ipc + .long sys_fsync + .long sys_sigreturn + .long sys_clone /* 120 */ + .long sys_setdomainname + .long sys_newuname + .long sys_cacheflush /* modify_ldt for i386 */ + .long sys_adjtimex + .long sys_mprotect /* 125 */ + .long sys_sigprocmask + .long sys_ni_syscall /* old "create_module" */ + .long sys_init_module + .long sys_delete_module + .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ + .long sys_quotactl + .long sys_getpgid + .long sys_fchdir + .long sys_bdflush + .long sys_sysfs /* 135 */ + .long sys_personality + .long sys_ni_syscall /* for afs_syscall */ + .long sys_setfsuid16 + .long sys_setfsgid16 + .long sys_llseek /* 140 */ + .long sys_getdents + .long sys_select + .long sys_flock + .long sys_msync + .long sys_readv /* 145 */ + .long sys_writev + .long sys_getsid + .long sys_fdatasync + .long sys_sysctl + .long sys_mlock /* 150 */ + .long sys_munlock + .long sys_mlockall + .long sys_munlockall + .long sys_sched_setparam + .long sys_sched_getparam /* 155 */ + .long sys_sched_setscheduler + .long 
sys_sched_getscheduler + .long sys_sched_yield + .long sys_sched_get_priority_max + .long sys_sched_get_priority_min /* 160 */ + .long sys_sched_rr_get_interval + .long sys_nanosleep + .long sys_mremap + .long sys_setresuid16 + .long sys_getresuid16 /* 165 */ + .long sys_getpagesize + .long sys_ni_syscall /* old sys_query_module */ + .long sys_poll + .long sys_nfsservctl + .long sys_setresgid16 /* 170 */ + .long sys_getresgid16 + .long sys_prctl + .long sys_rt_sigreturn + .long sys_rt_sigaction + .long sys_rt_sigprocmask /* 175 */ + .long sys_rt_sigpending + .long sys_rt_sigtimedwait + .long sys_rt_sigqueueinfo + .long sys_rt_sigsuspend + .long sys_pread64 /* 180 */ + .long sys_pwrite64 + .long sys_lchown16; + .long sys_getcwd + .long sys_capget + .long sys_capset /* 185 */ + .long sys_sigaltstack + .long sys_sendfile + .long sys_ni_syscall /* streams1 */ + .long sys_ni_syscall /* streams2 */ + .long sys_vfork /* 190 */ + .long sys_getrlimit + .long sys_mmap2 + .long sys_truncate64 + .long sys_ftruncate64 + .long sys_stat64 /* 195 */ + .long sys_lstat64 + .long sys_fstat64 + .long sys_chown + .long sys_getuid + .long sys_getgid /* 200 */ + .long sys_geteuid + .long sys_getegid + .long sys_setreuid + .long sys_setregid + .long sys_getgroups /* 205 */ + .long sys_setgroups + .long sys_fchown + .long sys_setresuid + .long sys_getresuid + .long sys_setresgid /* 210 */ + .long sys_getresgid + .long sys_lchown + .long sys_setuid + .long sys_setgid + .long sys_setfsuid /* 215 */ + .long sys_setfsgid + .long sys_pivot_root + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_getdents64 /* 220 */ + .long sys_gettid + .long sys_tkill + .long sys_setxattr + .long sys_lsetxattr + .long sys_fsetxattr /* 225 */ + .long sys_getxattr + .long sys_lgetxattr + .long sys_fgetxattr + .long sys_listxattr + .long sys_llistxattr /* 230 */ + .long sys_flistxattr + .long sys_removexattr + .long sys_lremovexattr + .long sys_fremovexattr + .long sys_futex /* 235 */ + .long sys_sendfile64 + .long sys_mincore + .long sys_madvise + .long sys_fcntl64 + .long sys_readahead /* 240 */ + .long sys_io_setup + .long sys_io_destroy + .long sys_io_getevents + .long sys_io_submit + .long sys_io_cancel /* 245 */ + .long sys_fadvise64 + .long sys_exit_group + .long sys_lookup_dcookie + .long sys_epoll_create + .long sys_epoll_ctl /* 250 */ + .long sys_epoll_wait + .long sys_remap_file_pages + .long sys_set_tid_address + .long sys_timer_create + .long sys_timer_settime /* 255 */ + .long sys_timer_gettime + .long sys_timer_getoverrun + .long sys_timer_delete + .long sys_clock_settime + .long sys_clock_gettime /* 260 */ + .long sys_clock_getres + .long sys_clock_nanosleep + .long sys_statfs64 + .long sys_fstatfs64 + .long sys_tgkill /* 265 */ + .long sys_utimes + .long sys_fadvise64_64 + .long sys_mbind + .long sys_get_mempolicy + .long sys_set_mempolicy /* 270 */ + .long sys_mq_open + .long sys_mq_unlink + .long sys_mq_timedsend + .long sys_mq_timedreceive + .long sys_mq_notify /* 275 */ + .long sys_mq_getsetattr + .long sys_waitid + .long sys_ni_syscall /* for sys_vserver */ + .long sys_add_key + .long sys_request_key /* 280 */ + .long sys_keyctl + .long sys_ioprio_set + .long sys_ioprio_get + .long sys_inotify_init + .long sys_inotify_add_watch /* 285 */ + .long sys_inotify_rm_watch + .long sys_migrate_pages + .long sys_openat + .long sys_mkdirat + .long sys_mknodat /* 290 */ + .long sys_fchownat + .long sys_futimesat + .long sys_fstatat64 + .long sys_unlinkat + .long sys_renameat /* 295 */ + .long sys_linkat + .long 
sys_symlinkat + .long sys_readlinkat + .long sys_fchmodat + .long sys_faccessat /* 300 */ + .long sys_ni_syscall /* Reserved for pselect6 */ + .long sys_ni_syscall /* Reserved for ppoll */ + .long sys_unshare + .long sys_set_robust_list + .long sys_get_robust_list /* 305 */ + .long sys_splice + .long sys_sync_file_range + .long sys_tee + .long sys_vmsplice + .long sys_move_pages /* 310 */ + .long sys_sched_setaffinity + .long sys_sched_getaffinity + .long sys_kexec_load + .long sys_getcpu + .long sys_epoll_pwait /* 315 */ + .long sys_utimensat + .long sys_signalfd + .long sys_timerfd_create + .long sys_eventfd + .long sys_fallocate /* 320 */ + .long sys_timerfd_settime + .long sys_timerfd_gettime + .long sys_signalfd4 + .long sys_eventfd2 + .long sys_epoll_create1 /* 325 */ + .long sys_dup3 + .long sys_pipe2 + .long sys_inotify_init1 + .long sys_preadv + .long sys_pwritev /* 330 */ + .long sys_rt_tgsigqueueinfo + .long sys_perf_event_open + .long sys_get_thread_area + .long sys_set_thread_area + .long sys_atomic_cmpxchg_32 /* 335 */ + .long sys_atomic_barrier + .long sys_fanotify_init + .long sys_fanotify_mark + .long sys_prlimit64 + .long sys_name_to_handle_at /* 340 */ + .long sys_open_by_handle_at + .long sys_clock_adjtime + .long sys_syncfs + diff --git a/trunk/arch/m68k/kernel/syscalltable.S b/trunk/arch/m68k/kernel/syscalltable.S index 5909e392cb1e..9b8393d8adb8 100644 --- a/trunk/arch/m68k/kernel/syscalltable.S +++ b/trunk/arch/m68k/kernel/syscalltable.S @@ -1,4 +1,6 @@ /* + * linux/arch/m68knommu/kernel/syscalltable.S + * * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com) * * Based on older entry.S files, the following copyrights apply: @@ -7,176 +9,171 @@ * Kenneth Albanowski , * Copyright (C) 2000 Lineo Inc. (www.lineo.com) * Copyright (C) 1991, 1992 Linus Torvalds - * - * Linux/m68k support by Hamish Macdonald */ #include #include +#include -#ifndef CONFIG_MMU -#define sys_mmap2 sys_mmap_pgoff -#endif - -.section .rodata +.text ALIGN ENTRY(sys_call_table) - .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ + .long sys_restart_syscall /* 0 - old "setup()" system call */ .long sys_exit .long sys_fork .long sys_read .long sys_write - .long sys_open /* 5 */ + .long sys_open /* 5 */ .long sys_close .long sys_waitpid .long sys_creat .long sys_link - .long sys_unlink /* 10 */ + .long sys_unlink /* 10 */ .long sys_execve .long sys_chdir .long sys_time .long sys_mknod - .long sys_chmod /* 15 */ + .long sys_chmod /* 15 */ .long sys_chown16 - .long sys_ni_syscall /* old break syscall holder */ + .long sys_ni_syscall /* old break syscall holder */ .long sys_stat .long sys_lseek - .long sys_getpid /* 20 */ + .long sys_getpid /* 20 */ .long sys_mount .long sys_oldumount .long sys_setuid16 .long sys_getuid16 - .long sys_stime /* 25 */ + .long sys_stime /* 25 */ .long sys_ptrace .long sys_alarm .long sys_fstat .long sys_pause - .long sys_utime /* 30 */ - .long sys_ni_syscall /* old stty syscall holder */ - .long sys_ni_syscall /* old gtty syscall holder */ + .long sys_utime /* 30 */ + .long sys_ni_syscall /* old stty syscall holder */ + .long sys_ni_syscall /* old gtty syscall holder */ .long sys_access .long sys_nice - .long sys_ni_syscall /* 35 - old ftime syscall holder */ + .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ .long sys_sync .long sys_kill .long sys_rename .long sys_mkdir - .long sys_rmdir /* 40 */ + .long sys_rmdir /* 40 */ .long sys_dup .long sys_pipe .long sys_times - .long sys_ni_syscall /* old prof syscall holder */ - .long 
sys_brk /* 45 */ + .long sys_ni_syscall /* old prof syscall holder */ + .long sys_brk /* 45 */ .long sys_setgid16 .long sys_getgid16 .long sys_signal .long sys_geteuid16 - .long sys_getegid16 /* 50 */ + .long sys_getegid16 /* 50 */ .long sys_acct - .long sys_umount /* recycled never used phys() */ - .long sys_ni_syscall /* old lock syscall holder */ + .long sys_umount /* recycled never used phys() */ + .long sys_ni_syscall /* old lock syscall holder */ .long sys_ioctl - .long sys_fcntl /* 55 */ - .long sys_ni_syscall /* old mpx syscall holder */ + .long sys_fcntl /* 55 */ + .long sys_ni_syscall /* old mpx syscall holder */ .long sys_setpgid - .long sys_ni_syscall /* old ulimit syscall holder */ + .long sys_ni_syscall /* old ulimit syscall holder */ .long sys_ni_syscall - .long sys_umask /* 60 */ + .long sys_umask /* 60 */ .long sys_chroot .long sys_ustat .long sys_dup2 .long sys_getppid - .long sys_getpgrp /* 65 */ + .long sys_getpgrp /* 65 */ .long sys_setsid .long sys_sigaction .long sys_sgetmask .long sys_ssetmask - .long sys_setreuid16 /* 70 */ + .long sys_setreuid16 /* 70 */ .long sys_setregid16 .long sys_sigsuspend .long sys_sigpending .long sys_sethostname - .long sys_setrlimit /* 75 */ + .long sys_setrlimit /* 75 */ .long sys_old_getrlimit .long sys_getrusage .long sys_gettimeofday .long sys_settimeofday - .long sys_getgroups16 /* 80 */ + .long sys_getgroups16 /* 80 */ .long sys_setgroups16 .long sys_old_select .long sys_symlink .long sys_lstat - .long sys_readlink /* 85 */ + .long sys_readlink /* 85 */ .long sys_uselib - .long sys_swapon + .long sys_ni_syscall /* sys_swapon */ .long sys_reboot .long sys_old_readdir - .long sys_old_mmap /* 90 */ + .long sys_old_mmap /* 90 */ .long sys_munmap .long sys_truncate .long sys_ftruncate .long sys_fchmod - .long sys_fchown16 /* 95 */ + .long sys_fchown16 /* 95 */ .long sys_getpriority .long sys_setpriority - .long sys_ni_syscall /* old profil syscall holder */ + .long sys_ni_syscall /* old profil syscall holder */ .long sys_statfs - .long sys_fstatfs /* 100 */ - .long sys_ni_syscall /* ioperm for i386 */ + .long sys_fstatfs /* 100 */ + .long sys_ni_syscall /* ioperm for i386 */ .long sys_socketcall .long sys_syslog .long sys_setitimer - .long sys_getitimer /* 105 */ + .long sys_getitimer /* 105 */ .long sys_newstat .long sys_newlstat .long sys_newfstat .long sys_ni_syscall - .long sys_ni_syscall /* 110 - iopl for i386 */ + .long sys_ni_syscall /* iopl for i386 */ /* 110 */ .long sys_vhangup - .long sys_ni_syscall /* obsolete idle() syscall */ - .long sys_ni_syscall /* vm86old for i386 */ + .long sys_ni_syscall /* obsolete idle() syscall */ + .long sys_ni_syscall /* vm86old for i386 */ .long sys_wait4 - .long sys_swapoff /* 115 */ + .long sys_ni_syscall /* 115 */ /* sys_swapoff */ .long sys_sysinfo .long sys_ipc .long sys_fsync .long sys_sigreturn - .long sys_clone /* 120 */ + .long sys_clone /* 120 */ .long sys_setdomainname .long sys_newuname - .long sys_cacheflush /* modify_ldt for i386 */ + .long sys_cacheflush /* modify_ldt for i386 */ .long sys_adjtimex - .long sys_mprotect /* 125 */ + .long sys_ni_syscall /* 125 */ /* sys_mprotect */ .long sys_sigprocmask - .long sys_ni_syscall /* old "create_module" */ + .long sys_ni_syscall /* old "creat_module" */ .long sys_init_module .long sys_delete_module - .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ + .long sys_ni_syscall /* 130: old "get_kernel_syms" */ .long sys_quotactl .long sys_getpgid .long sys_fchdir .long sys_bdflush - .long sys_sysfs /* 135 */ + .long sys_sysfs /* 135 */ 
.long sys_personality - .long sys_ni_syscall /* for afs_syscall */ + .long sys_ni_syscall /* for afs_syscall */ .long sys_setfsuid16 .long sys_setfsgid16 - .long sys_llseek /* 140 */ + .long sys_llseek /* 140 */ .long sys_getdents .long sys_select .long sys_flock - .long sys_msync - .long sys_readv /* 145 */ + .long sys_ni_syscall /* sys_msync */ + .long sys_readv /* 145 */ .long sys_writev .long sys_getsid .long sys_fdatasync .long sys_sysctl - .long sys_mlock /* 150 */ - .long sys_munlock - .long sys_mlockall - .long sys_munlockall + .long sys_ni_syscall /* 150 */ /* sys_mlock */ + .long sys_ni_syscall /* sys_munlock */ + .long sys_ni_syscall /* sys_mlockall */ + .long sys_ni_syscall /* sys_munlockall */ .long sys_sched_setparam - .long sys_sched_getparam /* 155 */ + .long sys_sched_getparam /* 155 */ .long sys_sched_setscheduler .long sys_sched_getscheduler .long sys_sched_yield @@ -184,124 +181,124 @@ ENTRY(sys_call_table) .long sys_sched_get_priority_min /* 160 */ .long sys_sched_rr_get_interval .long sys_nanosleep - .long sys_mremap + .long sys_ni_syscall /* sys_mremap */ .long sys_setresuid16 - .long sys_getresuid16 /* 165 */ - .long sys_getpagesize - .long sys_ni_syscall /* old "query_module" */ + .long sys_getresuid16 /* 165 */ + .long sys_getpagesize /* sys_getpagesize */ + .long sys_ni_syscall /* old "query_module" */ .long sys_poll - .long sys_nfsservctl - .long sys_setresgid16 /* 170 */ + .long sys_ni_syscall /* sys_nfsservctl */ + .long sys_setresgid16 /* 170 */ .long sys_getresgid16 .long sys_prctl .long sys_rt_sigreturn .long sys_rt_sigaction - .long sys_rt_sigprocmask /* 175 */ + .long sys_rt_sigprocmask /* 175 */ .long sys_rt_sigpending .long sys_rt_sigtimedwait .long sys_rt_sigqueueinfo .long sys_rt_sigsuspend - .long sys_pread64 /* 180 */ + .long sys_pread64 /* 180 */ .long sys_pwrite64 .long sys_lchown16 .long sys_getcwd .long sys_capget - .long sys_capset /* 185 */ + .long sys_capset /* 185 */ .long sys_sigaltstack .long sys_sendfile - .long sys_ni_syscall /* streams1 */ - .long sys_ni_syscall /* streams2 */ - .long sys_vfork /* 190 */ + .long sys_ni_syscall /* streams1 */ + .long sys_ni_syscall /* streams2 */ + .long sys_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap2 + .long sys_mmap_pgoff .long sys_truncate64 .long sys_ftruncate64 - .long sys_stat64 /* 195 */ + .long sys_stat64 /* 195 */ .long sys_lstat64 .long sys_fstat64 .long sys_chown .long sys_getuid - .long sys_getgid /* 200 */ + .long sys_getgid /* 200 */ .long sys_geteuid .long sys_getegid .long sys_setreuid .long sys_setregid - .long sys_getgroups /* 205 */ + .long sys_getgroups /* 205 */ .long sys_setgroups .long sys_fchown .long sys_setresuid .long sys_getresuid - .long sys_setresgid /* 210 */ + .long sys_setresgid /* 210 */ .long sys_getresgid .long sys_lchown .long sys_setuid .long sys_setgid - .long sys_setfsuid /* 215 */ + .long sys_setfsuid /* 215 */ .long sys_setfsgid .long sys_pivot_root .long sys_ni_syscall .long sys_ni_syscall - .long sys_getdents64 /* 220 */ + .long sys_getdents64 /* 220 */ .long sys_gettid .long sys_tkill .long sys_setxattr .long sys_lsetxattr - .long sys_fsetxattr /* 225 */ + .long sys_fsetxattr /* 225 */ .long sys_getxattr .long sys_lgetxattr .long sys_fgetxattr .long sys_listxattr - .long sys_llistxattr /* 230 */ + .long sys_llistxattr /* 230 */ .long sys_flistxattr .long sys_removexattr .long sys_lremovexattr .long sys_fremovexattr - .long sys_futex /* 235 */ + .long sys_futex /* 235 */ .long sys_sendfile64 - .long sys_mincore - .long sys_madvise + .long 
sys_ni_syscall /* sys_mincore */ + .long sys_ni_syscall /* sys_madvise */ .long sys_fcntl64 - .long sys_readahead /* 240 */ + .long sys_readahead /* 240 */ .long sys_io_setup .long sys_io_destroy .long sys_io_getevents .long sys_io_submit - .long sys_io_cancel /* 245 */ + .long sys_io_cancel /* 245 */ .long sys_fadvise64 .long sys_exit_group .long sys_lookup_dcookie .long sys_epoll_create - .long sys_epoll_ctl /* 250 */ + .long sys_epoll_ctl /* 250 */ .long sys_epoll_wait - .long sys_remap_file_pages + .long sys_ni_syscall /* sys_remap_file_pages */ .long sys_set_tid_address .long sys_timer_create - .long sys_timer_settime /* 255 */ + .long sys_timer_settime /* 255 */ .long sys_timer_gettime .long sys_timer_getoverrun .long sys_timer_delete .long sys_clock_settime - .long sys_clock_gettime /* 260 */ + .long sys_clock_gettime /* 260 */ .long sys_clock_getres .long sys_clock_nanosleep .long sys_statfs64 .long sys_fstatfs64 - .long sys_tgkill /* 265 */ + .long sys_tgkill /* 265 */ .long sys_utimes .long sys_fadvise64_64 - .long sys_mbind + .long sys_mbind .long sys_get_mempolicy - .long sys_set_mempolicy /* 270 */ + .long sys_set_mempolicy /* 270 */ .long sys_mq_open .long sys_mq_unlink .long sys_mq_timedsend .long sys_mq_timedreceive - .long sys_mq_notify /* 275 */ + .long sys_mq_notify /* 275 */ .long sys_mq_getsetattr .long sys_waitid - .long sys_ni_syscall /* for sys_vserver */ + .long sys_ni_syscall /* for sys_vserver */ .long sys_add_key - .long sys_request_key /* 280 */ + .long sys_request_key /* 280 */ .long sys_keyctl .long sys_ioprio_set .long sys_ioprio_get @@ -322,8 +319,8 @@ ENTRY(sys_call_table) .long sys_readlinkat .long sys_fchmodat .long sys_faccessat /* 300 */ - .long sys_pselect6 - .long sys_ppoll + .long sys_ni_syscall /* Reserved for pselect6 */ + .long sys_ni_syscall /* Reserved for ppoll */ .long sys_unshare .long sys_set_robust_list .long sys_get_robust_list /* 305 */ @@ -366,3 +363,7 @@ ENTRY(sys_call_table) .long sys_clock_adjtime .long sys_syncfs + .rept NR_syscalls-(.-sys_call_table)/4 + .long sys_ni_syscall + .endr + diff --git a/trunk/arch/m68k/mm/motorola.c b/trunk/arch/m68k/mm/motorola.c index 8b3db1c587fc..02b7a03e4226 100644 --- a/trunk/arch/m68k/mm/motorola.c +++ b/trunk/arch/m68k/mm/motorola.c @@ -300,8 +300,6 @@ void __init paging_init(void) zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; free_area_init_node(i, zones_size, m68k_memory[i].addr >> PAGE_SHIFT, NULL); - if (node_present_pages(i)) - node_set_state(i, N_NORMAL_MEMORY); } } diff --git a/trunk/arch/microblaze/kernel/timer.c b/trunk/arch/microblaze/kernel/timer.c index e5550ce4e0eb..d8a214f11ac2 100644 --- a/trunk/arch/microblaze/kernel/timer.c +++ b/trunk/arch/microblaze/kernel/timer.c @@ -217,12 +217,16 @@ static struct clocksource clocksource_microblaze = { .rating = 300, .read = microblaze_read, .mask = CLOCKSOURCE_MASK(32), + .shift = 8, /* I can shift it */ .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static int __init microblaze_clocksource_init(void) { - if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq)) + clocksource_microblaze.mult = + clocksource_hz2mult(timer_clock_freq, + clocksource_microblaze.shift); + if (clocksource_register(&clocksource_microblaze)) panic("failed to register clocksource"); /* stop timer1 */ diff --git a/trunk/arch/mips/Kbuild.platforms b/trunk/arch/mips/Kbuild.platforms index aef6c917b45a..7ff9b5492041 100644 --- a/trunk/arch/mips/Kbuild.platforms +++ b/trunk/arch/mips/Kbuild.platforms @@ -11,7 +11,6 @@ platforms += dec platforms += emma 
platforms += jazz platforms += jz4740 -platforms += lantiq platforms += lasat platforms += loongson platforms += mipssim diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index cef1a854487d..8e256cc5dcd9 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -212,24 +212,6 @@ config MACH_JZ4740 select HAVE_PWM select HAVE_CLK -config LANTIQ - bool "Lantiq based platforms" - select DMA_NONCOHERENT - select IRQ_CPU - select CEVT_R4K - select CSRC_R4K - select SYS_HAS_CPU_MIPS32_R1 - select SYS_HAS_CPU_MIPS32_R2 - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_32BIT_KERNEL - select SYS_SUPPORTS_MULTITHREADING - select SYS_HAS_EARLY_PRINTK - select ARCH_REQUIRE_GPIOLIB - select SWAP_IO_SPACE - select BOOT_RAW - select HAVE_CLK - select MIPS_MACHINE - config LASAT bool "LASAT Networks platforms" select CEVT_R4K @@ -754,33 +736,6 @@ config CAVIUM_OCTEON_REFERENCE_BOARD Hikari Say Y here for most Octeon reference boards. -config NLM_XLR_BOARD - bool "Netlogic XLR/XLS based systems" - depends on EXPERIMENTAL - select BOOT_ELF32 - select NLM_COMMON - select NLM_XLR - select SYS_HAS_CPU_XLR - select SYS_SUPPORTS_SMP - select HW_HAS_PCI - select SWAP_IO_SPACE - select SYS_SUPPORTS_32BIT_KERNEL - select SYS_SUPPORTS_64BIT_KERNEL - select 64BIT_PHYS_ADDR - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_HIGHMEM - select DMA_COHERENT - select NR_CPUS_DEFAULT_32 - select CEVT_R4K - select CSRC_R4K - select IRQ_CPU - select ZONE_DMA if 64BIT - select SYNC_R4K - select SYS_HAS_EARLY_PRINTK - help - Support for systems based on Netlogic XLR and XLS processors. - Say Y here if you have a XLR or XLS based board. - endchoice source "arch/mips/alchemy/Kconfig" @@ -788,7 +743,6 @@ source "arch/mips/ath79/Kconfig" source "arch/mips/bcm63xx/Kconfig" source "arch/mips/jazz/Kconfig" source "arch/mips/jz4740/Kconfig" -source "arch/mips/lantiq/Kconfig" source "arch/mips/lasat/Kconfig" source "arch/mips/pmc-sierra/Kconfig" source "arch/mips/powertv/Kconfig" @@ -798,7 +752,6 @@ source "arch/mips/txx9/Kconfig" source "arch/mips/vr41xx/Kconfig" source "arch/mips/cavium-octeon/Kconfig" source "arch/mips/loongson/Kconfig" -source "arch/mips/netlogic/Kconfig" endmenu @@ -1044,6 +997,9 @@ config IRQ_GT641XX config IRQ_GIC bool +config IRQ_CPU_OCTEON + bool + config MIPS_BOARDS_GEN bool @@ -1403,6 +1359,8 @@ config CPU_SB1 config CPU_CAVIUM_OCTEON bool "Cavium Octeon processor" depends on SYS_HAS_CPU_CAVIUM_OCTEON + select IRQ_CPU + select IRQ_CPU_OCTEON select CPU_HAS_PREFETCH select CPU_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_SMP @@ -1467,17 +1425,6 @@ config CPU_BMIPS5000 help Broadcom BMIPS5000 processors. -config CPU_XLR - bool "Netlogic XLR SoC" - depends on SYS_HAS_CPU_XLR - select CPU_SUPPORTS_32BIT_KERNEL - select CPU_SUPPORTS_64BIT_KERNEL - select CPU_SUPPORTS_HIGHMEM - select WEAK_ORDERING - select WEAK_REORDERING_BEYOND_LLSC - select CPU_SUPPORTS_HUGEPAGES - help - Netlogic Microsystems XLR/XLS processors. 
endchoice if CPU_LOONGSON2F @@ -1608,9 +1555,6 @@ config SYS_HAS_CPU_BMIPS4380 config SYS_HAS_CPU_BMIPS5000 bool -config SYS_HAS_CPU_XLR - bool - # # CPU may reorder R->R, R->W, W->R, W->W # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC @@ -2395,7 +2339,6 @@ config MMU config I8253 bool - select CLKSRC_I8253 select MIPS_EXTERNAL_TIMER config ZONE_DMA32 diff --git a/trunk/arch/mips/Makefile b/trunk/arch/mips/Makefile index 884819cd0607..53e3514ba10e 100644 --- a/trunk/arch/mips/Makefile +++ b/trunk/arch/mips/Makefile @@ -191,18 +191,6 @@ endif # include $(srctree)/arch/mips/Kbuild.platforms -# -# NETLOGIC SOC Common (common) -# -cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic -cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic - -# -# NETLOGIC XLR/XLS SoC, Simulator and boards -# -core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/ -load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000 - cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic drivers-$(CONFIG_PCI) += arch/mips/pci/ diff --git a/trunk/arch/mips/alchemy/common/dbdma.c b/trunk/arch/mips/alchemy/common/dbdma.c index 3a5abb54d505..ca0506a8585a 100644 --- a/trunk/arch/mips/alchemy/common/dbdma.c +++ b/trunk/arch/mips/alchemy/common/dbdma.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include @@ -58,8 +58,7 @@ static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock); /* I couldn't find a macro that did this... */ #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1)) -static dbdma_global_t *dbdma_gptr = - (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); +static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE; static int dbdma_initialized; static dbdev_tab_t dbdev_tab[] = { @@ -300,7 +299,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, if (ctp != NULL) { memset(ctp, 0, sizeof(chan_tab_t)); ctp->chan_index = chan = i; - dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); + dcp = DDMA_CHANNEL_BASE; dcp += (0x0100 * chan); ctp->chan_ptr = (au1x_dma_chan_t *)dcp; cp = (au1x_dma_chan_t *)dcp; @@ -959,75 +958,105 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr) } -static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6]; +struct alchemy_dbdma_sysdev { + struct sys_device sysdev; + u32 pm_regs[NUM_DBDMA_CHANS + 1][6]; +}; -static int alchemy_dbdma_suspend(void) +static int alchemy_dbdma_suspend(struct sys_device *dev, + pm_message_t state) { + struct alchemy_dbdma_sysdev *sdev = + container_of(dev, struct alchemy_dbdma_sysdev, sysdev); int i; - void __iomem *addr; + u32 addr; - addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); - alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00); - alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04); - alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08); - alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c); + addr = DDMA_GLOBAL_BASE; + sdev->pm_regs[0][0] = au_readl(addr + 0x00); + sdev->pm_regs[0][1] = au_readl(addr + 0x04); + sdev->pm_regs[0][2] = au_readl(addr + 0x08); + sdev->pm_regs[0][3] = au_readl(addr + 0x0c); /* save channel configurations */ - addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); - for (i = 1; i <= NUM_DBDMA_CHANS; i++) { - alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00); - alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04); - alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08); - alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c); - alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 
0x10); - alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14); + for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { + sdev->pm_regs[i][0] = au_readl(addr + 0x00); + sdev->pm_regs[i][1] = au_readl(addr + 0x04); + sdev->pm_regs[i][2] = au_readl(addr + 0x08); + sdev->pm_regs[i][3] = au_readl(addr + 0x0c); + sdev->pm_regs[i][4] = au_readl(addr + 0x10); + sdev->pm_regs[i][5] = au_readl(addr + 0x14); /* halt channel */ - __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00); - wmb(); - while (!(__raw_readl(addr + 0x14) & 1)) - wmb(); + au_writel(sdev->pm_regs[i][0] & ~1, addr + 0x00); + au_sync(); + while (!(au_readl(addr + 0x14) & 1)) + au_sync(); addr += 0x100; /* next channel base */ } /* disable channel interrupts */ - addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); - __raw_writel(0, addr + 0x0c); - wmb(); + au_writel(0, DDMA_GLOBAL_BASE + 0x0c); + au_sync(); return 0; } -static void alchemy_dbdma_resume(void) +static int alchemy_dbdma_resume(struct sys_device *dev) { + struct alchemy_dbdma_sysdev *sdev = + container_of(dev, struct alchemy_dbdma_sysdev, sysdev); int i; - void __iomem *addr; + u32 addr; - addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); - __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00); - __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04); - __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08); - __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c); + addr = DDMA_GLOBAL_BASE; + au_writel(sdev->pm_regs[0][0], addr + 0x00); + au_writel(sdev->pm_regs[0][1], addr + 0x04); + au_writel(sdev->pm_regs[0][2], addr + 0x08); + au_writel(sdev->pm_regs[0][3], addr + 0x0c); /* restore channel configurations */ - addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); - for (i = 1; i <= NUM_DBDMA_CHANS; i++) { - __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00); - __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04); - __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08); - __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c); - __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10); - __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14); - wmb(); + for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { + au_writel(sdev->pm_regs[i][0], addr + 0x00); + au_writel(sdev->pm_regs[i][1], addr + 0x04); + au_writel(sdev->pm_regs[i][2], addr + 0x08); + au_writel(sdev->pm_regs[i][3], addr + 0x0c); + au_writel(sdev->pm_regs[i][4], addr + 0x10); + au_writel(sdev->pm_regs[i][5], addr + 0x14); + au_sync(); addr += 0x100; /* next channel base */ } + + return 0; } -static struct syscore_ops alchemy_dbdma_syscore_ops = { +static struct sysdev_class alchemy_dbdma_sysdev_class = { + .name = "dbdma", .suspend = alchemy_dbdma_suspend, .resume = alchemy_dbdma_resume, }; +static int __init alchemy_dbdma_sysdev_init(void) +{ + struct alchemy_dbdma_sysdev *sdev; + int ret; + + ret = sysdev_class_register(&alchemy_dbdma_sysdev_class); + if (ret) + return ret; + + sdev = kzalloc(sizeof(struct alchemy_dbdma_sysdev), GFP_KERNEL); + if (!sdev) + return -ENOMEM; + + sdev->sysdev.id = -1; + sdev->sysdev.cls = &alchemy_dbdma_sysdev_class; + ret = sysdev_register(&sdev->sysdev); + if (ret) + kfree(sdev); + + return ret; +} + static int __init au1xxx_dbdma_init(void) { int irq_nr, ret; @@ -1055,7 +1084,11 @@ static int __init au1xxx_dbdma_init(void) else { dbdma_initialized = 1; printk(KERN_INFO "Alchemy DBDMA initialized\n"); - register_syscore_ops(&alchemy_dbdma_syscore_ops); + ret = alchemy_dbdma_sysdev_init(); + if (ret) { + 
printk(KERN_ERR "DBDMA PM init failed\n"); + ret = 0; + } } return ret; diff --git a/trunk/arch/mips/alchemy/common/dma.c b/trunk/arch/mips/alchemy/common/dma.c index 347980e79a89..d5278877891d 100644 --- a/trunk/arch/mips/alchemy/common/dma.c +++ b/trunk/arch/mips/alchemy/common/dma.c @@ -58,9 +58,6 @@ * returned from request_dma. */ -/* DMA Channel register block spacing */ -#define DMA_CHANNEL_LEN 0x00000100 - DEFINE_SPINLOCK(au1000_dma_spin_lock); struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = { @@ -80,23 +77,22 @@ static const struct dma_dev { unsigned int fifo_addr; unsigned int dma_mode; } dma_dev_table[DMA_NUM_DEV] = { - { AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */ - { AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */ - { 0, 0 }, /* DMA_REQ0 */ - { 0, 0 }, /* DMA_REQ1 */ - { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */ - { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */ - { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */ - { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */ - { AU1000_USBD_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */ - { AU1000_USBD_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */ - { AU1000_USBD_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */ - { AU1000_USBD_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */ - { AU1000_USBD_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */ - { AU1000_USBD_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */ - /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */ - { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */ - { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */ + {UART0_ADDR + UART_TX, 0}, + {UART0_ADDR + UART_RX, 0}, + {0, 0}, + {0, 0}, + {AC97C_DATA, DMA_DW16 }, /* coherent */ + {AC97C_DATA, DMA_DR | DMA_DW16 }, /* coherent */ + {UART3_ADDR + UART_TX, DMA_DW8 | DMA_NC}, + {UART3_ADDR + UART_RX, DMA_DR | DMA_DW8 | DMA_NC}, + {USBD_EP0RD, DMA_DR | DMA_DW8 | DMA_NC}, + {USBD_EP0WR, DMA_DW8 | DMA_NC}, + {USBD_EP2WR, DMA_DW8 | DMA_NC}, + {USBD_EP3WR, DMA_DW8 | DMA_NC}, + {USBD_EP4RD, DMA_DR | DMA_DW8 | DMA_NC}, + {USBD_EP5RD, DMA_DR | DMA_DW8 | DMA_NC}, + {I2S_DATA, DMA_DW32 | DMA_NC}, + {I2S_DATA, DMA_DR | DMA_DW32 | DMA_NC} }; int au1000_dma_read_proc(char *buf, char **start, off_t fpos, @@ -127,10 +123,10 @@ int au1000_dma_read_proc(char *buf, char **start, off_t fpos, /* Device FIFO addresses and default DMA modes - 2nd bank */ static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = { - { AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */ - { AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */ - { AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */ - { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */ + { SD0_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ + { SD0_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 }, /* coherent */ + { SD1_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ + { SD1_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 } /* coherent */ }; void dump_au1000_dma_channel(unsigned int dmanr) @@ -206,7 +202,7 @@ int request_au1000_dma(int dev_id, const char *dev_str, } /* fill it in */ - chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN; + chan->io = DMA_CHANNEL_BASE + i * DMA_CHANNEL_LEN; chan->dev_id = dev_id; chan->dev_str = dev_str; chan->fifo_addr = dev->fifo_addr; diff --git a/trunk/arch/mips/alchemy/common/irq.c 
b/trunk/arch/mips/alchemy/common/irq.c index 8b60ba0675e2..55dd7c888517 100644 --- a/trunk/arch/mips/alchemy/common/irq.c +++ b/trunk/arch/mips/alchemy/common/irq.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include @@ -39,36 +39,6 @@ #include #endif -/* Interrupt Controller register offsets */ -#define IC_CFG0RD 0x40 -#define IC_CFG0SET 0x40 -#define IC_CFG0CLR 0x44 -#define IC_CFG1RD 0x48 -#define IC_CFG1SET 0x48 -#define IC_CFG1CLR 0x4C -#define IC_CFG2RD 0x50 -#define IC_CFG2SET 0x50 -#define IC_CFG2CLR 0x54 -#define IC_REQ0INT 0x54 -#define IC_SRCRD 0x58 -#define IC_SRCSET 0x58 -#define IC_SRCCLR 0x5C -#define IC_REQ1INT 0x5C -#define IC_ASSIGNRD 0x60 -#define IC_ASSIGNSET 0x60 -#define IC_ASSIGNCLR 0x64 -#define IC_WAKERD 0x68 -#define IC_WAKESET 0x68 -#define IC_WAKECLR 0x6C -#define IC_MASKRD 0x70 -#define IC_MASKSET 0x70 -#define IC_MASKCLR 0x74 -#define IC_RISINGRD 0x78 -#define IC_RISINGCLR 0x78 -#define IC_FALLINGRD 0x7C -#define IC_FALLINGCLR 0x7C -#define IC_TESTBIT 0x80 - static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type); /* NOTE on interrupt priorities: The original writers of this code said: @@ -251,101 +221,89 @@ struct au1xxx_irqmap au1200_irqmap[] __initdata = { static void au1x_ic0_unmask(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); - - __raw_writel(1 << bit, base + IC_MASKSET); - __raw_writel(1 << bit, base + IC_WAKESET); - wmb(); + au_writel(1 << bit, IC0_MASKSET); + au_writel(1 << bit, IC0_WAKESET); + au_sync(); } static void au1x_ic1_unmask(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); - - __raw_writel(1 << bit, base + IC_MASKSET); - __raw_writel(1 << bit, base + IC_WAKESET); + au_writel(1 << bit, IC1_MASKSET); + au_writel(1 << bit, IC1_WAKESET); /* very hacky. does the pb1000 cpld auto-disable this int? * nowhere in the current kernel sources is it disabled. --mlau */ #if defined(CONFIG_MIPS_PB1000) if (d->irq == AU1000_GPIO15_INT) - __raw_writel(0x4000, (void __iomem *)PB1000_MDR); /* enable int */ + au_writel(0x4000, PB1000_MDR); /* enable int */ #endif - wmb(); + au_sync(); } static void au1x_ic0_mask(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); - - __raw_writel(1 << bit, base + IC_MASKCLR); - __raw_writel(1 << bit, base + IC_WAKECLR); - wmb(); + au_writel(1 << bit, IC0_MASKCLR); + au_writel(1 << bit, IC0_WAKECLR); + au_sync(); } static void au1x_ic1_mask(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); - - __raw_writel(1 << bit, base + IC_MASKCLR); - __raw_writel(1 << bit, base + IC_WAKECLR); - wmb(); + au_writel(1 << bit, IC1_MASKCLR); + au_writel(1 << bit, IC1_WAKECLR); + au_sync(); } static void au1x_ic0_ack(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); /* * This may assume that we don't get interrupts from * both edges at once, or if we do, that we don't care. 
*/ - __raw_writel(1 << bit, base + IC_FALLINGCLR); - __raw_writel(1 << bit, base + IC_RISINGCLR); - wmb(); + au_writel(1 << bit, IC0_FALLINGCLR); + au_writel(1 << bit, IC0_RISINGCLR); + au_sync(); } static void au1x_ic1_ack(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); /* * This may assume that we don't get interrupts from * both edges at once, or if we do, that we don't care. */ - __raw_writel(1 << bit, base + IC_FALLINGCLR); - __raw_writel(1 << bit, base + IC_RISINGCLR); - wmb(); + au_writel(1 << bit, IC1_FALLINGCLR); + au_writel(1 << bit, IC1_RISINGCLR); + au_sync(); } static void au1x_ic0_maskack(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); - __raw_writel(1 << bit, base + IC_WAKECLR); - __raw_writel(1 << bit, base + IC_MASKCLR); - __raw_writel(1 << bit, base + IC_RISINGCLR); - __raw_writel(1 << bit, base + IC_FALLINGCLR); - wmb(); + au_writel(1 << bit, IC0_WAKECLR); + au_writel(1 << bit, IC0_MASKCLR); + au_writel(1 << bit, IC0_RISINGCLR); + au_writel(1 << bit, IC0_FALLINGCLR); + au_sync(); } static void au1x_ic1_maskack(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); - __raw_writel(1 << bit, base + IC_WAKECLR); - __raw_writel(1 << bit, base + IC_MASKCLR); - __raw_writel(1 << bit, base + IC_RISINGCLR); - __raw_writel(1 << bit, base + IC_FALLINGCLR); - wmb(); + au_writel(1 << bit, IC1_WAKECLR); + au_writel(1 << bit, IC1_MASKCLR); + au_writel(1 << bit, IC1_RISINGCLR); + au_writel(1 << bit, IC1_FALLINGCLR); + au_sync(); } static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) @@ -360,13 +318,13 @@ static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) return -EINVAL; local_irq_save(flags); - wakemsk = __raw_readl((void __iomem *)SYS_WAKEMSK); + wakemsk = au_readl(SYS_WAKEMSK); if (on) wakemsk |= 1 << bit; else wakemsk &= ~(1 << bit); - __raw_writel(wakemsk, (void __iomem *)SYS_WAKEMSK); - wmb(); + au_writel(wakemsk, SYS_WAKEMSK); + au_sync(); local_irq_restore(flags); return 0; @@ -398,74 +356,81 @@ static struct irq_chip au1x_ic1_chip = { static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type) { struct irq_chip *chip; - unsigned int bit, irq = d->irq; + unsigned long icr[6]; + unsigned int bit, ic, irq = d->irq; irq_flow_handler_t handler = NULL; unsigned char *name = NULL; - void __iomem *base; int ret; if (irq >= AU1000_INTC1_INT_BASE) { bit = irq - AU1000_INTC1_INT_BASE; chip = &au1x_ic1_chip; - base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); + ic = 1; } else { bit = irq - AU1000_INTC0_INT_BASE; chip = &au1x_ic0_chip; - base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); + ic = 0; } if (bit > 31) return -EINVAL; + icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET; + icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET; + icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET; + icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR; + icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR; + icr[5] = ic ? 
IC1_CFG2CLR : IC0_CFG2CLR; + ret = 0; switch (flow_type) { /* cfgregs 2:1:0 */ case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */ - __raw_writel(1 << bit, base + IC_CFG2CLR); - __raw_writel(1 << bit, base + IC_CFG1CLR); - __raw_writel(1 << bit, base + IC_CFG0SET); + au_writel(1 << bit, icr[5]); + au_writel(1 << bit, icr[4]); + au_writel(1 << bit, icr[0]); handler = handle_edge_irq; name = "riseedge"; break; case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */ - __raw_writel(1 << bit, base + IC_CFG2CLR); - __raw_writel(1 << bit, base + IC_CFG1SET); - __raw_writel(1 << bit, base + IC_CFG0CLR); + au_writel(1 << bit, icr[5]); + au_writel(1 << bit, icr[1]); + au_writel(1 << bit, icr[3]); handler = handle_edge_irq; name = "falledge"; break; case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */ - __raw_writel(1 << bit, base + IC_CFG2CLR); - __raw_writel(1 << bit, base + IC_CFG1SET); - __raw_writel(1 << bit, base + IC_CFG0SET); + au_writel(1 << bit, icr[5]); + au_writel(1 << bit, icr[1]); + au_writel(1 << bit, icr[0]); handler = handle_edge_irq; name = "bothedge"; break; case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */ - __raw_writel(1 << bit, base + IC_CFG2SET); - __raw_writel(1 << bit, base + IC_CFG1CLR); - __raw_writel(1 << bit, base + IC_CFG0SET); + au_writel(1 << bit, icr[2]); + au_writel(1 << bit, icr[4]); + au_writel(1 << bit, icr[0]); handler = handle_level_irq; name = "hilevel"; break; case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */ - __raw_writel(1 << bit, base + IC_CFG2SET); - __raw_writel(1 << bit, base + IC_CFG1SET); - __raw_writel(1 << bit, base + IC_CFG0CLR); + au_writel(1 << bit, icr[2]); + au_writel(1 << bit, icr[1]); + au_writel(1 << bit, icr[3]); handler = handle_level_irq; name = "lowlevel"; break; case IRQ_TYPE_NONE: /* 0:0:0 */ - __raw_writel(1 << bit, base + IC_CFG2CLR); - __raw_writel(1 << bit, base + IC_CFG1CLR); - __raw_writel(1 << bit, base + IC_CFG0CLR); + au_writel(1 << bit, icr[5]); + au_writel(1 << bit, icr[4]); + au_writel(1 << bit, icr[3]); break; default: ret = -EINVAL; } __irq_set_chip_handler_name_locked(d->irq, chip, handler, name); - wmb(); + au_sync(); return ret; } @@ -479,21 +444,21 @@ asmlinkage void plat_irq_dispatch(void) off = MIPS_CPU_IRQ_BASE + 7; goto handle; } else if (pending & CAUSEF_IP2) { - s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ0INT; + s = IC0_REQ0INT; off = AU1000_INTC0_INT_BASE; } else if (pending & CAUSEF_IP3) { - s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ1INT; + s = IC0_REQ1INT; off = AU1000_INTC0_INT_BASE; } else if (pending & CAUSEF_IP4) { - s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ0INT; + s = IC1_REQ0INT; off = AU1000_INTC1_INT_BASE; } else if (pending & CAUSEF_IP5) { - s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ1INT; + s = IC1_REQ1INT; off = AU1000_INTC1_INT_BASE; } else goto spurious; - s = __raw_readl((void __iomem *)s); + s = au_readl(s); if (unlikely(!s)) { spurious: spurious_interrupt(); @@ -504,42 +469,48 @@ asmlinkage void plat_irq_dispatch(void) do_IRQ(off); } - -static inline void ic_init(void __iomem *base) -{ - /* initialize interrupt controller to a safe state */ - __raw_writel(0xffffffff, base + IC_CFG0CLR); - __raw_writel(0xffffffff, base + IC_CFG1CLR); - __raw_writel(0xffffffff, base + IC_CFG2CLR); - __raw_writel(0xffffffff, base + IC_MASKCLR); - __raw_writel(0xffffffff, base + IC_ASSIGNCLR); - __raw_writel(0xffffffff, base + IC_WAKECLR); - __raw_writel(0xffffffff, base + IC_SRCSET); - __raw_writel(0xffffffff, base + IC_FALLINGCLR); - __raw_writel(0xffffffff, base + IC_RISINGCLR); - __raw_writel(0x00000000, base + IC_TESTBIT); - wmb(); -} - static void __init 
au1000_init_irq(struct au1xxx_irqmap *map) { unsigned int bit, irq_nr; - void __iomem *base; + int i; + + /* + * Initialize interrupt controllers to a safe state. + */ + au_writel(0xffffffff, IC0_CFG0CLR); + au_writel(0xffffffff, IC0_CFG1CLR); + au_writel(0xffffffff, IC0_CFG2CLR); + au_writel(0xffffffff, IC0_MASKCLR); + au_writel(0xffffffff, IC0_ASSIGNCLR); + au_writel(0xffffffff, IC0_WAKECLR); + au_writel(0xffffffff, IC0_SRCSET); + au_writel(0xffffffff, IC0_FALLINGCLR); + au_writel(0xffffffff, IC0_RISINGCLR); + au_writel(0x00000000, IC0_TESTBIT); + + au_writel(0xffffffff, IC1_CFG0CLR); + au_writel(0xffffffff, IC1_CFG1CLR); + au_writel(0xffffffff, IC1_CFG2CLR); + au_writel(0xffffffff, IC1_MASKCLR); + au_writel(0xffffffff, IC1_ASSIGNCLR); + au_writel(0xffffffff, IC1_WAKECLR); + au_writel(0xffffffff, IC1_SRCSET); + au_writel(0xffffffff, IC1_FALLINGCLR); + au_writel(0xffffffff, IC1_RISINGCLR); + au_writel(0x00000000, IC1_TESTBIT); - ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR)); - ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR)); mips_cpu_irq_init(); /* register all 64 possible IC0+IC1 irq sources as type "none". * Use set_irq_type() to set edge/level behaviour at runtime. */ - for (irq_nr = AU1000_INTC0_INT_BASE; - (irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++) - au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE); + for (i = AU1000_INTC0_INT_BASE; + (i < AU1000_INTC0_INT_BASE + 32); i++) + au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); - for (irq_nr = AU1000_INTC1_INT_BASE; - (irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++) - au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE); + for (i = AU1000_INTC1_INT_BASE; + (i < AU1000_INTC1_INT_BASE + 32); i++) + au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); /* * Initialize IC0, which is fixed per processor. 
@@ -549,13 +520,13 @@ static void __init au1000_init_irq(struct au1xxx_irqmap *map) if (irq_nr >= AU1000_INTC1_INT_BASE) { bit = irq_nr - AU1000_INTC1_INT_BASE; - base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); + if (map->im_request) + au_writel(1 << bit, IC1_ASSIGNSET); } else { bit = irq_nr - AU1000_INTC0_INT_BASE; - base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); + if (map->im_request) + au_writel(1 << bit, IC0_ASSIGNSET); } - if (map->im_request) - __raw_writel(1 << bit, base + IC_ASSIGNSET); au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type); ++map; @@ -585,62 +556,90 @@ void __init arch_init_irq(void) } } +struct alchemy_ic_sysdev { + struct sys_device sysdev; + void __iomem *base; + unsigned long pmdata[7]; +}; -static unsigned long alchemy_ic_pmdata[7 * 2]; - -static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d) +static int alchemy_ic_suspend(struct sys_device *dev, pm_message_t state) { - d[0] = __raw_readl(base + IC_CFG0RD); - d[1] = __raw_readl(base + IC_CFG1RD); - d[2] = __raw_readl(base + IC_CFG2RD); - d[3] = __raw_readl(base + IC_SRCRD); - d[4] = __raw_readl(base + IC_ASSIGNRD); - d[5] = __raw_readl(base + IC_WAKERD); - d[6] = __raw_readl(base + IC_MASKRD); - ic_init(base); /* shut it up too while at it */ + struct alchemy_ic_sysdev *icdev = + container_of(dev, struct alchemy_ic_sysdev, sysdev); + + icdev->pmdata[0] = __raw_readl(icdev->base + IC_CFG0RD); + icdev->pmdata[1] = __raw_readl(icdev->base + IC_CFG1RD); + icdev->pmdata[2] = __raw_readl(icdev->base + IC_CFG2RD); + icdev->pmdata[3] = __raw_readl(icdev->base + IC_SRCRD); + icdev->pmdata[4] = __raw_readl(icdev->base + IC_ASSIGNRD); + icdev->pmdata[5] = __raw_readl(icdev->base + IC_WAKERD); + icdev->pmdata[6] = __raw_readl(icdev->base + IC_MASKRD); + + return 0; } -static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d) +static int alchemy_ic_resume(struct sys_device *dev) { - ic_init(base); - - __raw_writel(d[0], base + IC_CFG0SET); - __raw_writel(d[1], base + IC_CFG1SET); - __raw_writel(d[2], base + IC_CFG2SET); - __raw_writel(d[3], base + IC_SRCSET); - __raw_writel(d[4], base + IC_ASSIGNSET); - __raw_writel(d[5], base + IC_WAKESET); + struct alchemy_ic_sysdev *icdev = + container_of(dev, struct alchemy_ic_sysdev, sysdev); + + __raw_writel(0xffffffff, icdev->base + IC_MASKCLR); + __raw_writel(0xffffffff, icdev->base + IC_CFG0CLR); + __raw_writel(0xffffffff, icdev->base + IC_CFG1CLR); + __raw_writel(0xffffffff, icdev->base + IC_CFG2CLR); + __raw_writel(0xffffffff, icdev->base + IC_SRCCLR); + __raw_writel(0xffffffff, icdev->base + IC_ASSIGNCLR); + __raw_writel(0xffffffff, icdev->base + IC_WAKECLR); + __raw_writel(0xffffffff, icdev->base + IC_RISINGCLR); + __raw_writel(0xffffffff, icdev->base + IC_FALLINGCLR); + __raw_writel(0x00000000, icdev->base + IC_TESTBIT); + wmb(); + __raw_writel(icdev->pmdata[0], icdev->base + IC_CFG0SET); + __raw_writel(icdev->pmdata[1], icdev->base + IC_CFG1SET); + __raw_writel(icdev->pmdata[2], icdev->base + IC_CFG2SET); + __raw_writel(icdev->pmdata[3], icdev->base + IC_SRCSET); + __raw_writel(icdev->pmdata[4], icdev->base + IC_ASSIGNSET); + __raw_writel(icdev->pmdata[5], icdev->base + IC_WAKESET); wmb(); - __raw_writel(d[6], base + IC_MASKSET); + __raw_writel(icdev->pmdata[6], icdev->base + IC_MASKSET); wmb(); -} -static int alchemy_ic_suspend(void) -{ - alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), - alchemy_ic_pmdata); - alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), - 
&alchemy_ic_pmdata[7]); return 0; } -static void alchemy_ic_resume(void) -{ - alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), - &alchemy_ic_pmdata[7]); - alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), - alchemy_ic_pmdata); -} - -static struct syscore_ops alchemy_ic_syscore_ops = { +static struct sysdev_class alchemy_ic_sysdev_class = { + .name = "ic", .suspend = alchemy_ic_suspend, .resume = alchemy_ic_resume, }; -static int __init alchemy_ic_pm_init(void) +static int __init alchemy_ic_sysdev_init(void) { - register_syscore_ops(&alchemy_ic_syscore_ops); + struct alchemy_ic_sysdev *icdev; + unsigned long icbase[2] = { IC0_PHYS_ADDR, IC1_PHYS_ADDR }; + int err, i; + + err = sysdev_class_register(&alchemy_ic_sysdev_class); + if (err) + return err; + + for (i = 0; i < 2; i++) { + icdev = kzalloc(sizeof(struct alchemy_ic_sysdev), GFP_KERNEL); + if (!icdev) + return -ENOMEM; + + icdev->base = ioremap(icbase[i], 0x1000); + + icdev->sysdev.id = i; + icdev->sysdev.cls = &alchemy_ic_sysdev_class; + err = sysdev_register(&icdev->sysdev); + if (err) { + kfree(icdev); + return err; + } + } + return 0; } -device_initcall(alchemy_ic_pm_init); +device_initcall(alchemy_ic_sysdev_init); diff --git a/trunk/arch/mips/alchemy/common/platform.c b/trunk/arch/mips/alchemy/common/platform.c index 3b2c18b14341..9e7814db3d03 100644 --- a/trunk/arch/mips/alchemy/common/platform.c +++ b/trunk/arch/mips/alchemy/common/platform.c @@ -13,10 +13,9 @@ #include #include -#include #include #include -#include +#include #include #include @@ -31,12 +30,21 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, #ifdef CONFIG_SERIAL_8250 switch (state) { case 0: - alchemy_uart_enable(CPHYSADDR(port->membase)); + if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) { + /* power-on sequence as suggested in the databooks */ + __raw_writel(0, port->membase + UART_MOD_CNTRL); + wmb(); + __raw_writel(1, port->membase + UART_MOD_CNTRL); + wmb(); + } + __raw_writel(3, port->membase + UART_MOD_CNTRL); /* full on */ + wmb(); serial8250_do_pm(port, state, old_state); break; case 3: /* power off */ serial8250_do_pm(port, state, old_state); - alchemy_uart_disable(CPHYSADDR(port->membase)); + __raw_writel(0, port->membase + UART_MOD_CNTRL); + wmb(); break; default: serial8250_do_pm(port, state, old_state); @@ -57,60 +65,38 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, .pm = alchemy_8250_pm, \ } -static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { - [ALCHEMY_CPU_AU1000] = { - PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT), - PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT), - PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT), - PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT), - }, - [ALCHEMY_CPU_AU1500] = { - PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT), - PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT), - }, - [ALCHEMY_CPU_AU1100] = { - PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT), - PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT), - PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT), - }, - [ALCHEMY_CPU_AU1550] = { - PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT), - PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT), - PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT), - }, - [ALCHEMY_CPU_AU1200] = { - PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT), - PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT), - }, +static struct plat_serial8250_port au1x00_uart_data[] = { +#if defined(CONFIG_SOC_AU1000) + PORT(UART0_PHYS_ADDR, AU1000_UART0_INT), + 
PORT(UART1_PHYS_ADDR, AU1000_UART1_INT), + PORT(UART2_PHYS_ADDR, AU1000_UART2_INT), + PORT(UART3_PHYS_ADDR, AU1000_UART3_INT), +#elif defined(CONFIG_SOC_AU1500) + PORT(UART0_PHYS_ADDR, AU1500_UART0_INT), + PORT(UART3_PHYS_ADDR, AU1500_UART3_INT), +#elif defined(CONFIG_SOC_AU1100) + PORT(UART0_PHYS_ADDR, AU1100_UART0_INT), + PORT(UART1_PHYS_ADDR, AU1100_UART1_INT), + PORT(UART3_PHYS_ADDR, AU1100_UART3_INT), +#elif defined(CONFIG_SOC_AU1550) + PORT(UART0_PHYS_ADDR, AU1550_UART0_INT), + PORT(UART1_PHYS_ADDR, AU1550_UART1_INT), + PORT(UART3_PHYS_ADDR, AU1550_UART3_INT), +#elif defined(CONFIG_SOC_AU1200) + PORT(UART0_PHYS_ADDR, AU1200_UART0_INT), + PORT(UART1_PHYS_ADDR, AU1200_UART1_INT), +#endif + { }, }; static struct platform_device au1xx0_uart_device = { .name = "serial8250", .id = PLAT8250_DEV_AU1X00, + .dev = { + .platform_data = au1x00_uart_data, + }, }; -static void __init alchemy_setup_uarts(int ctype) -{ - unsigned int uartclk = get_au1x00_uart_baud_base() * 16; - int s = sizeof(struct plat_serial8250_port); - int c = alchemy_get_uarts(ctype); - struct plat_serial8250_port *ports; - - ports = kzalloc(s * (c + 1), GFP_KERNEL); - if (!ports) { - printk(KERN_INFO "Alchemy: no memory for UART data\n"); - return; - } - memcpy(ports, au1x00_uart_data[ctype], s * c); - au1xx0_uart_device.dev.platform_data = ports; - - /* Fill up uartclk. */ - for (s = 0; s < c; s++) - ports[s].uartclk = uartclk; - if (platform_device_register(&au1xx0_uart_device)) - printk(KERN_INFO "Alchemy: failed to register UARTs\n"); -} - /* OHCI (USB full speed host controller) */ static struct resource au1xxx_usb_ohci_resources[] = { [0] = { @@ -283,8 +269,8 @@ extern struct au1xmmc_platform_data au1xmmc_platdata[2]; static struct resource au1200_mmc0_resources[] = { [0] = { - .start = AU1100_SD0_PHYS_ADDR, - .end = AU1100_SD0_PHYS_ADDR + 0xfff, + .start = SD0_PHYS_ADDR, + .end = SD0_PHYS_ADDR + 0x7ffff, .flags = IORESOURCE_MEM, }, [1] = { @@ -319,8 +305,8 @@ static struct platform_device au1200_mmc0_device = { #ifndef CONFIG_MIPS_DB1200 static struct resource au1200_mmc1_resources[] = { [0] = { - .start = AU1100_SD1_PHYS_ADDR, - .end = AU1100_SD1_PHYS_ADDR + 0xfff, + .start = SD1_PHYS_ADDR, + .end = SD1_PHYS_ADDR + 0x7ffff, .flags = IORESOURCE_MEM, }, [1] = { @@ -373,16 +359,15 @@ static struct platform_device pbdb_smbus_device = { #endif /* Macro to help defining the Ethernet MAC resources */ -#define MAC_RES_COUNT 3 /* MAC regs base, MAC enable reg, MAC INT */ #define MAC_RES(_base, _enable, _irq) \ { \ - .start = _base, \ - .end = _base + 0xffff, \ + .start = CPHYSADDR(_base), \ + .end = CPHYSADDR(_base + 0xffff), \ .flags = IORESOURCE_MEM, \ }, \ { \ - .start = _enable, \ - .end = _enable + 0x3, \ + .start = CPHYSADDR(_enable), \ + .end = CPHYSADDR(_enable + 0x3), \ .flags = IORESOURCE_MEM, \ }, \ { \ @@ -391,29 +376,19 @@ static struct platform_device pbdb_smbus_device = { .flags = IORESOURCE_IRQ \ } -static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = { - [ALCHEMY_CPU_AU1000] = { - MAC_RES(AU1000_MAC0_PHYS_ADDR, - AU1000_MACEN_PHYS_ADDR, - AU1000_MAC0_DMA_INT) - }, - [ALCHEMY_CPU_AU1500] = { - MAC_RES(AU1500_MAC0_PHYS_ADDR, - AU1500_MACEN_PHYS_ADDR, - AU1500_MAC0_DMA_INT) - }, - [ALCHEMY_CPU_AU1100] = { - MAC_RES(AU1000_MAC0_PHYS_ADDR, - AU1000_MACEN_PHYS_ADDR, - AU1100_MAC0_DMA_INT) - }, - [ALCHEMY_CPU_AU1550] = { - MAC_RES(AU1000_MAC0_PHYS_ADDR, - AU1000_MACEN_PHYS_ADDR, - AU1550_MAC0_DMA_INT) - }, +static struct resource au1xxx_eth0_resources[] = { +#if defined(CONFIG_SOC_AU1000) + 
MAC_RES(AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT), +#elif defined(CONFIG_SOC_AU1100) + MAC_RES(AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT), +#elif defined(CONFIG_SOC_AU1550) + MAC_RES(AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT), +#elif defined(CONFIG_SOC_AU1500) + MAC_RES(AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT), +#endif }; + static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { .phy1_search_mac0 = 1, }; @@ -421,26 +396,20 @@ static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { static struct platform_device au1xxx_eth0_device = { .name = "au1000-eth", .id = 0, - .num_resources = MAC_RES_COUNT, + .num_resources = ARRAY_SIZE(au1xxx_eth0_resources), + .resource = au1xxx_eth0_resources, .dev.platform_data = &au1xxx_eth0_platform_data, }; -static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = { - [ALCHEMY_CPU_AU1000] = { - MAC_RES(AU1000_MAC1_PHYS_ADDR, - AU1000_MACEN_PHYS_ADDR + 4, - AU1000_MAC1_DMA_INT) - }, - [ALCHEMY_CPU_AU1500] = { - MAC_RES(AU1500_MAC1_PHYS_ADDR, - AU1500_MACEN_PHYS_ADDR + 4, - AU1500_MAC1_DMA_INT) - }, - [ALCHEMY_CPU_AU1550] = { - MAC_RES(AU1000_MAC1_PHYS_ADDR, - AU1000_MACEN_PHYS_ADDR + 4, - AU1550_MAC1_DMA_INT) - }, +#ifndef CONFIG_SOC_AU1100 +static struct resource au1xxx_eth1_resources[] = { +#if defined(CONFIG_SOC_AU1000) + MAC_RES(AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT), +#elif defined(CONFIG_SOC_AU1550) + MAC_RES(AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT), +#elif defined(CONFIG_SOC_AU1500) + MAC_RES(AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT), +#endif }; static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { @@ -450,9 +419,11 @@ static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { static struct platform_device au1xxx_eth1_device = { .name = "au1000-eth", .id = 1, - .num_resources = MAC_RES_COUNT, + .num_resources = ARRAY_SIZE(au1xxx_eth1_resources), + .resource = au1xxx_eth1_resources, .dev.platform_data = &au1xxx_eth1_platform_data, }; +#endif void __init au1xxx_override_eth_cfg(unsigned int port, struct au1000_eth_platform_data *eth_data) @@ -463,65 +434,15 @@ void __init au1xxx_override_eth_cfg(unsigned int port, if (port == 0) memcpy(&au1xxx_eth0_platform_data, eth_data, sizeof(struct au1000_eth_platform_data)); +#ifndef CONFIG_SOC_AU1100 else memcpy(&au1xxx_eth1_platform_data, eth_data, sizeof(struct au1000_eth_platform_data)); -} - -static void __init alchemy_setup_macs(int ctype) -{ - int ret, i; - unsigned char ethaddr[6]; - struct resource *macres; - - /* Handle 1st MAC */ - if (alchemy_get_macs(ctype) < 1) - return; - - macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); - if (!macres) { - printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n"); - return; - } - memcpy(macres, au1xxx_eth0_resources[ctype], - sizeof(struct resource) * MAC_RES_COUNT); - au1xxx_eth0_device.resource = macres; - - i = prom_get_ethernet_addr(ethaddr); - if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) - memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); - - ret = platform_device_register(&au1xxx_eth0_device); - if (!ret) - printk(KERN_INFO "Alchemy: failed to register MAC0\n"); - - - /* Handle 2nd MAC */ - if (alchemy_get_macs(ctype) < 2) - return; - - macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); - if (!macres) { - printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n"); - return; - } - memcpy(macres, 
au1xxx_eth1_resources[ctype], - sizeof(struct resource) * MAC_RES_COUNT); - au1xxx_eth1_device.resource = macres; - - ethaddr[5] += 1; /* next addr for 2nd MAC */ - if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) - memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); - - /* Register second MAC if enabled in pinfunc */ - if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) { - ret = platform_device_register(&au1xxx_eth1_device); - if (ret) - printk(KERN_INFO "Alchemy: failed to register MAC1\n"); - } +#endif } static struct platform_device *au1xxx_platform_devices[] __initdata = { + &au1xx0_uart_device, &au1xxx_usb_ohci_device, #ifdef CONFIG_FB_AU1100 &au1100_lcd_device, @@ -539,17 +460,36 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = { #ifdef SMBUS_PSC_BASE &pbdb_smbus_device, #endif + &au1xxx_eth0_device, }; static int __init au1xxx_platform_init(void) { - int err, ctype = alchemy_get_cputype(); + unsigned int uartclk = get_au1x00_uart_baud_base() * 16; + int err, i; + unsigned char ethaddr[6]; - alchemy_setup_uarts(ctype); - alchemy_setup_macs(ctype); + /* Fill up uartclk. */ + for (i = 0; au1x00_uart_data[i].flags; i++) + au1x00_uart_data[i].uartclk = uartclk; + + /* use firmware-provided mac addr if available and necessary */ + i = prom_get_ethernet_addr(ethaddr); + if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) + memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); err = platform_add_devices(au1xxx_platform_devices, ARRAY_SIZE(au1xxx_platform_devices)); +#ifndef CONFIG_SOC_AU1100 + ethaddr[5] += 1; /* next addr for 2nd MAC */ + if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) + memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); + + /* Register second MAC if enabled in pinfunc */ + if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) + err = platform_device_register(&au1xxx_eth1_device); +#endif + return err; } diff --git a/trunk/arch/mips/alchemy/common/setup.c b/trunk/arch/mips/alchemy/common/setup.c index 1b887c868417..561e5da2658b 100644 --- a/trunk/arch/mips/alchemy/common/setup.c +++ b/trunk/arch/mips/alchemy/common/setup.c @@ -52,6 +52,8 @@ void __init plat_mem_setup(void) /* this is faster than wasting cycles trying to approximate it */ preset_lpj = (est_freq >> 1) / HZ; + board_setup(); /* board specific setup */ + if (au1xxx_cpu_needs_config_od()) /* Various early Au1xx0 errata corrected by this */ set_c0_config(1 << 19); /* Set Config[OD] */ @@ -59,8 +61,6 @@ void __init plat_mem_setup(void) /* Clear to obtain best system bus performance */ clear_c0_config(1 << 19); /* Clear Config[OD] */ - board_setup(); /* board specific setup */ - /* IO/MEM resources. 
*/ set_io_port_base(0); ioport_resource.start = IOPORT_RESOURCE_START; diff --git a/trunk/arch/mips/alchemy/common/time.c b/trunk/arch/mips/alchemy/common/time.c index d5da6adbf634..2aecb2fdf982 100644 --- a/trunk/arch/mips/alchemy/common/time.c +++ b/trunk/arch/mips/alchemy/common/time.c @@ -141,7 +141,8 @@ static int __init alchemy_time_init(unsigned int m2int) goto cntr_err; /* register counter1 clocksource and event device */ - clocksource_register_hz(&au1x_counter1_clocksource, 32768); + clocksource_set_clock(&au1x_counter1_clocksource, 32768); + clocksource_register(&au1x_counter1_clocksource); cd->shift = 32; cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift); diff --git a/trunk/arch/mips/alchemy/devboards/db1200/setup.c b/trunk/arch/mips/alchemy/devboards/db1200/setup.c index 1dac4f27d334..4a8980027ecf 100644 --- a/trunk/arch/mips/alchemy/devboards/db1200/setup.c +++ b/trunk/arch/mips/alchemy/devboards/db1200/setup.c @@ -23,13 +23,6 @@ void __init board_setup(void) unsigned long freq0, clksrc, div, pfc; unsigned short whoami; - /* Set Config[OD] (disable overlapping bus transaction): - * This gets rid of a _lot_ of spurious interrupts (especially - * wrt. IDE); but incurs ~10% performance hit in some - * cpu-bound applications. - */ - set_c0_config(1 << 19); - bcsr_init(DB1200_BCSR_PHYS_ADDR, DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); diff --git a/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c b/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c index 5c956fe8760f..05f120ff90f9 100644 --- a/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c @@ -127,10 +127,13 @@ const char *get_system_type(void) void __init board_setup(void) { unsigned long bcsr1, bcsr2; + u32 pin_func; bcsr1 = DB1000_BCSR_PHYS_ADDR; bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; + pin_func = 0; + #ifdef CONFIG_MIPS_DB1000 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); #endif @@ -161,16 +164,12 @@ void __init board_setup(void) /* Not valid for Au1550 */ #if defined(CONFIG_IRDA) && \ (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) - { - u32 pin_func; - - /* Set IRFIRSEL instead of GPIO15 */ - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; - au_writel(pin_func, SYS_PINFUNC); - /* Power off until the driver is in use */ - bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, - BCSR_RESETS_IRDA_MODE_OFF); - } + /* Set IRFIRSEL instead of GPIO15 */ + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; + au_writel(pin_func, SYS_PINFUNC); + /* Power off until the driver is in use */ + bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, + BCSR_RESETS_IRDA_MODE_OFF); #endif bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ @@ -178,35 +177,31 @@ void __init board_setup(void) alchemy_gpio1_input_enable(); #ifdef CONFIG_MIPS_MIRAGE - { - u32 pin_func; - - /* GPIO[20] is output */ - alchemy_gpio_direction_output(20, 0); + /* GPIO[20] is output */ + alchemy_gpio_direction_output(20, 0); - /* Set GPIO[210:208] instead of SSI_0 */ - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; + /* Set GPIO[210:208] instead of SSI_0 */ + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; - /* Set GPIO[215:211] for LEDs */ - pin_func |= 5 << 2; + /* Set GPIO[215:211] for LEDs */ + pin_func |= 5 << 2; - /* Set GPIO[214:213] for more LEDs */ - pin_func |= 5 << 12; + /* Set GPIO[214:213] for more LEDs */ + pin_func |= 5 << 12; - /* Set GPIO[207:200] instead of PCMCIA/LCD */ - pin_func |= SYS_PF_LCD | SYS_PF_PC; - au_writel(pin_func, SYS_PINFUNC); + /* 
Set GPIO[207:200] instead of PCMCIA/LCD */ + pin_func |= SYS_PF_LCD | SYS_PF_PC; + au_writel(pin_func, SYS_PINFUNC); - /* - * Enable speaker amplifier. This should - * be part of the audio driver. - */ - alchemy_gpio_direction_output(209, 1); + /* + * Enable speaker amplifier. This should + * be part of the audio driver. + */ + alchemy_gpio_direction_output(209, 1); - pm_power_off = mirage_power_off; - _machine_halt = mirage_power_off; - _machine_restart = (void(*)(char *))mips_softreset; - } + pm_power_off = mirage_power_off; + _machine_halt = mirage_power_off; + _machine_restart = (void(*)(char *))mips_softreset; #endif #ifdef CONFIG_MIPS_BOSPORUS diff --git a/trunk/arch/mips/alchemy/devboards/pb1000/board_setup.c b/trunk/arch/mips/alchemy/devboards/pb1000/board_setup.c index e64fdcbf75d0..2d85c4b5be09 100644 --- a/trunk/arch/mips/alchemy/devboards/pb1000/board_setup.c +++ b/trunk/arch/mips/alchemy/devboards/pb1000/board_setup.c @@ -65,7 +65,7 @@ void __init board_setup(void) /* Set AUX clock to 12 MHz * 8 = 96 MHz */ au_writel(8, SYS_AUXPLL); - alchemy_gpio1_input_enable(); + au_writel(0, SYS_PINSTATERD); udelay(100); #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) diff --git a/trunk/arch/mips/alchemy/devboards/pb1500/board_setup.c b/trunk/arch/mips/alchemy/devboards/pb1500/board_setup.c index 3b4fa3206969..83f46215eb0c 100644 --- a/trunk/arch/mips/alchemy/devboards/pb1500/board_setup.c +++ b/trunk/arch/mips/alchemy/devboards/pb1500/board_setup.c @@ -56,7 +56,7 @@ void __init board_setup(void) sys_clksrc = sys_freqctrl = pin_func = 0; /* Set AUX clock to 12 MHz * 8 = 96 MHz */ au_writel(8, SYS_AUXPLL); - alchemy_gpio1_input_enable(); + au_writel(0, SYS_PINSTATERD); udelay(100); /* GPIO201 is input for PCMCIA card detect */ diff --git a/trunk/arch/mips/alchemy/devboards/prom.c b/trunk/arch/mips/alchemy/devboards/prom.c index e5306b56da6d..baeb21385058 100644 --- a/trunk/arch/mips/alchemy/devboards/prom.c +++ b/trunk/arch/mips/alchemy/devboards/prom.c @@ -62,5 +62,5 @@ void __init prom_init(void) void prom_putchar(unsigned char c) { - alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); + alchemy_uart_putchar(UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/alchemy/gpr/board_setup.c b/trunk/arch/mips/alchemy/gpr/board_setup.c index 5f8f0691ed2d..ad2e3f137933 100644 --- a/trunk/arch/mips/alchemy/gpr/board_setup.c +++ b/trunk/arch/mips/alchemy/gpr/board_setup.c @@ -36,6 +36,9 @@ #include +#define UART1_ADDR KSEG1ADDR(UART1_PHYS_ADDR) +#define UART3_ADDR KSEG1ADDR(UART3_PHYS_ADDR) + char irq_tab_alchemy[][5] __initdata = { [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, }; @@ -64,15 +67,18 @@ static void gpr_power_off(void) void __init board_setup(void) { - printk(KERN_INFO "Trapeze ITS GPR board\n"); + printk(KERN_INFO "Tarpeze ITS GPR board\n"); pm_power_off = gpr_power_off; _machine_halt = gpr_power_off; _machine_restart = gpr_reset; - /* Enable UART1/3 */ - alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); - alchemy_uart_enable(AU1000_UART1_PHYS_ADDR); + /* Enable UART3 */ + au_writel(0x1, UART3_ADDR + UART_MOD_CNTRL);/* clock enable (CE) */ + au_writel(0x3, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ + /* Enable UART1 */ + au_writel(0x1, UART1_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ + au_writel(0x3, UART1_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ /* Take away Reset of UMTS-card */ alchemy_gpio_direction_output(215, 1); diff --git a/trunk/arch/mips/alchemy/gpr/init.c b/trunk/arch/mips/alchemy/gpr/init.c index 229aafae680c..f044f4c541d7 
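/*
 * Illustrative, standalone sketch: the alchemy_8250_pm() and GPR
 * board_setup() hunks above bring a UART block up by stepping its
 * UART_MOD_CNTRL register through 0 (off), 1 (clock enable) and 3
 * (clock enable + module enable), with a write barrier between steps on
 * real hardware, and power it down by writing 0 again.  The model below
 * only shows that ordering; the bit names are taken from the comments
 * in the diff and the register is simulated, so treat this as a sketch
 * rather than the driver's actual MMIO access.
 */
#include <stdio.h>
#include <stdint.h>

#define UART_MOD_CE	0x1	/* clock enable, per the diff comments */
#define UART_MOD_EN	0x2	/* module "enable" */

static void uart_power_on(volatile uint32_t *mod_cntrl)
{
	if ((*mod_cntrl & (UART_MOD_CE | UART_MOD_EN)) !=
	    (UART_MOD_CE | UART_MOD_EN)) {
		*mod_cntrl = 0;				/* known-off state */
		/* wmb() between steps on real hardware */
		*mod_cntrl = UART_MOD_CE;		/* clocks first */
	}
	*mod_cntrl = UART_MOD_CE | UART_MOD_EN;		/* fully on */
}

static void uart_power_off(volatile uint32_t *mod_cntrl)
{
	*mod_cntrl = 0;					/* clocks and module off */
}

int main(void)
{
	uint32_t mod_cntrl = 0;

	uart_power_on(&mod_cntrl);
	printf("after power on : 0x%x\n", (unsigned)mod_cntrl);	/* 0x3 */
	uart_power_off(&mod_cntrl);
	printf("after power off: 0x%x\n", (unsigned)mod_cntrl);	/* 0x0 */
	return 0;
}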
100644 --- a/trunk/arch/mips/alchemy/gpr/init.c +++ b/trunk/arch/mips/alchemy/gpr/init.c @@ -59,5 +59,5 @@ void __init prom_init(void) void prom_putchar(unsigned char c) { - alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); + alchemy_uart_putchar(UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/alchemy/mtx-1/board_setup.c b/trunk/arch/mips/alchemy/mtx-1/board_setup.c index 3ae984cf98cf..cf436ab679ae 100644 --- a/trunk/arch/mips/alchemy/mtx-1/board_setup.c +++ b/trunk/arch/mips/alchemy/mtx-1/board_setup.c @@ -87,7 +87,7 @@ void __init board_setup(void) au_writel(SYS_PF_NI2, SYS_PINFUNC); /* Initialize GPIO */ - au_writel(~0, KSEG1ADDR(AU1000_SYS_PHYS_ADDR) + SYS_TRIOUTCLR); + au_writel(0xFFFFFFFF, SYS_TRIOUTCLR); alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ diff --git a/trunk/arch/mips/alchemy/mtx-1/init.c b/trunk/arch/mips/alchemy/mtx-1/init.c index 2e81cc7f3422..f8d25575fa05 100644 --- a/trunk/arch/mips/alchemy/mtx-1/init.c +++ b/trunk/arch/mips/alchemy/mtx-1/init.c @@ -62,5 +62,5 @@ void __init prom_init(void) void prom_putchar(unsigned char c) { - alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); + alchemy_uart_putchar(UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/alchemy/mtx-1/platform.c b/trunk/arch/mips/alchemy/mtx-1/platform.c index 55628e390fd7..956f946218c5 100644 --- a/trunk/arch/mips/alchemy/mtx-1/platform.c +++ b/trunk/arch/mips/alchemy/mtx-1/platform.c @@ -53,8 +53,8 @@ static struct platform_device mtx1_button = { static struct resource mtx1_wdt_res[] = { [0] = { - .start = 215, - .end = 215, + .start = 15, + .end = 15, .name = "mtx1-wdt-gpio", .flags = IORESOURCE_IRQ, } diff --git a/trunk/arch/mips/alchemy/xxs1500/board_setup.c b/trunk/arch/mips/alchemy/xxs1500/board_setup.c index 81e57fad07ab..febfb0fb0896 100644 --- a/trunk/arch/mips/alchemy/xxs1500/board_setup.c +++ b/trunk/arch/mips/alchemy/xxs1500/board_setup.c @@ -66,10 +66,13 @@ void __init board_setup(void) au_writel(pin_func, SYS_PINFUNC); /* Enable UART */ - alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); - /* Enable DTR (MCR bit 0) = USB power up */ - __raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18)); - wmb(); + au_writel(0x01, UART3_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ + mdelay(10); + au_writel(0x03, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ + mdelay(10); + + /* Enable DTR = USB power up */ + au_writel(0x01, UART3_ADDR + UART_MCR); /* UART_MCR_DTR is 0x01??? 
*/ #ifdef CONFIG_PCI #if defined(__MIPSEB__) diff --git a/trunk/arch/mips/alchemy/xxs1500/init.c b/trunk/arch/mips/alchemy/xxs1500/init.c index 0ee02cfa989d..15125c2fda7d 100644 --- a/trunk/arch/mips/alchemy/xxs1500/init.c +++ b/trunk/arch/mips/alchemy/xxs1500/init.c @@ -51,13 +51,14 @@ void __init prom_init(void) prom_init_cmdline(); memsize_str = prom_getenv("memsize"); - if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) + if (!memsize_str) memsize = 0x04000000; - + else + strict_strtoul(memsize_str, 0, &memsize); add_memory_region(0, memsize, BOOT_MEM_RAM); } void prom_putchar(unsigned char c) { - alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); + alchemy_uart_putchar(UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/ar7/gpio.c b/trunk/arch/mips/ar7/gpio.c index bb571bcdb8f2..425dfa5d6e12 100644 --- a/trunk/arch/mips/ar7/gpio.c +++ b/trunk/arch/mips/ar7/gpio.c @@ -325,7 +325,9 @@ int __init ar7_gpio_init(void) size = 0x1f; } - gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size); + gpch->regs = ioremap_nocache(AR7_REGS_GPIO, + AR7_REGS_GPIO + 0x10); + if (!gpch->regs) { printk(KERN_ERR "%s: failed to ioremap regs\n", gpch->chip.label); diff --git a/trunk/arch/mips/bcm47xx/nvram.c b/trunk/arch/mips/bcm47xx/nvram.c index 54db815bc86c..e5b6615731e5 100644 --- a/trunk/arch/mips/bcm47xx/nvram.c +++ b/trunk/arch/mips/bcm47xx/nvram.c @@ -3,7 +3,6 @@ * * Copyright (C) 2005 Broadcom Corporation * Copyright (C) 2006 Felix Fietkau - * Copyright (C) 2010-2011 Hauke Mehrtens * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -24,7 +23,7 @@ static char nvram_buf[NVRAM_SPACE]; /* Probe for NVRAM header */ -static void early_nvram_init(void) +static void __init early_nvram_init(void) { struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore; struct nvram_header *header; diff --git a/trunk/arch/mips/bcm47xx/setup.c b/trunk/arch/mips/bcm47xx/setup.c index 73b529b57433..c95f90bf734c 100644 --- a/trunk/arch/mips/bcm47xx/setup.c +++ b/trunk/arch/mips/bcm47xx/setup.c @@ -3,7 +3,6 @@ * Copyright (C) 2006 Felix Fietkau * Copyright (C) 2006 Michael Buesch * Copyright (C) 2010 Waldemar Brodkorb - * Copyright (C) 2010-2011 Hauke Mehrtens * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -58,49 +57,10 @@ static void bcm47xx_machine_halt(void) } #define READ_FROM_NVRAM(_outvar, name, buf) \ - if (nvram_getprefix(prefix, name, buf, sizeof(buf)) >= 0)\ + if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\ sprom->_outvar = simple_strtoul(buf, NULL, 0); -#define READ_FROM_NVRAM2(_outvar, name1, name2, buf) \ - if (nvram_getprefix(prefix, name1, buf, sizeof(buf)) >= 0 || \ - nvram_getprefix(prefix, name2, buf, sizeof(buf)) >= 0)\ - sprom->_outvar = simple_strtoul(buf, NULL, 0); - -static inline int nvram_getprefix(const char *prefix, char *name, - char *buf, int len) -{ - if (prefix) { - char key[100]; - - snprintf(key, sizeof(key), "%s%s", prefix, name); - return nvram_getenv(key, buf, len); - } - - return nvram_getenv(name, buf, len); -} - -static u32 nvram_getu32(const char *name, char *buf, int len) -{ - int rv; - char key[100]; - u16 var0, var1; - - snprintf(key, sizeof(key), "%s0", name); - rv = nvram_getenv(key, buf, len); - /* return 0 here so this looks like unset */ - if (rv < 0) - return 0; - var0 = simple_strtoul(buf, NULL, 0); - - snprintf(key, sizeof(key), "%s1", name); - rv = nvram_getenv(key, buf, len); - 
if (rv < 0) - return 0; - var1 = simple_strtoul(buf, NULL, 0); - return var1 << 16 | var0; -} - -static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) +static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) { char buf[100]; u32 boardflags; @@ -109,12 +69,11 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) sprom->revision = 1; /* Fallback: Old hardware does not define this. */ READ_FROM_NVRAM(revision, "sromrev", buf); - if (nvram_getprefix(prefix, "il0macaddr", buf, sizeof(buf)) >= 0 || - nvram_getprefix(prefix, "macaddr", buf, sizeof(buf)) >= 0) + if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0) nvram_parse_macaddr(buf, sprom->il0mac); - if (nvram_getprefix(prefix, "et0macaddr", buf, sizeof(buf)) >= 0) + if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) nvram_parse_macaddr(buf, sprom->et0mac); - if (nvram_getprefix(prefix, "et1macaddr", buf, sizeof(buf)) >= 0) + if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) nvram_parse_macaddr(buf, sprom->et1mac); READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); @@ -136,36 +95,20 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf); READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); - READ_FROM_NVRAM2(gpio0, "ledbh0", "wl0gpio0", buf); - READ_FROM_NVRAM2(gpio1, "ledbh1", "wl0gpio1", buf); - READ_FROM_NVRAM2(gpio2, "ledbh2", "wl0gpio2", buf); - READ_FROM_NVRAM2(gpio3, "ledbh3", "wl0gpio3", buf); - READ_FROM_NVRAM2(maxpwr_bg, "maxp2ga0", "pa0maxpwr", buf); - READ_FROM_NVRAM2(maxpwr_al, "maxp5gla0", "pa1lomaxpwr", buf); - READ_FROM_NVRAM2(maxpwr_a, "maxp5ga0", "pa1maxpwr", buf); - READ_FROM_NVRAM2(maxpwr_ah, "maxp5gha0", "pa1himaxpwr", buf); - READ_FROM_NVRAM2(itssi_bg, "itt5ga0", "pa0itssit", buf); - READ_FROM_NVRAM2(itssi_a, "itt2ga0", "pa1itssit", buf); + READ_FROM_NVRAM(gpio0, "wl0gpio0", buf); + READ_FROM_NVRAM(gpio1, "wl0gpio1", buf); + READ_FROM_NVRAM(gpio2, "wl0gpio2", buf); + READ_FROM_NVRAM(gpio3, "wl0gpio3", buf); + READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf); + READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf); + READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf); + READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf); + READ_FROM_NVRAM(itssi_a, "pa1itssit", buf); + READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf); READ_FROM_NVRAM(tri2g, "tri2g", buf); READ_FROM_NVRAM(tri5gl, "tri5gl", buf); READ_FROM_NVRAM(tri5g, "tri5g", buf); READ_FROM_NVRAM(tri5gh, "tri5gh", buf); - READ_FROM_NVRAM(txpid2g[0], "txpid2ga0", buf); - READ_FROM_NVRAM(txpid2g[1], "txpid2ga1", buf); - READ_FROM_NVRAM(txpid2g[2], "txpid2ga2", buf); - READ_FROM_NVRAM(txpid2g[3], "txpid2ga3", buf); - READ_FROM_NVRAM(txpid5g[0], "txpid5ga0", buf); - READ_FROM_NVRAM(txpid5g[1], "txpid5ga1", buf); - READ_FROM_NVRAM(txpid5g[2], "txpid5ga2", buf); - READ_FROM_NVRAM(txpid5g[3], "txpid5ga3", buf); - READ_FROM_NVRAM(txpid5gl[0], "txpid5gla0", buf); - READ_FROM_NVRAM(txpid5gl[1], "txpid5gla1", buf); - READ_FROM_NVRAM(txpid5gl[2], "txpid5gla2", buf); - READ_FROM_NVRAM(txpid5gl[3], "txpid5gla3", buf); - READ_FROM_NVRAM(txpid5gh[0], "txpid5gha0", buf); - READ_FROM_NVRAM(txpid5gh[1], "txpid5gha1", buf); - READ_FROM_NVRAM(txpid5gh[2], "txpid5gha2", buf); - READ_FROM_NVRAM(txpid5gh[3], "txpid5gha3", buf); READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); @@ -177,27 +120,19 @@ static void bcm47xx_fill_sprom(struct ssb_sprom 
*sprom, const char *prefix) READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); READ_FROM_NVRAM(bxa5g, "bxa5g", buf); READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); + READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf); + READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf); + READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf); + READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf); - sprom->ofdm2gpo = nvram_getu32("ofdm2gpo", buf, sizeof(buf)); - sprom->ofdm5glpo = nvram_getu32("ofdm5glpo", buf, sizeof(buf)); - sprom->ofdm5gpo = nvram_getu32("ofdm5gpo", buf, sizeof(buf)); - sprom->ofdm5ghpo = nvram_getu32("ofdm5ghpo", buf, sizeof(buf)); - - READ_FROM_NVRAM(antenna_gain.ghz24.a0, "ag0", buf); - READ_FROM_NVRAM(antenna_gain.ghz24.a1, "ag1", buf); - READ_FROM_NVRAM(antenna_gain.ghz24.a2, "ag2", buf); - READ_FROM_NVRAM(antenna_gain.ghz24.a3, "ag3", buf); - memcpy(&sprom->antenna_gain.ghz5, &sprom->antenna_gain.ghz24, - sizeof(sprom->antenna_gain.ghz5)); - - if (nvram_getprefix(prefix, "boardflags", buf, sizeof(buf)) >= 0) { + if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) { boardflags = simple_strtoul(buf, NULL, 0); if (boardflags) { sprom->boardflags_lo = (boardflags & 0x0000FFFFU); sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; } } - if (nvram_getprefix(prefix, "boardflags2", buf, sizeof(buf)) >= 0) { + if (nvram_getenv("boardflags2", buf, sizeof(buf)) >= 0) { boardflags = simple_strtoul(buf, NULL, 0); if (boardflags) { sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); @@ -206,22 +141,6 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) } } -int bcm47xx_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out) -{ - char prefix[10]; - - if (bus->bustype == SSB_BUSTYPE_PCI) { - snprintf(prefix, sizeof(prefix), "pci/%u/%u/", - bus->host_pci->bus->number + 1, - PCI_SLOT(bus->host_pci->devfn)); - bcm47xx_fill_sprom(out, prefix); - return 0; - } else { - printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n"); - return -EINVAL; - } -} - static int bcm47xx_get_invariants(struct ssb_bus *bus, struct ssb_init_invariants *iv) { @@ -239,7 +158,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus, if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); - bcm47xx_fill_sprom(&iv->sprom, NULL); + bcm47xx_fill_sprom(&iv->sprom); if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); @@ -253,11 +172,6 @@ void __init plat_mem_setup(void) char buf[100]; struct ssb_mipscore *mcore; - err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom); - if (err) - printk(KERN_WARNING "bcm47xx: someone else already registered" - " a ssb SPROM callback handler (err %d)\n", err); - err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, bcm47xx_get_invariants); if (err) diff --git a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c index 40b223b603be..8dba8cfb752f 100644 --- a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -643,17 +643,6 @@ static struct ssb_sprom bcm63xx_sprom = { .boardflags_lo = 0x2848, .boardflags_hi = 0x0000, }; - -int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out) -{ - if (bus->bustype == SSB_BUSTYPE_PCI) { - memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom)); - return 0; - } else { - printk(KERN_ERR PFX "unable to fill SPROM for given bustype.\n"); - return -EINVAL; - } -} #endif /* @@ -804,9 +793,8 @@ void __init board_prom_init(void) if 
(!board_get_mac_address(bcm63xx_sprom.il0mac)) { memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); - if (ssb_arch_register_fallback_sprom( - &bcm63xx_get_fallback_sprom) < 0) - printk(KERN_ERR PFX "failed to register fallback SPROM\n"); + if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) + printk(KERN_ERR "failed to register fallback SPROM\n"); } #endif } diff --git a/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 9a6243676e22..88c9d963be88 100644 --- a/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c @@ -16,8 +16,8 @@ int main(int argc, char *argv[]) { - unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; struct stat sb; + uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; if (argc != 3) { fprintf(stderr, "Usage: %s \n", diff --git a/trunk/arch/mips/boot/compressed/uart-alchemy.c b/trunk/arch/mips/boot/compressed/uart-alchemy.c index eb063e6dead9..1bff22fa089b 100644 --- a/trunk/arch/mips/boot/compressed/uart-alchemy.c +++ b/trunk/arch/mips/boot/compressed/uart-alchemy.c @@ -3,5 +3,5 @@ void putc(char c) { /* all current (Jan. 2010) in-kernel boards */ - alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); + alchemy_uart_putchar(UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/cavium-octeon/Kconfig b/trunk/arch/mips/cavium-octeon/Kconfig index cad555ebeca3..caae22858163 100644 --- a/trunk/arch/mips/cavium-octeon/Kconfig +++ b/trunk/arch/mips/cavium-octeon/Kconfig @@ -1,7 +1,11 @@ -if CPU_CAVIUM_OCTEON +config CAVIUM_OCTEON_SPECIFIC_OPTIONS + bool "Enable Octeon specific options" + depends on CPU_CAVIUM_OCTEON + default "y" config CAVIUM_CN63XXP1 bool "Enable CN63XXP1 errata worarounds" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "n" help The CN63XXP1 chip requires build time workarounds to @@ -12,6 +16,7 @@ config CAVIUM_CN63XXP1 config CAVIUM_OCTEON_2ND_KERNEL bool "Build the kernel to be used as a 2nd kernel on the same chip" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "n" help This option configures this kernel to be linked at a different @@ -21,6 +26,7 @@ config CAVIUM_OCTEON_2ND_KERNEL config CAVIUM_OCTEON_HW_FIX_UNALIGNED bool "Enable hardware fixups of unaligned loads and stores" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "y" help Configure the Octeon hardware to automatically fix unaligned loads @@ -32,6 +38,7 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED config CAVIUM_OCTEON_CVMSEG_SIZE int "Number of L1 cache lines reserved for CVMSEG memory" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS range 0 54 default 1 help @@ -43,6 +50,7 @@ config CAVIUM_OCTEON_CVMSEG_SIZE config CAVIUM_OCTEON_LOCK_L2 bool "Lock often used kernel code in the L2" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "y" help Enable locking parts of the kernel into the L2 cache. 
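/*
 * Illustrative, standalone sketch: the bcm47xx_fill_sprom() hunks above
 * keep splitting the 32-bit "boardflags"/"boardflags2" NVRAM values into
 * 16-bit lo/hi SPROM fields, while the removed nvram_getu32() helper did
 * the opposite, assembling a 32-bit value (e.g. ofdm2gpo) from two
 * 16-bit NVRAM entries named "<name>0" and "<name>1".  The program
 * below only demonstrates that split/combine arithmetic; the sample
 * values are made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	/* split, as in bcm47xx_fill_sprom() */
	uint32_t boardflags = strtoul("0x12348756", NULL, 0);
	uint16_t lo = boardflags & 0x0000FFFFU;
	uint16_t hi = (boardflags & 0xFFFF0000U) >> 16;

	/* combine, as in the removed nvram_getu32() helper */
	uint16_t var0 = 0x8756;		/* would come from "ofdm2gpo0" */
	uint16_t var1 = 0x1234;		/* would come from "ofdm2gpo1" */
	uint32_t combined = (uint32_t)var1 << 16 | var0;

	printf("boardflags_lo=0x%04x boardflags_hi=0x%04x\n",
	       (unsigned)lo, (unsigned)hi);
	printf("ofdm2gpo=0x%08x\n", (unsigned)combined);
	return 0;
}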
@@ -85,6 +93,7 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY config ARCH_SPARSEMEM_ENABLE def_bool y select SPARSEMEM_STATIC + depends on CPU_CAVIUM_OCTEON config CAVIUM_OCTEON_HELPER def_bool y @@ -98,8 +107,6 @@ config NEED_SG_DMA_LENGTH config SWIOTLB def_bool y + depends on CPU_CAVIUM_OCTEON select IOMMU_HELPER select NEED_SG_DMA_LENGTH - - -endif # CPU_CAVIUM_OCTEON diff --git a/trunk/arch/mips/cavium-octeon/csrc-octeon.c b/trunk/arch/mips/cavium-octeon/csrc-octeon.c index 29d56afbb02d..26bf71130bf8 100644 --- a/trunk/arch/mips/cavium-octeon/csrc-octeon.c +++ b/trunk/arch/mips/cavium-octeon/csrc-octeon.c @@ -105,7 +105,8 @@ unsigned long long notrace sched_clock(void) void __init plat_time_init(void) { clocksource_mips.rating = 300; - clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate()); + clocksource_set_clock(&clocksource_mips, octeon_get_clock_rate()); + clocksource_register(&clocksource_mips); } static u64 octeon_udelay_factor; diff --git a/trunk/arch/mips/cavium-octeon/setup.c b/trunk/arch/mips/cavium-octeon/setup.c index 2d9028f1474c..0707fae3f0ee 100644 --- a/trunk/arch/mips/cavium-octeon/setup.c +++ b/trunk/arch/mips/cavium-octeon/setup.c @@ -288,6 +288,7 @@ void octeon_user_io_init(void) union octeon_cvmemctl cvmmemctl; union cvmx_iob_fau_timeout fau_timeout; union cvmx_pow_nw_tim nm_tim; + uint64_t cvmctl; /* Get the current settings for CP0_CVMMEMCTL_REG */ cvmmemctl.u64 = read_c0_cvmmemctl(); @@ -391,6 +392,12 @@ void octeon_user_io_init(void) CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); + /* Move the performance counter interrupts to IRQ 6 */ + cvmctl = read_c0_cvmctl(); + cvmctl &= ~(7 << 7); + cvmctl |= 6 << 7; + write_c0_cvmctl(cvmctl); + /* Set a default for the hardware timeouts */ fau_timeout.u64 = 0; fau_timeout.s.tout_val = 0xfff; diff --git a/trunk/arch/mips/cavium-octeon/smp.c b/trunk/arch/mips/cavium-octeon/smp.c index 8b606423bbd7..ba78b21cc8d0 100644 --- a/trunk/arch/mips/cavium-octeon/smp.c +++ b/trunk/arch/mips/cavium-octeon/smp.c @@ -37,15 +37,13 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) uint64_t action; /* Load the mailbox register to figure out what we're supposed to do */ - action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff; + action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)); /* Clear the mailbox to clear the interrupt */ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); - if (action & SMP_RESCHEDULE_YOURSELF) - scheduler_ipi(); /* Check if we've been told to flush the icache */ if (action & SMP_ICACHE_FLUSH) @@ -202,15 +200,16 @@ void octeon_prepare_cpus(unsigned int max_cpus) if (labi->labi_signature != LABI_SIGNATURE) panic("The bootloader version on this board is incorrect."); #endif - /* - * Only the low order mailbox bits are used for IPIs, leave - * the other bits alone. 
- */ - cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); + + cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, - "SMP-IPI", mailbox_interrupt)) { + "mailbox0", mailbox_interrupt)) { panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); } + if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED, + "mailbox1", mailbox_interrupt)) { + panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n"); + } } /** diff --git a/trunk/arch/mips/configs/lemote2f_defconfig b/trunk/arch/mips/configs/lemote2f_defconfig index b6acd2f256b6..167c1d07b809 100644 --- a/trunk/arch/mips/configs/lemote2f_defconfig +++ b/trunk/arch/mips/configs/lemote2f_defconfig @@ -86,8 +86,8 @@ CONFIG_NET_SCHED=y CONFIG_NET_EMATCH=y CONFIG_NET_CLS_ACT=y CONFIG_BT=m -CONFIG_BT_L2CAP=y -CONFIG_BT_SCO=y +CONFIG_BT_L2CAP=m +CONFIG_BT_SCO=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m @@ -329,7 +329,7 @@ CONFIG_USB_LED=m CONFIG_USB_GADGET=m CONFIG_USB_GADGET_M66592=y CONFIG_MMC=m -CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS=m CONFIG_STAGING=y # CONFIG_STAGING_EXCLUDE_BUILD is not set CONFIG_FB_SM7XX=y diff --git a/trunk/arch/mips/configs/malta_defconfig b/trunk/arch/mips/configs/malta_defconfig index 5527abbb7dea..7270f3183bda 100644 --- a/trunk/arch/mips/configs/malta_defconfig +++ b/trunk/arch/mips/configs/malta_defconfig @@ -374,7 +374,7 @@ CONFIG_FB_CIRRUS=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_HID=m -CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS=m CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_IDE_DISK=y CONFIG_LEDS_TRIGGER_HEARTBEAT=m diff --git a/trunk/arch/mips/configs/mtx1_defconfig b/trunk/arch/mips/configs/mtx1_defconfig index 37862b2ce363..a97a42c6b2c8 100644 --- a/trunk/arch/mips/configs/mtx1_defconfig +++ b/trunk/arch/mips/configs/mtx1_defconfig @@ -225,8 +225,8 @@ CONFIG_TOSHIBA_FIR=m CONFIG_VLSI_FIR=m CONFIG_MCS_FIR=m CONFIG_BT=m -CONFIG_BT_L2CAP=y -CONFIG_BT_SCO=y +CONFIG_BT_L2CAP=m +CONFIG_BT_SCO=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m diff --git a/trunk/arch/mips/configs/nlm_xlr_defconfig b/trunk/arch/mips/configs/nlm_xlr_defconfig deleted file mode 100644 index e4b399fdaa61..000000000000 --- a/trunk/arch/mips/configs/nlm_xlr_defconfig +++ /dev/null @@ -1,574 +0,0 @@ -CONFIG_NLM_XLR_BOARD=y -CONFIG_HIGHMEM=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_SMP=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_PREEMPT_VOLUNTARY=y -CONFIG_KEXEC=y -CONFIG_EXPERIMENTAL=y -CONFIG_CROSS_COMPILE="mips64-unknown-linux-gnu-" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y -CONFIG_NAMESPACES=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs" -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_INITRAMFS_COMPRESSION_GZIP=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_EXPERT=y -CONFIG_KALLSYMS_ALL=y -# CONFIG_ELF_CORE is not set -# CONFIG_PCSPKR_PLATFORM is not set -# CONFIG_PERF_EVENTS is not set -# CONFIG_COMPAT_BRK is not set -CONFIG_PROFILING=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BINFMT_MISC=m -CONFIG_PM_RUNTIME=y -CONFIG_PM_DEBUG=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_INET=y 
-CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_NET_IPIP=m -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_PRIVACY=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_NETLABEL=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_UDPLITE=m -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_FTP=m -CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_IP_NF_QUEUE=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m 
-CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_IP6_NF_QUEUE=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_TARGET_LOG=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_DECNET_NF_GRABULATOR=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_ULOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -CONFIG_IP_DCCP=m -CONFIG_RDS=m -CONFIG_RDS_TCP=m -CONFIG_TIPC=m -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -CONFIG_ATM_LANE=m -CONFIG_ATM_MPOA=m -CONFIG_ATM_BR2684=m -CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_DECNET=m -CONFIG_LLC2=m -CONFIG_IPX=m -CONFIG_ATALK=m -CONFIG_DEV_APPLETALK=m -CONFIG_IPDDP=m -CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y -CONFIG_X25=m -CONFIG_LAPB=m -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y -CONFIG_WAN_ROUTER=m -CONFIG_PHONET=m -CONFIG_IEEE802154=m -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_ATM=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_DCB=y -CONFIG_NET_PKTGEN=m -# CONFIG_WIRELESS is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_STANDALONE is not set -CONFIG_CONNECTOR=y -CONFIG_MTD=m -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_OSD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_CDROM_PKTCDVD=y -CONFIG_MISC_DEVICES=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=y -CONFIG_SCSI_TGT=m -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y 
-CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_MULTI_LUN=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_TGT_ATTRS=y -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SRP_ATTRS=m -CONFIG_SCSI_SRP_TGT_ATTRS=y -CONFIG_ISCSI_TCP=m -CONFIG_LIBFCOE=m -CONFIG_SCSI_DEBUG=m -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=m -CONFIG_SCSI_DH_HP_SW=m -CONFIG_SCSI_DH_EMC=m -CONFIG_SCSI_DH_ALUA=m -CONFIG_SCSI_OSD_INITIATOR=m -CONFIG_SCSI_OSD_ULD=m -# CONFIG_INPUT_MOUSEDEV is not set -CONFIG_INPUT_EVDEV=y -CONFIG_INPUT_EVBUG=m -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_SERIO_I8042 is not set -CONFIG_SERIO_SERPORT=m -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y -CONFIG_LEGACY_PTY_COUNT=0 -CONFIG_SERIAL_NONSTANDARD=y -CONFIG_N_HDLC=m -# CONFIG_DEVKMEM is not set -CONFIG_STALDRV=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_NR_UARTS=48 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -CONFIG_SERIAL_8250_RSA=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -CONFIG_RAW_DRIVER=m -# CONFIG_HWMON is not set -# CONFIG_VGA_CONSOLE is not set -# CONFIG_HID_SUPPORT is not set -# CONFIG_USB_SUPPORT is not set -CONFIG_UIO=y -CONFIG_UIO_PDRV=m -CONFIG_UIO_PDRV_GENIRQ=m -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_GFS2_FS=m -CONFIG_GFS2_FS_LOCKING_DLM=y -CONFIG_OCFS2_FS=m -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -CONFIG_NILFS2_FS=m -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m -CONFIG_AUTOFS4_FS=m -CONFIG_FUSE_FS=y -CONFIG_CUSE=m -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -CONFIG_FSCACHE_HISTOGRAM=y -CONFIG_CACHEFILES=m -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_NTFS_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_CONFIGFS_FS=y -CONFIG_ADFS_FS=m -CONFIG_AFFS_FS=m -CONFIG_ECRYPT_FS=y -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -CONFIG_BEFS_FS=m -CONFIG_BFS_FS=m -CONFIG_EFS_FS=m -CONFIG_CRAMFS=m -CONFIG_SQUASHFS=m -CONFIG_VXFS_FS=m -CONFIG_MINIX_FS=m -CONFIG_OMFS_FS=m -CONFIG_HPFS_FS=m -CONFIG_QNX4FS_FS=m -CONFIG_ROMFS_FS=m -CONFIG_SYSV_FS=m -CONFIG_UFS_FS=m -CONFIG_EXOFS_FS=m -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_FSCACHE=y -CONFIG_NFSD=m -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_CIFS=m -CONFIG_CIFS_WEAK_PW_HASH=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_DFS_UPCALL=y -CONFIG_CIFS_EXPERIMENTAL=y -CONFIG_NCP_FS=m -CONFIG_NCPFS_PACKET_SIGNING=y -CONFIG_NCPFS_IOCTL_LOCKING=y -CONFIG_NCPFS_STRONG=y -CONFIG_NCPFS_NFS_NS=y -CONFIG_NCPFS_OS2_NS=y -CONFIG_NCPFS_NLS=y -CONFIG_NCPFS_EXTRAS=y -CONFIG_CODA_FS=m -CONFIG_AFS_FS=m -CONFIG_PARTITION_ADVANCED=y -CONFIG_ACORN_PARTITION=y -CONFIG_ACORN_PARTITION_ICS=y -CONFIG_ACORN_PARTITION_RISCIX=y -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y 
-CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -CONFIG_SYSV68_PARTITION=y -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="cp437" -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_SCHEDSTATS=y -CONFIG_TIMER_STATS=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_SCHED_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KGDB=y -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_LSM_MMAP_MIN_ADDR=0 -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 -CONFIG_SECURITY_SELINUX_DISABLE=y -CONFIG_SECURITY_SMACK=y -CONFIG_SECURITY_TOMOYO=y -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_CTS=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=m -CONFIG_CRYPTO_LZO=m -CONFIG_CRC_CCITT=m -CONFIG_CRC7=m diff --git a/trunk/arch/mips/include/asm/cache.h b/trunk/arch/mips/include/asm/cache.h index b4db69fbc40c..650ac9ba734c 100644 --- a/trunk/arch/mips/include/asm/cache.h +++ b/trunk/arch/mips/include/asm/cache.h @@ -17,6 +17,6 @@ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define __read_mostly __attribute__((__section__(".data..read_mostly"))) +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) #endif /* _ASM_CACHE_H */ diff --git a/trunk/arch/mips/include/asm/cevt-r4k.h b/trunk/arch/mips/include/asm/cevt-r4k.h index 65f9bdd02f1f..fa4328f9124f 100644 --- a/trunk/arch/mips/include/asm/cevt-r4k.h +++ b/trunk/arch/mips/include/asm/cevt-r4k.h @@ -14,9 +14,6 @@ #ifndef __ASM_CEVT_R4K_H #define __ASM_CEVT_R4K_H -#include -#include - DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); void mips_event_handler(struct clock_event_device *dev); diff --git a/trunk/arch/mips/include/asm/cpu.h b/trunk/arch/mips/include/asm/cpu.h 
index 34c0d3cb116f..86877539c6e8 100644 --- a/trunk/arch/mips/include/asm/cpu.h +++ b/trunk/arch/mips/include/asm/cpu.h @@ -33,7 +33,6 @@ #define PRID_COMP_TOSHIBA 0x070000 #define PRID_COMP_LSI 0x080000 #define PRID_COMP_LEXRA 0x0b0000 -#define PRID_COMP_NETLOGIC 0x0c0000 #define PRID_COMP_CAVIUM 0x0d0000 #define PRID_COMP_INGENIC 0xd00000 @@ -142,31 +141,6 @@ #define PRID_IMP_JZRISC 0x0200 -/* - * These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC - */ -#define PRID_IMP_NETLOGIC_XLR732 0x0000 -#define PRID_IMP_NETLOGIC_XLR716 0x0200 -#define PRID_IMP_NETLOGIC_XLR532 0x0900 -#define PRID_IMP_NETLOGIC_XLR308 0x0600 -#define PRID_IMP_NETLOGIC_XLR532C 0x0800 -#define PRID_IMP_NETLOGIC_XLR516C 0x0a00 -#define PRID_IMP_NETLOGIC_XLR508C 0x0b00 -#define PRID_IMP_NETLOGIC_XLR308C 0x0f00 -#define PRID_IMP_NETLOGIC_XLS608 0x8000 -#define PRID_IMP_NETLOGIC_XLS408 0x8800 -#define PRID_IMP_NETLOGIC_XLS404 0x8c00 -#define PRID_IMP_NETLOGIC_XLS208 0x8e00 -#define PRID_IMP_NETLOGIC_XLS204 0x8f00 -#define PRID_IMP_NETLOGIC_XLS108 0xce00 -#define PRID_IMP_NETLOGIC_XLS104 0xcf00 -#define PRID_IMP_NETLOGIC_XLS616B 0x4000 -#define PRID_IMP_NETLOGIC_XLS608B 0x4a00 -#define PRID_IMP_NETLOGIC_XLS416B 0x4400 -#define PRID_IMP_NETLOGIC_XLS412B 0x4c00 -#define PRID_IMP_NETLOGIC_XLS408B 0x4e00 -#define PRID_IMP_NETLOGIC_XLS404B 0x4f00 - /* * Definitions for 7:0 on legacy processors */ @@ -260,7 +234,6 @@ enum cpu_type_enum { */ CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, - CPU_XLR, CPU_LAST }; diff --git a/trunk/arch/mips/include/asm/dma-mapping.h b/trunk/arch/mips/include/asm/dma-mapping.h index 7aa37ddfca4b..655f849bd08d 100644 --- a/trunk/arch/mips/include/asm/dma-mapping.h +++ b/trunk/arch/mips/include/asm/dma-mapping.h @@ -5,9 +5,7 @@ #include #include -#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */ #include -#endif extern struct dma_map_ops *mips_dma_map_ops; diff --git a/trunk/arch/mips/include/asm/hugetlb.h b/trunk/arch/mips/include/asm/hugetlb.h index c565b7c3f0b5..f5e856015329 100644 --- a/trunk/arch/mips/include/asm/hugetlb.h +++ b/trunk/arch/mips/include/asm/hugetlb.h @@ -70,7 +70,6 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { - flush_tlb_mm(vma->vm_mm); } static inline int huge_pte_none(pte_t pte) diff --git a/trunk/arch/mips/include/asm/i8253.h b/trunk/arch/mips/include/asm/i8253.h index 9ad011366f73..48bb82372994 100644 --- a/trunk/arch/mips/include/asm/i8253.h +++ b/trunk/arch/mips/include/asm/i8253.h @@ -12,13 +12,8 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -#define PIT_LATCH LATCH - extern raw_spinlock_t i8253_lock; extern void setup_pit_timer(void); -#define inb_pit inb_p -#define outb_pit outb_p - #endif /* __ASM_I8253_H */ diff --git a/trunk/arch/mips/include/asm/jump_label.h b/trunk/arch/mips/include/asm/jump_label.h index 1881b316ca45..7622ccf75076 100644 --- a/trunk/arch/mips/include/asm/jump_label.h +++ b/trunk/arch/mips/include/asm/jump_label.h @@ -20,18 +20,16 @@ #define WORD_INSN ".word" #endif -static __always_inline bool arch_static_branch(struct jump_label_key *key) -{ - asm goto("1:\tnop\n\t" - "nop\n\t" - ".pushsection __jump_table, \"aw\"\n\t" - WORD_INSN " 1b, %l[l_yes], %0\n\t" - ".popsection\n\t" - : : "i" (key) : : l_yes); - return false; -l_yes: - return true; -} +#define JUMP_LABEL(key, label) \ + do { \ + asm goto("1:\tnop\n\t" \ + "nop\n\t" 
\ + ".pushsection __jump_table, \"a\"\n\t" \ + WORD_INSN " 1b, %l[" #label "], %0\n\t" \ + ".popsection\n\t" \ + : : "i" (key) : : label); \ + } while (0) + #endif /* __KERNEL__ */ diff --git a/trunk/arch/mips/include/asm/mach-au1x00/au1000.h b/trunk/arch/mips/include/asm/mach-au1x00/au1000.h index f260ebed713b..a6976619160a 100644 --- a/trunk/arch/mips/include/asm/mach-au1x00/au1000.h +++ b/trunk/arch/mips/include/asm/mach-au1x00/au1000.h @@ -161,45 +161,6 @@ static inline int alchemy_get_cputype(void) return ALCHEMY_CPU_UNKNOWN; } -/* return number of uarts on a given cputype */ -static inline int alchemy_get_uarts(int type) -{ - switch (type) { - case ALCHEMY_CPU_AU1000: - return 4; - case ALCHEMY_CPU_AU1500: - case ALCHEMY_CPU_AU1200: - return 2; - case ALCHEMY_CPU_AU1100: - case ALCHEMY_CPU_AU1550: - return 3; - } - return 0; -} - -/* enable an UART block if it isn't already */ -static inline void alchemy_uart_enable(u32 uart_phys) -{ - void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); - - /* reset, enable clock, deassert reset */ - if ((__raw_readl(addr + 0x100) & 3) != 3) { - __raw_writel(0, addr + 0x100); - wmb(); - __raw_writel(1, addr + 0x100); - wmb(); - } - __raw_writel(3, addr + 0x100); - wmb(); -} - -static inline void alchemy_uart_disable(u32 uart_phys) -{ - void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); - __raw_writel(0, addr + 0x100); /* UART_MOD_CNTRL */ - wmb(); -} - static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) { void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys); @@ -219,20 +180,6 @@ static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) wmb(); } -/* return number of ethernet MACs on a given cputype */ -static inline int alchemy_get_macs(int type) -{ - switch (type) { - case ALCHEMY_CPU_AU1000: - case ALCHEMY_CPU_AU1500: - case ALCHEMY_CPU_AU1550: - return 2; - case ALCHEMY_CPU_AU1100: - return 1; - } - return 0; -} - /* arch/mips/au1000/common/clocks.c */ extern void set_au1x00_speed(unsigned int new_freq); extern unsigned int get_au1x00_speed(void); @@ -683,42 +630,38 @@ enum soc_au1200_ints { /* * Physical base addresses for integrated peripherals - * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 */ -#define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */ -#define AU1000_USBD_PHYS_ADDR 0x10200000 /* 0123 */ -#define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */ -#define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */ -#define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */ -#define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */ -#define AU1100_SD0_PHYS_ADDR 0x10600000 /* 24 */ -#define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */ -#define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */ -#define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */ -#define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */ -#define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */ -#define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */ -#define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */ -#define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */ -#define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */ -#define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */ -#define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */ -#define AU1000_SYS_PHYS_ADDR 0x11900000 /* 01234 */ -#define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */ -#define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 34 */ -#define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 34 */ -#define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */ -#define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */ - - #ifdef CONFIG_SOC_AU1000 #define MEM_PHYS_ADDR 0x14000000 #define 
STATIC_MEM_PHYS_ADDR 0x14001000 +#define DMA0_PHYS_ADDR 0x14002000 +#define DMA1_PHYS_ADDR 0x14002100 +#define DMA2_PHYS_ADDR 0x14002200 +#define DMA3_PHYS_ADDR 0x14002300 +#define DMA4_PHYS_ADDR 0x14002400 +#define DMA5_PHYS_ADDR 0x14002500 +#define DMA6_PHYS_ADDR 0x14002600 +#define DMA7_PHYS_ADDR 0x14002700 +#define IC0_PHYS_ADDR 0x10400000 +#define IC1_PHYS_ADDR 0x11800000 +#define AC97_PHYS_ADDR 0x10000000 #define USBH_PHYS_ADDR 0x10100000 +#define USBD_PHYS_ADDR 0x10200000 #define IRDA_PHYS_ADDR 0x10300000 +#define MAC0_PHYS_ADDR 0x10500000 +#define MAC1_PHYS_ADDR 0x10510000 +#define MACEN_PHYS_ADDR 0x10520000 +#define MACDMA0_PHYS_ADDR 0x14004000 +#define MACDMA1_PHYS_ADDR 0x14004200 +#define I2S_PHYS_ADDR 0x11000000 +#define UART0_PHYS_ADDR 0x11100000 +#define UART1_PHYS_ADDR 0x11200000 +#define UART2_PHYS_ADDR 0x11300000 +#define UART3_PHYS_ADDR 0x11400000 #define SSI0_PHYS_ADDR 0x11600000 #define SSI1_PHYS_ADDR 0x11680000 +#define SYS_PHYS_ADDR 0x11900000 #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL @@ -729,8 +672,30 @@ enum soc_au1200_ints { #ifdef CONFIG_SOC_AU1500 #define MEM_PHYS_ADDR 0x14000000 #define STATIC_MEM_PHYS_ADDR 0x14001000 +#define DMA0_PHYS_ADDR 0x14002000 +#define DMA1_PHYS_ADDR 0x14002100 +#define DMA2_PHYS_ADDR 0x14002200 +#define DMA3_PHYS_ADDR 0x14002300 +#define DMA4_PHYS_ADDR 0x14002400 +#define DMA5_PHYS_ADDR 0x14002500 +#define DMA6_PHYS_ADDR 0x14002600 +#define DMA7_PHYS_ADDR 0x14002700 +#define IC0_PHYS_ADDR 0x10400000 +#define IC1_PHYS_ADDR 0x11800000 +#define AC97_PHYS_ADDR 0x10000000 #define USBH_PHYS_ADDR 0x10100000 +#define USBD_PHYS_ADDR 0x10200000 #define PCI_PHYS_ADDR 0x14005000 +#define MAC0_PHYS_ADDR 0x11500000 +#define MAC1_PHYS_ADDR 0x11510000 +#define MACEN_PHYS_ADDR 0x11520000 +#define MACDMA0_PHYS_ADDR 0x14004000 +#define MACDMA1_PHYS_ADDR 0x14004200 +#define I2S_PHYS_ADDR 0x11000000 +#define UART0_PHYS_ADDR 0x11100000 +#define UART3_PHYS_ADDR 0x11400000 +#define GPIO2_PHYS_ADDR 0x11700000 +#define SYS_PHYS_ADDR 0x11900000 #define PCI_MEM_PHYS_ADDR 0x400000000ULL #define PCI_IO_PHYS_ADDR 0x500000000ULL #define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL @@ -745,10 +710,34 @@ enum soc_au1200_ints { #ifdef CONFIG_SOC_AU1100 #define MEM_PHYS_ADDR 0x14000000 #define STATIC_MEM_PHYS_ADDR 0x14001000 +#define DMA0_PHYS_ADDR 0x14002000 +#define DMA1_PHYS_ADDR 0x14002100 +#define DMA2_PHYS_ADDR 0x14002200 +#define DMA3_PHYS_ADDR 0x14002300 +#define DMA4_PHYS_ADDR 0x14002400 +#define DMA5_PHYS_ADDR 0x14002500 +#define DMA6_PHYS_ADDR 0x14002600 +#define DMA7_PHYS_ADDR 0x14002700 +#define IC0_PHYS_ADDR 0x10400000 +#define SD0_PHYS_ADDR 0x10600000 +#define SD1_PHYS_ADDR 0x10680000 +#define IC1_PHYS_ADDR 0x11800000 +#define AC97_PHYS_ADDR 0x10000000 #define USBH_PHYS_ADDR 0x10100000 +#define USBD_PHYS_ADDR 0x10200000 #define IRDA_PHYS_ADDR 0x10300000 +#define MAC0_PHYS_ADDR 0x10500000 +#define MACEN_PHYS_ADDR 0x10520000 +#define MACDMA0_PHYS_ADDR 0x14004000 +#define MACDMA1_PHYS_ADDR 0x14004200 +#define I2S_PHYS_ADDR 0x11000000 +#define UART0_PHYS_ADDR 0x11100000 +#define UART1_PHYS_ADDR 0x11200000 +#define UART3_PHYS_ADDR 0x11400000 #define SSI0_PHYS_ADDR 0x11600000 #define SSI1_PHYS_ADDR 0x11680000 +#define GPIO2_PHYS_ADDR 0x11700000 +#define SYS_PHYS_ADDR 0x11900000 #define LCD_PHYS_ADDR 0x15000000 #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL @@ -760,8 +749,22 @@ enum soc_au1200_ints { #ifdef CONFIG_SOC_AU1550 #define 
MEM_PHYS_ADDR 0x14000000 #define STATIC_MEM_PHYS_ADDR 0x14001000 +#define IC0_PHYS_ADDR 0x10400000 +#define IC1_PHYS_ADDR 0x11800000 #define USBH_PHYS_ADDR 0x14020000 +#define USBD_PHYS_ADDR 0x10200000 #define PCI_PHYS_ADDR 0x14005000 +#define MAC0_PHYS_ADDR 0x10500000 +#define MAC1_PHYS_ADDR 0x10510000 +#define MACEN_PHYS_ADDR 0x10520000 +#define MACDMA0_PHYS_ADDR 0x14004000 +#define MACDMA1_PHYS_ADDR 0x14004200 +#define UART0_PHYS_ADDR 0x11100000 +#define UART1_PHYS_ADDR 0x11200000 +#define UART3_PHYS_ADDR 0x11400000 +#define GPIO2_PHYS_ADDR 0x11700000 +#define SYS_PHYS_ADDR 0x11900000 +#define DDMA_PHYS_ADDR 0x14002000 #define PE_PHYS_ADDR 0x14008000 #define PSC0_PHYS_ADDR 0x11A00000 #define PSC1_PHYS_ADDR 0x11B00000 @@ -783,10 +786,19 @@ enum soc_au1200_ints { #define STATIC_MEM_PHYS_ADDR 0x14001000 #define AES_PHYS_ADDR 0x10300000 #define CIM_PHYS_ADDR 0x14004000 +#define IC0_PHYS_ADDR 0x10400000 +#define IC1_PHYS_ADDR 0x11800000 #define USBM_PHYS_ADDR 0x14020000 #define USBH_PHYS_ADDR 0x14020100 +#define UART0_PHYS_ADDR 0x11100000 +#define UART1_PHYS_ADDR 0x11200000 +#define GPIO2_PHYS_ADDR 0x11700000 +#define SYS_PHYS_ADDR 0x11900000 +#define DDMA_PHYS_ADDR 0x14002000 #define PSC0_PHYS_ADDR 0x11A00000 #define PSC1_PHYS_ADDR 0x11B00000 +#define SD0_PHYS_ADDR 0x10600000 +#define SD1_PHYS_ADDR 0x10680000 #define LCD_PHYS_ADDR 0x15000000 #define SWCNT_PHYS_ADDR 0x1110010C #define MAEFE_PHYS_ADDR 0x14012000 @@ -823,43 +835,183 @@ enum soc_au1200_ints { #endif +/* Interrupt Controller register offsets */ +#define IC_CFG0RD 0x40 +#define IC_CFG0SET 0x40 +#define IC_CFG0CLR 0x44 +#define IC_CFG1RD 0x48 +#define IC_CFG1SET 0x48 +#define IC_CFG1CLR 0x4C +#define IC_CFG2RD 0x50 +#define IC_CFG2SET 0x50 +#define IC_CFG2CLR 0x54 +#define IC_REQ0INT 0x54 +#define IC_SRCRD 0x58 +#define IC_SRCSET 0x58 +#define IC_SRCCLR 0x5C +#define IC_REQ1INT 0x5C +#define IC_ASSIGNRD 0x60 +#define IC_ASSIGNSET 0x60 +#define IC_ASSIGNCLR 0x64 +#define IC_WAKERD 0x68 +#define IC_WAKESET 0x68 +#define IC_WAKECLR 0x6C +#define IC_MASKRD 0x70 +#define IC_MASKSET 0x70 +#define IC_MASKCLR 0x74 +#define IC_RISINGRD 0x78 +#define IC_RISINGCLR 0x78 +#define IC_FALLINGRD 0x7C +#define IC_FALLINGCLR 0x7C +#define IC_TESTBIT 0x80 + + +/* Interrupt Controller 0 */ +#define IC0_CFG0RD 0xB0400040 +#define IC0_CFG0SET 0xB0400040 +#define IC0_CFG0CLR 0xB0400044 + +#define IC0_CFG1RD 0xB0400048 +#define IC0_CFG1SET 0xB0400048 +#define IC0_CFG1CLR 0xB040004C + +#define IC0_CFG2RD 0xB0400050 +#define IC0_CFG2SET 0xB0400050 +#define IC0_CFG2CLR 0xB0400054 + +#define IC0_REQ0INT 0xB0400054 +#define IC0_SRCRD 0xB0400058 +#define IC0_SRCSET 0xB0400058 +#define IC0_SRCCLR 0xB040005C +#define IC0_REQ1INT 0xB040005C + +#define IC0_ASSIGNRD 0xB0400060 +#define IC0_ASSIGNSET 0xB0400060 +#define IC0_ASSIGNCLR 0xB0400064 + +#define IC0_WAKERD 0xB0400068 +#define IC0_WAKESET 0xB0400068 +#define IC0_WAKECLR 0xB040006C + +#define IC0_MASKRD 0xB0400070 +#define IC0_MASKSET 0xB0400070 +#define IC0_MASKCLR 0xB0400074 + +#define IC0_RISINGRD 0xB0400078 +#define IC0_RISINGCLR 0xB0400078 +#define IC0_FALLINGRD 0xB040007C +#define IC0_FALLINGCLR 0xB040007C + +#define IC0_TESTBIT 0xB0400080 + +/* Interrupt Controller 1 */ +#define IC1_CFG0RD 0xB1800040 +#define IC1_CFG0SET 0xB1800040 +#define IC1_CFG0CLR 0xB1800044 + +#define IC1_CFG1RD 0xB1800048 +#define IC1_CFG1SET 0xB1800048 +#define IC1_CFG1CLR 0xB180004C + +#define IC1_CFG2RD 0xB1800050 +#define IC1_CFG2SET 0xB1800050 +#define IC1_CFG2CLR 0xB1800054 + +#define IC1_REQ0INT 0xB1800054 +#define 
IC1_SRCRD 0xB1800058 +#define IC1_SRCSET 0xB1800058 +#define IC1_SRCCLR 0xB180005C +#define IC1_REQ1INT 0xB180005C + +#define IC1_ASSIGNRD 0xB1800060 +#define IC1_ASSIGNSET 0xB1800060 +#define IC1_ASSIGNCLR 0xB1800064 + +#define IC1_WAKERD 0xB1800068 +#define IC1_WAKESET 0xB1800068 +#define IC1_WAKECLR 0xB180006C + +#define IC1_MASKRD 0xB1800070 +#define IC1_MASKSET 0xB1800070 +#define IC1_MASKCLR 0xB1800074 + +#define IC1_RISINGRD 0xB1800078 +#define IC1_RISINGCLR 0xB1800078 +#define IC1_FALLINGRD 0xB180007C +#define IC1_FALLINGCLR 0xB180007C + +#define IC1_TESTBIT 0xB1800080 /* Au1000 */ #ifdef CONFIG_SOC_AU1000 +#define UART0_ADDR 0xB1100000 +#define UART3_ADDR 0xB1400000 + #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ #define USB_HOST_CONFIG 0xB017FFFC #define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT + +#define AU1000_ETH0_BASE 0xB0500000 +#define AU1000_ETH1_BASE 0xB0510000 +#define AU1000_MAC0_ENABLE 0xB0520000 +#define AU1000_MAC1_ENABLE 0xB0520004 +#define NUM_ETH_INTERFACES 2 #endif /* CONFIG_SOC_AU1000 */ /* Au1500 */ #ifdef CONFIG_SOC_AU1500 +#define UART0_ADDR 0xB1100000 +#define UART3_ADDR 0xB1400000 + #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ #define USB_HOST_CONFIG 0xB017fffc #define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT + +#define AU1500_ETH0_BASE 0xB1500000 +#define AU1500_ETH1_BASE 0xB1510000 +#define AU1500_MAC0_ENABLE 0xB1520000 +#define AU1500_MAC1_ENABLE 0xB1520004 +#define NUM_ETH_INTERFACES 2 #endif /* CONFIG_SOC_AU1500 */ /* Au1100 */ #ifdef CONFIG_SOC_AU1100 +#define UART0_ADDR 0xB1100000 +#define UART3_ADDR 0xB1400000 + #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ #define USB_HOST_CONFIG 0xB017FFFC #define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT + +#define AU1100_ETH0_BASE 0xB0500000 +#define AU1100_MAC0_ENABLE 0xB0520000 +#define NUM_ETH_INTERFACES 1 #endif /* CONFIG_SOC_AU1100 */ #ifdef CONFIG_SOC_AU1550 +#define UART0_ADDR 0xB1100000 #define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */ #define USB_OHCI_LEN 0x00060000 #define USB_HOST_CONFIG 0xB4027ffc #define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT + +#define AU1550_ETH0_BASE 0xB0500000 +#define AU1550_ETH1_BASE 0xB0510000 +#define AU1550_MAC0_ENABLE 0xB0520000 +#define AU1550_MAC1_ENABLE 0xB0520004 +#define NUM_ETH_INTERFACES 2 #endif /* CONFIG_SOC_AU1550 */ #ifdef CONFIG_SOC_AU1200 +#define UART0_ADDR 0xB1100000 + #define USB_UOC_BASE 0x14020020 #define USB_UOC_LEN 0x20 #define USB_OHCI_BASE 0x14020100 @@ -1352,6 +1504,22 @@ enum soc_au1200_ints { #define SYS_PINFUNC_S1B (1 << 2) #endif +#define SYS_TRIOUTRD 0xB1900100 +#define SYS_TRIOUTCLR 0xB1900100 +#define SYS_OUTPUTRD 0xB1900108 +#define SYS_OUTPUTSET 0xB1900108 +#define SYS_OUTPUTCLR 0xB190010C +#define SYS_PINSTATERD 0xB1900110 +#define SYS_PININPUTEN 0xB1900110 + +/* GPIO2, Au1500, Au1550 only */ +#define GPIO2_BASE 0xB1700000 +#define GPIO2_DIR (GPIO2_BASE + 0) +#define GPIO2_OUTPUT (GPIO2_BASE + 8) +#define GPIO2_PINSTATE (GPIO2_BASE + 0xC) +#define GPIO2_INTENABLE (GPIO2_BASE + 0x10) +#define GPIO2_ENABLE (GPIO2_BASE + 0x14) + /* Power Management */ #define SYS_SCRATCH0 0xB1900018 #define SYS_SCRATCH1 0xB190001C @@ -1467,6 +1635,12 @@ enum soc_au1200_ints { # define AC97C_RS (1 << 1) # define AC97C_CE (1 << 0) +/* Secure Digital (SD) Controller */ +#define SD0_XMIT_FIFO 0xB0600000 +#define SD0_RECV_FIFO 0xB0600004 +#define SD1_XMIT_FIFO 0xB0680000 +#define SD1_RECV_FIFO 0xB0680004 + #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) /* Au1500 PCI 
Controller */ #define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr */ diff --git a/trunk/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/trunk/arch/mips/include/asm/mach-au1x00/au1000_dma.h index 59f5b55b2200..c333b4e1cd44 100644 --- a/trunk/arch/mips/include/asm/mach-au1x00/au1000_dma.h +++ b/trunk/arch/mips/include/asm/mach-au1x00/au1000_dma.h @@ -37,6 +37,10 @@ #define NUM_AU1000_DMA_CHANNELS 8 +/* DMA Channel Base Addresses */ +#define DMA_CHANNEL_BASE 0xB4002000 +#define DMA_CHANNEL_LEN 0x00000100 + /* DMA Channel Register Offsets */ #define DMA_MODE_SET 0x00000000 #define DMA_MODE_READ DMA_MODE_SET diff --git a/trunk/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/trunk/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h index 2fdacfe85e23..c8a553a36ba4 100644 --- a/trunk/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h +++ b/trunk/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h @@ -37,6 +37,14 @@ #ifndef _LANGUAGE_ASSEMBLY +/* + * The DMA base addresses. + * The channels are every 256 bytes (0x0100) from the channel 0 base. + * Interrupt status/enable is bits 15:0 for channels 15 to zero. + */ +#define DDMA_GLOBAL_BASE 0xb4003000 +#define DDMA_CHANNEL_BASE 0xb4002000 + typedef volatile struct dbdma_global { u32 ddma_config; u32 ddma_intstat; diff --git a/trunk/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/trunk/arch/mips/include/asm/mach-au1x00/gpio-au1000.h index 1f41a522906d..62d2f136d941 100644 --- a/trunk/arch/mips/include/asm/mach-au1x00/gpio-au1000.h +++ b/trunk/arch/mips/include/asm/mach-au1x00/gpio-au1000.h @@ -24,23 +24,6 @@ #define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) -/* GPIO1 registers within SYS_ area */ -#define SYS_TRIOUTRD 0x100 -#define SYS_TRIOUTCLR 0x100 -#define SYS_OUTPUTRD 0x108 -#define SYS_OUTPUTSET 0x108 -#define SYS_OUTPUTCLR 0x10C -#define SYS_PINSTATERD 0x110 -#define SYS_PININPUTEN 0x110 - -/* register offsets within GPIO2 block */ -#define GPIO2_DIR 0x00 -#define GPIO2_OUTPUT 0x08 -#define GPIO2_PINSTATE 0x0C -#define GPIO2_INTENABLE 0x10 -#define GPIO2_ENABLE 0x14 - -struct gpio; static inline int au1000_gpio1_to_irq(int gpio) { @@ -217,26 +200,23 @@ static inline int au1200_irq_to_gpio(int irq) */ static inline void alchemy_gpio1_set_value(int gpio, int v) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); unsigned long r = v ? 
SYS_OUTPUTSET : SYS_OUTPUTCLR; - __raw_writel(mask, base + r); - wmb(); + au_writel(mask, r); + au_sync(); } static inline int alchemy_gpio1_get_value(int gpio) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); - return __raw_readl(base + SYS_PINSTATERD) & mask; + return au_readl(SYS_PINSTATERD) & mask; } static inline int alchemy_gpio1_direction_input(int gpio) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); - __raw_writel(mask, base + SYS_TRIOUTCLR); - wmb(); + au_writel(mask, SYS_TRIOUTCLR); + au_sync(); return 0; } @@ -277,31 +257,27 @@ static inline int alchemy_gpio1_to_irq(int gpio) */ static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); - unsigned long d = __raw_readl(base + GPIO2_DIR); - + unsigned long d = au_readl(GPIO2_DIR); if (to_out) d |= mask; else d &= ~mask; - __raw_writel(d, base + GPIO2_DIR); - wmb(); + au_writel(d, GPIO2_DIR); + au_sync(); } static inline void alchemy_gpio2_set_value(int gpio, int v) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); unsigned long mask; mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); - __raw_writel(mask, base + GPIO2_OUTPUT); - wmb(); + au_writel(mask, GPIO2_OUTPUT); + au_sync(); } static inline int alchemy_gpio2_get_value(int gpio) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); - return __raw_readl(base + GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); + return au_readl(GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); } static inline int alchemy_gpio2_direction_input(int gpio) @@ -353,23 +329,21 @@ static inline int alchemy_gpio2_to_irq(int gpio) */ static inline void alchemy_gpio1_input_enable(void) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); - __raw_writel(0, base + SYS_PININPUTEN); /* the write op is key */ - wmb(); + au_writel(0, SYS_PININPUTEN); /* the write op is key */ + au_sync(); } /* GPIO2 shared interrupts and control */ static inline void __alchemy_gpio2_mod_int(int gpio2, int en) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); - unsigned long r = __raw_readl(base + GPIO2_INTENABLE); + unsigned long r = au_readl(GPIO2_INTENABLE); if (en) r |= 1 << gpio2; else r &= ~(1 << gpio2); - __raw_writel(r, base + GPIO2_INTENABLE); - wmb(); + au_writel(r, GPIO2_INTENABLE); + au_sync(); } /** @@ -444,11 +418,10 @@ static inline void alchemy_gpio2_disable_int(int gpio2) */ static inline void alchemy_gpio2_enable(void) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); - __raw_writel(3, base + GPIO2_ENABLE); /* reset, clock enabled */ - wmb(); - __raw_writel(1, base + GPIO2_ENABLE); /* clock enabled */ - wmb(); + au_writel(3, GPIO2_ENABLE); /* reset, clock enabled */ + au_sync(); + au_writel(1, GPIO2_ENABLE); /* clock enabled */ + au_sync(); } /** @@ -458,9 +431,8 @@ static inline void alchemy_gpio2_enable(void) */ static inline void alchemy_gpio2_disable(void) { - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); - __raw_writel(2, base + GPIO2_ENABLE); /* reset, clock disabled */ - wmb(); + au_writel(2, GPIO2_ENABLE); /* reset, clock disabled */ + au_sync(); } /**********************************************************************/ @@ -584,16 +556,6 
@@ static inline void gpio_set_value(int gpio, int v) alchemy_gpio_set_value(gpio, v); } -static inline int gpio_get_value_cansleep(unsigned gpio) -{ - return gpio_get_value(gpio); -} - -static inline void gpio_set_value_cansleep(unsigned gpio, int value) -{ - gpio_set_value(gpio, value); -} - static inline int gpio_is_valid(int gpio) { return alchemy_gpio_is_valid(gpio); @@ -619,50 +581,10 @@ static inline int gpio_request(unsigned gpio, const char *label) return 0; } -static inline int gpio_request_one(unsigned gpio, - unsigned long flags, const char *label) -{ - return 0; -} - -static inline int gpio_request_array(struct gpio *array, size_t num) -{ - return 0; -} - static inline void gpio_free(unsigned gpio) { } -static inline void gpio_free_array(struct gpio *array, size_t num) -{ -} - -static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) -{ - return -ENOSYS; -} - -static inline int gpio_export(unsigned gpio, bool direction_may_change) -{ - return -ENOSYS; -} - -static inline int gpio_export_link(struct device *dev, const char *name, - unsigned gpio) -{ - return -ENOSYS; -} - -static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) -{ - return -ENOSYS; -} - -static inline void gpio_unexport(unsigned gpio) -{ -} - #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ diff --git a/trunk/arch/mips/include/asm/mach-bcm47xx/nvram.h b/trunk/arch/mips/include/asm/mach-bcm47xx/nvram.h index 184d5ecb5f51..9759588ba3cf 100644 --- a/trunk/arch/mips/include/asm/mach-bcm47xx/nvram.h +++ b/trunk/arch/mips/include/asm/mach-bcm47xx/nvram.h @@ -39,16 +39,8 @@ extern int nvram_getenv(char *name, char *val, size_t val_len); static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) { - if (strchr(buf, ':')) - sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], - &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], - &macaddr[5]); - else if (strchr(buf, '-')) - sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0], - &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], - &macaddr[5]); - else - printk(KERN_WARNING "Can not parse mac address: %s\n", buf); + sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1], + &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]); } #endif diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h index ed72e6a26b73..32978d32561a 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h @@ -88,7 +88,7 @@ struct bcm_tag { char kernel_crc[CRC_LEN]; /* 228-235: Unused at present */ char reserved1[8]; - /* 236-239: CRC32 of header excluding last 20 bytes */ + /* 236-239: CRC32 of header excluding tagVersion */ char header_crc[CRC_LEN]; /* 240-255: Unused at present */ char reserved2[16]; diff --git a/trunk/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/trunk/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index dedef7d2b01f..0b2b5eb22e9b 100644 --- a/trunk/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/trunk/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -63,11 +63,6 @@ # CN30XX Disable instruction prefetching or v0, v0, 0x2000 skip: - # First clear off CvmCtl[IPPCI] bit and move the performance - # counters interrupt to IRQ 6 - li v1, ~(7 << 7) - and v0, v0, v1 - ori v0, v0, (6 << 7) # Write the cavium control register dmtc0 v0, CP0_CVMCTL_REG sync diff --git a/trunk/arch/mips/include/asm/mach-lantiq/lantiq.h 
b/trunk/arch/mips/include/asm/mach-lantiq/lantiq.h deleted file mode 100644 index ce2f02929d22..000000000000 --- a/trunk/arch/mips/include/asm/mach-lantiq/lantiq.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ -#ifndef _LANTIQ_H__ -#define _LANTIQ_H__ - -#include - -/* generic reg access functions */ -#define ltq_r32(reg) __raw_readl(reg) -#define ltq_w32(val, reg) __raw_writel(val, reg) -#define ltq_w32_mask(clear, set, reg) \ - ltq_w32((ltq_r32(reg) & ~(clear)) | (set), reg) -#define ltq_r8(reg) __raw_readb(reg) -#define ltq_w8(val, reg) __raw_writeb(val, reg) - -/* register access macros for EBU and CGU */ -#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y)) -#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x)) -#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y)) -#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x)) - -extern __iomem void *ltq_ebu_membase; -extern __iomem void *ltq_cgu_membase; - -extern unsigned int ltq_get_cpu_ver(void); -extern unsigned int ltq_get_soc_type(void); - -/* clock speeds */ -#define CLOCK_60M 60000000 -#define CLOCK_83M 83333333 -#define CLOCK_111M 111111111 -#define CLOCK_133M 133333333 -#define CLOCK_167M 166666667 -#define CLOCK_200M 200000000 -#define CLOCK_266M 266666666 -#define CLOCK_333M 333333333 -#define CLOCK_400M 400000000 - -/* spinlock all ebu i/o */ -extern spinlock_t ebu_lock; - -/* some irq helpers */ -extern void ltq_disable_irq(struct irq_data *data); -extern void ltq_mask_and_ack_irq(struct irq_data *data); -extern void ltq_enable_irq(struct irq_data *data); - -/* find out what caused the last cpu reset */ -extern int ltq_reset_cause(void); -#define LTQ_RST_CAUSE_WDTRST 0x20 - -#define IOPORT_RESOURCE_START 0x10000000 -#define IOPORT_RESOURCE_END 0xffffffff -#define IOMEM_RESOURCE_START 0x10000000 -#define IOMEM_RESOURCE_END 0xffffffff -#define LTQ_FLASH_START 0x10000000 -#define LTQ_FLASH_MAX 0x04000000 - -#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/trunk/arch/mips/include/asm/mach-lantiq/lantiq_platform.h deleted file mode 100644 index a305f1d0259e..000000000000 --- a/trunk/arch/mips/include/asm/mach-lantiq/lantiq_platform.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LANTIQ_PLATFORM_H__ -#define _LANTIQ_PLATFORM_H__ - -#include -#include - -/* struct used to pass info to the pci core */ -enum { - PCI_CLOCK_INT = 0, - PCI_CLOCK_EXT -}; - -#define PCI_EXIN0 0x0001 -#define PCI_EXIN1 0x0002 -#define PCI_EXIN2 0x0004 -#define PCI_EXIN3 0x0008 -#define PCI_EXIN4 0x0010 -#define PCI_EXIN5 0x0020 -#define PCI_EXIN_MAX 6 - -#define PCI_GNT1 0x0040 -#define PCI_GNT2 0x0080 -#define PCI_GNT3 0x0100 -#define PCI_GNT4 0x0200 - -#define PCI_REQ1 0x0400 -#define PCI_REQ2 0x0800 -#define PCI_REQ3 0x1000 -#define PCI_REQ4 0x2000 -#define PCI_REQ_SHIFT 10 -#define PCI_REQ_MASK 0xf - -struct ltq_pci_data { - int clock; - int gpio; - int irq[16]; -}; - -/* struct used to pass info to network drivers */ -struct ltq_eth_data { - struct sockaddr mac; - int mii_mode; -}; - -#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/war.h b/trunk/arch/mips/include/asm/mach-lantiq/war.h deleted file mode 100644 index 01b08ef368d1..000000000000 --- a/trunk/arch/mips/include/asm/mach-lantiq/war.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - */ -#ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H -#define __ASM_MIPS_MACH_LANTIQ_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define R5432_CP0_INTERRUPT_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define RM9000_CDEX_SMP_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/xway/irq.h b/trunk/arch/mips/include/asm/mach-lantiq/xway/irq.h deleted file mode 100644 index a1471d2dd0d2..000000000000 --- a/trunk/arch/mips/include/asm/mach-lantiq/xway/irq.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#ifndef __LANTIQ_IRQ_H -#define __LANTIQ_IRQ_H - -#include - -#define NR_IRQS 256 - -#include_next - -#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h deleted file mode 100644 index b4465a888e20..000000000000 --- a/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LANTIQ_XWAY_IRQ_H__ -#define _LANTIQ_XWAY_IRQ_H__ - -#define INT_NUM_IRQ0 8 -#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0) -#define INT_NUM_IM1_IRL0 (INT_NUM_IRQ0 + 32) -#define INT_NUM_IM2_IRL0 (INT_NUM_IRQ0 + 64) -#define INT_NUM_IM3_IRL0 (INT_NUM_IRQ0 + 96) -#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128) -#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) - -#define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8)) -#define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1) -#define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2) - -#define LTQ_ASC_ASE_TIR INT_NUM_IM2_IRL0 -#define LTQ_ASC_ASE_RIR (INT_NUM_IM2_IRL0 + 2) -#define LTQ_ASC_ASE_EIR (INT_NUM_IM2_IRL0 + 3) - -#define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15) -#define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14) -#define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16) - -#define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21) -#define LTQ_MEI_INT (INT_NUM_IM1_IRL0 + 23) - -#define LTQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23) -#define LTQ_USB_INT (INT_NUM_IM1_IRL0 + 22) -#define LTQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23) - -#define MIPS_CPU_TIMER_IRQ 7 - -#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) -#define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1) -#define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2) -#define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3) -#define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4) -#define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5) -#define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6) -#define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7) -#define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8) -#define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9) -#define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10) -#define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11) -#define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25) -#define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26) -#define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27) -#define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28) -#define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29) -#define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30) -#define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16) -#define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21) - -#define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24) - -#define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14) - -#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h deleted file mode 100644 index 8a3c6be669d2..000000000000 --- a/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LTQ_XWAY_H__ -#define _LTQ_XWAY_H__ - -#ifdef CONFIG_SOC_TYPE_XWAY - -#include - -/* Chip IDs */ -#define SOC_ID_DANUBE1 0x129 -#define SOC_ID_DANUBE2 0x12B -#define SOC_ID_TWINPASS 0x12D -#define SOC_ID_AMAZON_SE 0x152 -#define SOC_ID_ARX188 0x16C -#define SOC_ID_ARX168 0x16D -#define SOC_ID_ARX182 0x16F - -/* SoC Types */ -#define SOC_TYPE_DANUBE 0x01 -#define SOC_TYPE_TWINPASS 0x02 -#define SOC_TYPE_AR9 0x03 -#define SOC_TYPE_VR9 0x04 -#define SOC_TYPE_AMAZON_SE 0x05 - -/* ASC0/1 - serial port */ -#define LTQ_ASC0_BASE_ADDR 0x1E100400 -#define LTQ_ASC1_BASE_ADDR 0x1E100C00 -#define LTQ_ASC_SIZE 0x400 - -/* RCU - reset control unit */ -#define LTQ_RCU_BASE_ADDR 0x1F203000 -#define LTQ_RCU_SIZE 0x1000 - -/* GPTU - general purpose timer unit */ -#define LTQ_GPTU_BASE_ADDR 0x18000300 -#define LTQ_GPTU_SIZE 0x100 - -/* EBU - external bus unit */ -#define LTQ_EBU_GPIO_START 0x14000000 -#define LTQ_EBU_GPIO_SIZE 0x1000 - -#define LTQ_EBU_BASE_ADDR 0x1E105300 -#define LTQ_EBU_SIZE 0x100 - -#define LTQ_EBU_BUSCON0 0x0060 -#define LTQ_EBU_PCC_CON 0x0090 -#define LTQ_EBU_PCC_IEN 0x00A4 -#define LTQ_EBU_PCC_ISTAT 0x00A0 -#define LTQ_EBU_BUSCON1 0x0064 -#define LTQ_EBU_ADDRSEL1 0x0024 -#define EBU_WRDIS 0x80000000 - -/* CGU - clock generation unit */ -#define LTQ_CGU_BASE_ADDR 0x1F103000 -#define LTQ_CGU_SIZE 0x1000 - -/* ICU - interrupt control unit */ -#define LTQ_ICU_BASE_ADDR 0x1F880200 -#define LTQ_ICU_SIZE 0x100 - -/* EIU - external interrupt unit */ -#define LTQ_EIU_BASE_ADDR 0x1F101000 -#define LTQ_EIU_SIZE 0x1000 - -/* PMU - power management unit */ -#define LTQ_PMU_BASE_ADDR 0x1F102000 -#define LTQ_PMU_SIZE 0x1000 - -#define PMU_DMA 0x0020 -#define PMU_USB 0x8041 -#define PMU_LED 0x0800 -#define PMU_GPT 0x1000 -#define PMU_PPE 0x2000 -#define PMU_FPI 0x4000 -#define PMU_SWITCH 0x10000000 - -/* ETOP - ethernet */ -#define LTQ_ETOP_BASE_ADDR 0x1E180000 -#define LTQ_ETOP_SIZE 0x40000 - -/* DMA */ -#define LTQ_DMA_BASE_ADDR 0x1E104100 -#define LTQ_DMA_SIZE 0x800 - -/* PCI */ -#define PCI_CR_BASE_ADDR 0x1E105400 -#define PCI_CR_SIZE 0x400 - -/* WDT */ -#define LTQ_WDT_BASE_ADDR 0x1F8803F0 -#define LTQ_WDT_SIZE 0x10 - -/* STP - serial to parallel conversion unit */ -#define LTQ_STP_BASE_ADDR 0x1E100BB0 -#define LTQ_STP_SIZE 0x40 - -/* GPIO */ -#define LTQ_GPIO0_BASE_ADDR 0x1E100B10 -#define LTQ_GPIO1_BASE_ADDR 0x1E100B40 -#define LTQ_GPIO2_BASE_ADDR 0x1E100B70 -#define LTQ_GPIO_SIZE 0x30 - -/* SSC */ -#define LTQ_SSC_BASE_ADDR 0x1e100800 -#define LTQ_SSC_SIZE 0x100 - -/* MEI - dsl core */ -#define LTQ_MEI_BASE_ADDR 0x1E116000 - -/* DEU - data encryption unit */ -#define LTQ_DEU_BASE_ADDR 0x1E103100 - -/* MPS - multi processor unit (voice) */ -#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000) -#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344)) - -/* request a non-gpio and set the PIO config */ -extern int ltq_gpio_request(unsigned int pin, unsigned int alt0, - unsigned int alt1, unsigned int dir, const char *name); -extern void ltq_pmu_enable(unsigned int module); -extern void ltq_pmu_disable(unsigned int module); - -static inline int ltq_is_ar9(void) -{ - return (ltq_get_soc_type() == SOC_TYPE_AR9); -} - -static inline int ltq_is_vr9(void) -{ - return (ltq_get_soc_type() == SOC_TYPE_VR9); -} - -#endif /* CONFIG_SOC_TYPE_XWAY */ -#endif /* _LTQ_XWAY_H__ */ diff --git a/trunk/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/trunk/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h deleted file mode 100644 index 
872943a4b90e..000000000000 --- a/trunk/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) 2011 John Crispin - */ - -#ifndef LTQ_DMA_H__ -#define LTQ_DMA_H__ - -#define LTQ_DESC_SIZE 0x08 /* each descriptor is 64bit */ -#define LTQ_DESC_NUM 0x40 /* 64 descriptors / channel */ - -#define LTQ_DMA_OWN BIT(31) /* owner bit */ -#define LTQ_DMA_C BIT(30) /* complete bit */ -#define LTQ_DMA_SOP BIT(29) /* start of packet */ -#define LTQ_DMA_EOP BIT(28) /* end of packet */ -#define LTQ_DMA_TX_OFFSET(x) ((x & 0x1f) << 23) /* data bytes offset */ -#define LTQ_DMA_RX_OFFSET(x) ((x & 0x7) << 23) /* data bytes offset */ -#define LTQ_DMA_SIZE_MASK (0xffff) /* the size field is 16 bit */ - -struct ltq_dma_desc { - u32 ctl; - u32 addr; -}; - -struct ltq_dma_channel { - int nr; /* the channel number */ - int irq; /* the mapped irq */ - int desc; /* the current descriptor */ - struct ltq_dma_desc *desc_base; /* the descriptor base */ - int phys; /* physical addr */ -}; - -enum { - DMA_PORT_ETOP = 0, - DMA_PORT_DEU, -}; - -extern void ltq_dma_enable_irq(struct ltq_dma_channel *ch); -extern void ltq_dma_disable_irq(struct ltq_dma_channel *ch); -extern void ltq_dma_ack_irq(struct ltq_dma_channel *ch); -extern void ltq_dma_open(struct ltq_dma_channel *ch); -extern void ltq_dma_close(struct ltq_dma_channel *ch); -extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch); -extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch); -extern void ltq_dma_free(struct ltq_dma_channel *ch); -extern void ltq_dma_init_port(int p); - -#endif diff --git a/trunk/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h deleted file mode 100644 index 3b728275b9b0..000000000000 --- a/trunk/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Copyright (C) 2011 Netlogic Microsystems - * Copyright (C) 2003 Ralf Baechle - */ -#ifndef __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H -#define __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H - -#define cpu_has_4kex 1 -#define cpu_has_4k_cache 1 -#define cpu_has_watch 1 -#define cpu_has_mips16 0 -#define cpu_has_counter 1 -#define cpu_has_divec 1 -#define cpu_has_vce 0 -#define cpu_has_cache_cdex_p 0 -#define cpu_has_cache_cdex_s 0 -#define cpu_has_prefetch 1 -#define cpu_has_mcheck 1 -#define cpu_has_ejtag 1 - -#define cpu_has_llsc 1 -#define cpu_has_vtag_icache 0 -#define cpu_has_dc_aliases 0 -#define cpu_has_ic_fills_f_dc 0 -#define cpu_has_dsp 0 -#define cpu_has_mipsmt 0 -#define cpu_has_userlocal 0 -#define cpu_icache_snoops_remote_store 0 - -#define cpu_has_nofpuex 0 -#define cpu_has_64bits 1 - -#define cpu_has_mips32r1 1 -#define cpu_has_mips32r2 0 -#define cpu_has_mips64r1 1 -#define cpu_has_mips64r2 0 - -#define cpu_has_inclusive_pcaches 0 - -#define cpu_dcache_line_size() 32 -#define cpu_icache_line_size() 32 - -#endif /* __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H */ diff --git a/trunk/arch/mips/include/asm/mach-netlogic/irq.h b/trunk/arch/mips/include/asm/mach-netlogic/irq.h deleted file mode 100644 index b5902458e7c1..000000000000 --- a/trunk/arch/mips/include/asm/mach-netlogic/irq.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2011 Netlogic Microsystems. - */ -#ifndef __ASM_NETLOGIC_IRQ_H -#define __ASM_NETLOGIC_IRQ_H - -#define NR_IRQS 64 -#define MIPS_CPU_IRQ_BASE 0 - -#endif /* __ASM_NETLOGIC_IRQ_H */ diff --git a/trunk/arch/mips/include/asm/mach-netlogic/war.h b/trunk/arch/mips/include/asm/mach-netlogic/war.h deleted file mode 100644 index 22da89327352..000000000000 --- a/trunk/arch/mips/include/asm/mach-netlogic/war.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2011 Netlogic Microsystems. 
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle - */ -#ifndef __ASM_MIPS_MACH_NLM_WAR_H -#define __ASM_MIPS_MACH_NLM_WAR_H - -#define R4600_V1_INDEX_ICACHEOP_WAR 0 -#define R4600_V1_HIT_CACHEOP_WAR 0 -#define R4600_V2_HIT_CACHEOP_WAR 0 -#define R5432_CP0_INTERRUPT_WAR 0 -#define BCM1250_M3_WAR 0 -#define SIBYTE_1956_WAR 0 -#define MIPS4K_ICACHE_REFILL_WAR 0 -#define MIPS_CACHE_SYNC_WAR 0 -#define TX49XX_ICACHE_INDEX_INV_WAR 0 -#define RM9000_CDEX_SMP_WAR 0 -#define ICACHE_REFILLS_WORKAROUND_WAR 0 -#define R10000_LLSC_WAR 0 -#define MIPS34K_MISSED_ITLB_WAR 0 - -#endif /* __ASM_MIPS_MACH_NLM_WAR_H */ diff --git a/trunk/arch/mips/include/asm/module.h b/trunk/arch/mips/include/asm/module.h index bc01a02cacd8..d94085a3eafb 100644 --- a/trunk/arch/mips/include/asm/module.h +++ b/trunk/arch/mips/include/asm/module.h @@ -118,8 +118,6 @@ search_module_dbetables(unsigned long addr) #define MODULE_PROC_FAMILY "LOONGSON2 " #elif defined CONFIG_CPU_CAVIUM_OCTEON #define MODULE_PROC_FAMILY "OCTEON " -#elif defined CONFIG_CPU_XLR -#define MODULE_PROC_FAMILY "XLR " #else #error MODULE_PROC_FAMILY undefined for your processor configuration #endif diff --git a/trunk/arch/mips/include/asm/netlogic/interrupt.h b/trunk/arch/mips/include/asm/netlogic/interrupt.h deleted file mode 100644 index a85aadb6cfd7..000000000000 --- a/trunk/arch/mips/include/asm/netlogic/interrupt.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _ASM_NLM_INTERRUPT_H -#define _ASM_NLM_INTERRUPT_H - -/* Defines for the IRQ numbers */ - -#define IRQ_IPI_SMP_FUNCTION 3 -#define IRQ_IPI_SMP_RESCHEDULE 4 -#define IRQ_MSGRING 6 -#define IRQ_TIMER 7 - -#endif diff --git a/trunk/arch/mips/include/asm/netlogic/mips-extns.h b/trunk/arch/mips/include/asm/netlogic/mips-extns.h deleted file mode 100644 index 8c53d0ba4bf2..000000000000 --- a/trunk/arch/mips/include/asm/netlogic/mips-extns.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _ASM_NLM_MIPS_EXTS_H -#define _ASM_NLM_MIPS_EXTS_H - -/* - * XLR and XLP interrupt request and interrupt mask registers - */ -#define read_c0_eirr() __read_64bit_c0_register($9, 6) -#define read_c0_eimr() __read_64bit_c0_register($9, 7) -#define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val) - -/* - * Writing EIMR in 32 bit is a special case, the lower 8 bit of the - * EIMR is shadowed in the status register, so we cannot save and - * restore status register for split read. 
- */ -#define write_c0_eimr(val) \ -do { \ - if (sizeof(unsigned long) == 4) { \ - unsigned long __flags; \ - \ - local_irq_save(__flags); \ - __asm__ __volatile__( \ - ".set\tmips64\n\t" \ - "dsll\t%L0, %L0, 32\n\t" \ - "dsrl\t%L0, %L0, 32\n\t" \ - "dsll\t%M0, %M0, 32\n\t" \ - "or\t%L0, %L0, %M0\n\t" \ - "dmtc0\t%L0, $9, 7\n\t" \ - ".set\tmips0" \ - : : "r" (val)); \ - __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\ - local_irq_restore(__flags); \ - } else \ - __write_64bit_c0_register($9, 7, (val)); \ -} while (0) - -static inline int hard_smp_processor_id(void) -{ - return __read_32bit_c0_register($15, 1) & 0x3ff; -} - -#endif /*_ASM_NLM_MIPS_EXTS_H */ diff --git a/trunk/arch/mips/include/asm/netlogic/psb-bootinfo.h b/trunk/arch/mips/include/asm/netlogic/psb-bootinfo.h deleted file mode 100644 index 6878307f0ee6..000000000000 --- a/trunk/arch/mips/include/asm/netlogic/psb-bootinfo.h +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _ASM_NETLOGIC_BOOTINFO_H -#define _ASM_NETLOGIC_BOOTINFO_H - -struct psb_info { - uint64_t boot_level; - uint64_t io_base; - uint64_t output_device; - uint64_t uart_print; - uint64_t led_output; - uint64_t init; - uint64_t exit; - uint64_t warm_reset; - uint64_t wakeup; - uint64_t online_cpu_map; - uint64_t master_reentry_sp; - uint64_t master_reentry_gp; - uint64_t master_reentry_fn; - uint64_t slave_reentry_fn; - uint64_t magic_dword; - uint64_t uart_putchar; - uint64_t size; - uint64_t uart_getchar; - uint64_t nmi_handler; - uint64_t psb_version; - uint64_t mac_addr; - uint64_t cpu_frequency; - uint64_t board_version; - uint64_t malloc; - uint64_t free; - uint64_t global_shmem_addr; - uint64_t global_shmem_size; - uint64_t psb_os_cpu_map; - uint64_t userapp_cpu_map; - uint64_t wakeup_os; - uint64_t psb_mem_map; - uint64_t board_major_version; - uint64_t board_minor_version; - uint64_t board_manf_revision; - uint64_t board_serial_number; - uint64_t psb_physaddr_map; - uint64_t xlr_loaderip_config; - uint64_t bldr_envp; - uint64_t avail_mem_map; -}; - -enum { - NETLOGIC_IO_SPACE = 0x10, - PCIX_IO_SPACE, - PCIX_CFG_SPACE, - PCIX_MEMORY_SPACE, - HT_IO_SPACE, - HT_CFG_SPACE, - HT_MEMORY_SPACE, - SRAM_SPACE, - FLASH_CONTROLLER_SPACE -}; - -#define NLM_MAX_ARGS 64 -#define NLM_MAX_ENVS 32 - -/* This is what netlboot passes and linux boot_mem_map is subtly different */ -#define NLM_BOOT_MEM_MAP_MAX 32 -struct nlm_boot_mem_map { - int nr_map; - struct nlm_boot_mem_map_entry { - uint64_t addr; /* start of memory segment */ - uint64_t size; /* size of memory segment */ - uint32_t type; /* type of memory segment */ - } map[NLM_BOOT_MEM_MAP_MAX]; -}; - -/* Pointer to saved boot loader info */ -extern struct psb_info nlm_prom_info; - -#endif diff --git a/trunk/arch/mips/include/asm/netlogic/xlr/gpio.h b/trunk/arch/mips/include/asm/netlogic/xlr/gpio.h deleted file mode 100644 index 51f6ad4aeb14..000000000000 --- a/trunk/arch/mips/include/asm/netlogic/xlr/gpio.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _ASM_NLM_GPIO_H -#define _ASM_NLM_GPIO_H - -#define NETLOGIC_GPIO_INT_EN_REG 0 -#define NETLOGIC_GPIO_INPUT_INVERSION_REG 1 -#define NETLOGIC_GPIO_IO_DIR_REG 2 -#define NETLOGIC_GPIO_IO_DATA_WR_REG 3 -#define NETLOGIC_GPIO_IO_DATA_RD_REG 4 - -#define NETLOGIC_GPIO_SWRESET_REG 8 -#define NETLOGIC_GPIO_DRAM1_CNTRL_REG 9 -#define NETLOGIC_GPIO_DRAM1_RATIO_REG 10 -#define NETLOGIC_GPIO_DRAM1_RESET_REG 11 -#define NETLOGIC_GPIO_DRAM1_STATUS_REG 12 -#define NETLOGIC_GPIO_DRAM2_CNTRL_REG 13 -#define NETLOGIC_GPIO_DRAM2_RATIO_REG 14 -#define NETLOGIC_GPIO_DRAM2_RESET_REG 15 -#define NETLOGIC_GPIO_DRAM2_STATUS_REG 16 - -#define NETLOGIC_GPIO_PWRON_RESET_CFG_REG 21 -#define NETLOGIC_GPIO_BIST_ALL_GO_STATUS_REG 24 -#define NETLOGIC_GPIO_BIST_CPU_GO_STATUS_REG 25 -#define NETLOGIC_GPIO_BIST_DEV_GO_STATUS_REG 26 - -#define NETLOGIC_GPIO_FUSE_BANK_REG 35 -#define NETLOGIC_GPIO_CPU_RESET_REG 40 -#define NETLOGIC_GPIO_RNG_REG 43 - -#define NETLOGIC_PWRON_RESET_PCMCIA_BOOT 17 -#define NETLOGIC_GPIO_LED_BITMAP 0x1700000 -#define NETLOGIC_GPIO_LED_0_SHIFT 20 -#define NETLOGIC_GPIO_LED_1_SHIFT 24 - -#define NETLOGIC_GPIO_LED_OUTPUT_CODE_RESET 0x01 -#define NETLOGIC_GPIO_LED_OUTPUT_CODE_HARD_RESET 0x02 -#define NETLOGIC_GPIO_LED_OUTPUT_CODE_SOFT_RESET 0x03 -#define NETLOGIC_GPIO_LED_OUTPUT_CODE_MAIN 0x04 - -#endif diff --git a/trunk/arch/mips/include/asm/netlogic/xlr/iomap.h b/trunk/arch/mips/include/asm/netlogic/xlr/iomap.h deleted file mode 100644 index 2e3a4dd53045..000000000000 --- a/trunk/arch/mips/include/asm/netlogic/xlr/iomap.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _ASM_NLM_IOMAP_H -#define _ASM_NLM_IOMAP_H - -#define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000) -#define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000 -#define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000 -#define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000 -#define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000 -#define NETLOGIC_IO_PIC_OFFSET 0x08000 -#define NETLOGIC_IO_UART_0_OFFSET 0x14000 -#define NETLOGIC_IO_UART_1_OFFSET 0x15100 - -#define NETLOGIC_IO_SIZE 0x1000 - -#define NETLOGIC_IO_BRIDGE_OFFSET 0x00000 - -#define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000 -#define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000 - -#define NETLOGIC_IO_SRAM_OFFSET 0x07000 - -#define NETLOGIC_IO_PCIX_OFFSET 0x09000 -#define NETLOGIC_IO_HT_OFFSET 0x0A000 - -#define NETLOGIC_IO_SECURITY_OFFSET 0x0B000 - -#define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000 -#define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000 -#define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000 -#define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000 - -/* XLS devices */ -#define NETLOGIC_IO_GMAC_4_OFFSET 0x20000 -#define NETLOGIC_IO_GMAC_5_OFFSET 0x21000 -#define NETLOGIC_IO_GMAC_6_OFFSET 0x22000 -#define NETLOGIC_IO_GMAC_7_OFFSET 0x23000 - -#define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000 -#define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000 -#define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000 -#define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000 - -#define NETLOGIC_IO_USB_0_OFFSET 0x24000 -#define NETLOGIC_IO_USB_1_OFFSET 0x25000 - -#define NETLOGIC_IO_COMP_OFFSET 0x1D000 -/* end XLS devices */ - -/* XLR devices */ -#define NETLOGIC_IO_SPI4_0_OFFSET 0x10000 -#define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000 -#define NETLOGIC_IO_SPI4_1_OFFSET 0x12000 -#define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000 -/* end XLR devices */ - -#define NETLOGIC_IO_I2C_0_OFFSET 0x16000 -#define NETLOGIC_IO_I2C_1_OFFSET 0x17000 - -#define NETLOGIC_IO_GPIO_OFFSET 0x18000 -#define NETLOGIC_IO_FLASH_OFFSET 0x19000 -#define NETLOGIC_IO_TB_OFFSET 0x1C000 - -#define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000) - -/* - * Base Address (Virtual) of the PCI Config address space - * For now, choose 256M phys in kseg1 = 0xA0000000 + (1<<28) - * Config space spans 256 (num of buses) * 256 (num functions) * 256 bytes - * ie 1<<24 = 16M - */ -#define DEFAULT_PCI_CONFIG_BASE 0x18000000 -#define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000 -#define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000 - -#ifndef __ASSEMBLY__ -#include -#include - -typedef volatile __u32 nlm_reg_t; -extern unsigned long netlogic_io_base; - -/* FIXME read once in write_reg */ -#ifdef CONFIG_CPU_LITTLE_ENDIAN -#define netlogic_read_reg(base, offset) ((base)[(offset)]) -#define netlogic_write_reg(base, offset, value) ((base)[(offset)] = (value)) -#else -#define netlogic_read_reg(base, offset) (be32_to_cpu((base)[(offset)])) -#define netlogic_write_reg(base, offset, value) \ - ((base)[(offset)] = cpu_to_be32((value))) -#endif - -#define netlogic_read_reg_le32(base, offset) (le32_to_cpu((base)[(offset)])) -#define netlogic_write_reg_le32(base, offset, value) \ - ((base)[(offset)] = cpu_to_le32((value))) -#define 
netlogic_io_mmio(offset) ((nlm_reg_t *)(netlogic_io_base+(offset))) -#endif /* __ASSEMBLY__ */ -#endif diff --git a/trunk/arch/mips/include/asm/netlogic/xlr/pic.h b/trunk/arch/mips/include/asm/netlogic/xlr/pic.h deleted file mode 100644 index 5cceb746f080..000000000000 --- a/trunk/arch/mips/include/asm/netlogic/xlr/pic.h +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _ASM_NLM_XLR_PIC_H -#define _ASM_NLM_XLR_PIC_H - -#define PIC_CLKS_PER_SEC 66666666ULL -/* PIC hardware interrupt numbers */ -#define PIC_IRT_WD_INDEX 0 -#define PIC_IRT_TIMER_0_INDEX 1 -#define PIC_IRT_TIMER_1_INDEX 2 -#define PIC_IRT_TIMER_2_INDEX 3 -#define PIC_IRT_TIMER_3_INDEX 4 -#define PIC_IRT_TIMER_4_INDEX 5 -#define PIC_IRT_TIMER_5_INDEX 6 -#define PIC_IRT_TIMER_6_INDEX 7 -#define PIC_IRT_TIMER_7_INDEX 8 -#define PIC_IRT_CLOCK_INDEX PIC_IRT_TIMER_7_INDEX -#define PIC_IRT_UART_0_INDEX 9 -#define PIC_IRT_UART_1_INDEX 10 -#define PIC_IRT_I2C_0_INDEX 11 -#define PIC_IRT_I2C_1_INDEX 12 -#define PIC_IRT_PCMCIA_INDEX 13 -#define PIC_IRT_GPIO_INDEX 14 -#define PIC_IRT_HYPER_INDEX 15 -#define PIC_IRT_PCIX_INDEX 16 -/* XLS */ -#define PIC_IRT_CDE_INDEX 15 -#define PIC_IRT_BRIDGE_TB_XLS_INDEX 16 -/* XLS */ -#define PIC_IRT_GMAC0_INDEX 17 -#define PIC_IRT_GMAC1_INDEX 18 -#define PIC_IRT_GMAC2_INDEX 19 -#define PIC_IRT_GMAC3_INDEX 20 -#define PIC_IRT_XGS0_INDEX 21 -#define PIC_IRT_XGS1_INDEX 22 -#define PIC_IRT_HYPER_FATAL_INDEX 23 -#define PIC_IRT_PCIX_FATAL_INDEX 24 -#define PIC_IRT_BRIDGE_AERR_INDEX 25 -#define PIC_IRT_BRIDGE_BERR_INDEX 26 -#define PIC_IRT_BRIDGE_TB_XLR_INDEX 27 -#define PIC_IRT_BRIDGE_AERR_NMI_INDEX 28 -/* XLS */ -#define PIC_IRT_GMAC4_INDEX 21 -#define PIC_IRT_GMAC5_INDEX 22 -#define PIC_IRT_GMAC6_INDEX 23 -#define PIC_IRT_GMAC7_INDEX 24 -#define PIC_IRT_BRIDGE_ERR_INDEX 25 -#define PIC_IRT_PCIE_LINK0_INDEX 26 -#define PIC_IRT_PCIE_LINK1_INDEX 27 -#define PIC_IRT_PCIE_LINK2_INDEX 23 -#define PIC_IRT_PCIE_LINK3_INDEX 24 -#define PIC_IRT_PCIE_XLSB0_LINK2_INDEX 28 -#define PIC_IRT_PCIE_XLSB0_LINK3_INDEX 29 -#define PIC_IRT_SRIO_LINK0_INDEX 26 -#define PIC_IRT_SRIO_LINK1_INDEX 27 -#define PIC_IRT_SRIO_LINK2_INDEX 28 -#define PIC_IRT_SRIO_LINK3_INDEX 29 -#define PIC_IRT_PCIE_INT_INDEX 28 -#define PIC_IRT_PCIE_FATAL_INDEX 29 -#define PIC_IRT_GPIO_B_INDEX 30 -#define PIC_IRT_USB_INDEX 31 -/* XLS */ -#define PIC_NUM_IRTS 32 - - -#define PIC_CLOCK_TIMER 7 - -/* PIC Registers */ -#define PIC_CTRL 0x00 -#define PIC_IPI 0x04 -#define PIC_INT_ACK 0x06 - -#define WD_MAX_VAL_0 0x08 -#define WD_MAX_VAL_1 0x09 -#define WD_MASK_0 0x0a -#define WD_MASK_1 0x0b -#define WD_HEARBEAT_0 0x0c -#define WD_HEARBEAT_1 0x0d - -#define PIC_IRT_0_BASE 0x40 -#define PIC_IRT_1_BASE 0x80 -#define PIC_TIMER_MAXVAL_0_BASE 0x100 -#define PIC_TIMER_MAXVAL_1_BASE 0x110 -#define PIC_TIMER_COUNT_0_BASE 0x120 -#define PIC_TIMER_COUNT_1_BASE 0x130 - -#define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr)) -#define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr)) - -#define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i)) -#define PIC_TIMER_MAXVAL_1(i) (PIC_TIMER_MAXVAL_1_BASE + (i)) -#define PIC_TIMER_COUNT_0(i) (PIC_TIMER_COUNT_0_BASE + (i)) -#define PIC_TIMER_COUNT_1(i) (PIC_TIMER_COUNT_0_BASE + (i)) - -/* - * Mapping between hardware interrupt numbers and IRQs on CPU - * we use a simple scheme to map PIC interrupts 0-31 to IRQs - * 8-39. 
This leaves the IRQ 0-7 for cpu interrupts like - * count/compare and FMN - */ -#define PIC_IRQ_BASE 8 -#define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i)) -#define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE) - -#define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE -#define PIC_WD_IRQ PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX) -#define PIC_TIMER_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_0_INDEX) -#define PIC_TIMER_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_1_INDEX) -#define PIC_TIMER_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_2_INDEX) -#define PIC_TIMER_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_3_INDEX) -#define PIC_TIMER_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_4_INDEX) -#define PIC_TIMER_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_5_INDEX) -#define PIC_TIMER_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_6_INDEX) -#define PIC_TIMER_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_7_INDEX) -#define PIC_CLOCK_IRQ (PIC_TIMER_7_IRQ) -#define PIC_UART_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_0_INDEX) -#define PIC_UART_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_1_INDEX) -#define PIC_I2C_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_0_INDEX) -#define PIC_I2C_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_1_INDEX) -#define PIC_PCMCIA_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCMCIA_INDEX) -#define PIC_GPIO_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_INDEX) -#define PIC_HYPER_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_INDEX) -#define PIC_PCIX_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_INDEX) -/* XLS */ -#define PIC_CDE_IRQ PIC_INTR_TO_IRQ(PIC_IRT_CDE_INDEX) -#define PIC_BRIDGE_TB_XLS_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLS_INDEX) -/* end XLS */ -#define PIC_GMAC_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC0_INDEX) -#define PIC_GMAC_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC1_INDEX) -#define PIC_GMAC_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC2_INDEX) -#define PIC_GMAC_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC3_INDEX) -#define PIC_XGS_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS0_INDEX) -#define PIC_XGS_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS1_INDEX) -#define PIC_HYPER_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_FATAL_INDEX) -#define PIC_PCIX_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_FATAL_INDEX) -#define PIC_BRIDGE_AERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX) -#define PIC_BRIDGE_BERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX) -#define PIC_BRIDGE_TB_XLR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX) -#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX) -/* XLS defines */ -#define PIC_GMAC_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX) -#define PIC_GMAC_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX) -#define PIC_GMAC_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC6_INDEX) -#define PIC_GMAC_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC7_INDEX) -#define PIC_BRIDGE_ERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_ERR_INDEX) -#define PIC_PCIE_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK0_INDEX) -#define PIC_PCIE_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK1_INDEX) -#define PIC_PCIE_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK2_INDEX) -#define PIC_PCIE_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK3_INDEX) -#define PIC_PCIE_XLSB0_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK2_INDEX) -#define PIC_PCIE_XLSB0_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK3_INDEX) -#define PIC_SRIO_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK0_INDEX) -#define PIC_SRIO_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK1_INDEX) -#define PIC_SRIO_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK2_INDEX) -#define PIC_SRIO_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK3_INDEX) -#define PIC_PCIE_INT_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_INT__INDEX) -#define PIC_PCIE_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_FATAL_INDEX) -#define PIC_GPIO_B_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_B_INDEX) 
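The mapping comment and the PIC_INTR_TO_IRQ()/PIC_IRQ_TO_INTR() macros in the deleted pic.h above offset the 32 PIC interrupt-routing-table entries by a fixed base of 8, keeping IRQs 0-7 free for CPU-internal sources such as count/compare and the FMN. A minimal standalone sketch of that arithmetic (the macro names mirror the header; the range check is purely illustrative):

#include <assert.h>
#include <stdio.h>

/* Same scheme as the deleted header: PIC entries 0-31 become IRQs 8-39. */
#define PIC_IRQ_BASE		8
#define PIC_NUM_IRTS		32
#define PIC_INTR_TO_IRQ(i)	(PIC_IRQ_BASE + (i))
#define PIC_IRQ_TO_INTR(i)	((i) - PIC_IRQ_BASE)

int main(void)
{
	int intr;

	for (intr = 0; intr < PIC_NUM_IRTS; intr++) {
		int irq = PIC_INTR_TO_IRQ(intr);

		assert(irq >= 8 && irq <= 39);		/* IRQs 0-7 stay reserved */
		assert(PIC_IRQ_TO_INTR(irq) == intr);	/* mapping is invertible */
	}
	printf("PIC entries 0-%d map to IRQs %d-%d\n", PIC_NUM_IRTS - 1,
	       PIC_INTR_TO_IRQ(0), PIC_INTR_TO_IRQ(PIC_NUM_IRTS - 1));
	return 0;
}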
-#define PIC_USB_IRQ PIC_INTR_TO_IRQ(PIC_IRT_USB_INDEX) -#define PIC_IRT_LAST_IRQ PIC_USB_IRQ -/* end XLS */ - -#ifndef __ASSEMBLY__ -static inline void pic_send_ipi(u32 ipi) -{ - nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); - - netlogic_write_reg(mmio, PIC_IPI, ipi); -} - -static inline u32 pic_read_control(void) -{ - nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); - - return netlogic_read_reg(mmio, PIC_CTRL); -} - -static inline void pic_write_control(u32 control) -{ - nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); - - netlogic_write_reg(mmio, PIC_CTRL, control); -} - -static inline void pic_update_control(u32 control) -{ - nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); - - netlogic_write_reg(mmio, PIC_CTRL, - (control | netlogic_read_reg(mmio, PIC_CTRL))); -} - -#define PIC_IRQ_IS_EDGE_TRIGGERED(irq) (((irq) >= PIC_TIMER_0_IRQ) && \ - ((irq) <= PIC_TIMER_7_IRQ)) -#define PIC_IRQ_IS_IRT(irq) (((irq) >= PIC_IRT_FIRST_IRQ) && \ - ((irq) <= PIC_IRT_LAST_IRQ)) -#endif - -#endif /* _ASM_NLM_XLR_PIC_H */ diff --git a/trunk/arch/mips/include/asm/netlogic/xlr/xlr.h b/trunk/arch/mips/include/asm/netlogic/xlr/xlr.h deleted file mode 100644 index 3e6372692a04..000000000000 --- a/trunk/arch/mips/include/asm/netlogic/xlr/xlr.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _ASM_NLM_XLR_H -#define _ASM_NLM_XLR_H - -/* Platform UART functions */ -struct uart_port; -unsigned int nlm_xlr_uart_in(struct uart_port *, int); -void nlm_xlr_uart_out(struct uart_port *, int, int); - -/* SMP support functions */ -struct irq_desc; -void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); -void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); -int nlm_wakeup_secondary_cpus(u32 wakeup_mask); -void nlm_smp_irq_init(void); -void nlm_boot_smp_nmi(void); -void prom_pre_boot_secondary_cpus(void); - -extern struct plat_smp_ops nlm_smp_ops; -extern unsigned long nlm_common_ebase; - -/* XLS B silicon "Rook" */ -static inline unsigned int nlm_chip_is_xls_b(void) -{ - uint32_t prid = read_c0_prid(); - - return ((prid & 0xf000) == 0x4000); -} - -/* - * XLR chip types - */ - /* The XLS product line has chip versions 0x[48c]? */ -static inline unsigned int nlm_chip_is_xls(void) -{ - uint32_t prid = read_c0_prid(); - - return ((prid & 0xf000) == 0x8000 || (prid & 0xf000) == 0x4000 || - (prid & 0xf000) == 0xc000); -} - -#endif /* _ASM_NLM_XLR_H */ diff --git a/trunk/arch/mips/include/asm/ptrace.h b/trunk/arch/mips/include/asm/ptrace.h index de39b1f343ea..9f1b8dba2c81 100644 --- a/trunk/arch/mips/include/asm/ptrace.h +++ b/trunk/arch/mips/include/asm/ptrace.h @@ -141,8 +141,7 @@ extern int ptrace_set_watch_regs(struct task_struct *child, #define instruction_pointer(regs) ((regs)->cp0_epc) #define profile_pc(regs) instruction_pointer(regs) -extern asmlinkage void syscall_trace_enter(struct pt_regs *regs); -extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); +extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET; diff --git a/trunk/arch/mips/include/asm/thread_info.h b/trunk/arch/mips/include/asm/thread_info.h index 97f8bf6639e7..d71160de4d10 100644 --- a/trunk/arch/mips/include/asm/thread_info.h +++ b/trunk/arch/mips/include/asm/thread_info.h @@ -149,9 +149,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define _TIF_FPUBOUND (1< 0xffffff) { if (vdma_debug) @@ -228,7 +228,8 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) return -EINVAL; /* invalid physical address */ } - pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; + npages = pages = + (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; first = laddr >> 12; if (vdma_debug) printk("vdma_remap: first=%x, pages=%x\n", first, pages); diff --git a/trunk/arch/mips/jz4740/dma.c b/trunk/arch/mips/jz4740/dma.c index d7feb898692c..5ebe75a68350 100644 --- a/trunk/arch/mips/jz4740/dma.c +++ b/trunk/arch/mips/jz4740/dma.c @@ -242,7 +242,9 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) { - (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); + uint32_t status; + + status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); diff --git a/trunk/arch/mips/jz4740/setup.c b/trunk/arch/mips/jz4740/setup.c index d97cfbf882f5..6a9e14dab91e 100644 --- a/trunk/arch/mips/jz4740/setup.c +++ b/trunk/arch/mips/jz4740/setup.c @@ -1,6 +1,5 @@ /* * Copyright (C) 2009-2010, Lars-Peter Clausen - * Copyright (C) 2011, Maarten ter Huurne * JZ4740 setup code * * This program is free software; you can redistribute it and/or modify it @@ -15,44 +14,13 @@ */ 
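The nlm_chip_is_xls()/nlm_chip_is_xls_b() helpers in the deleted xlr.h above key off bits 15:12 of the MIPS processor ID. A small sketch of the same test on a plain integer; the sample PRID value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* XLS parts carry 0x4, 0x8 or 0xc in bits 15:12 of the PRID;
 * the 0x4 case is the XLS "B" (Rook) silicon. */
static int chip_is_xls(uint32_t prid)
{
	uint32_t imp = prid & 0xf000;

	return imp == 0x8000 || imp == 0x4000 || imp == 0xc000;
}

int main(void)
{
	uint32_t prid = 0x00008500;	/* illustrative value only */

	printf("prid 0x%08x -> %s\n", prid, chip_is_xls(prid) ? "XLS" : "XLR");
	return 0;
}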
#include -#include #include -#include - -#include - #include "reset.h" - -#define JZ4740_EMC_SDRAM_CTRL 0x80 - - -static void __init jz4740_detect_mem(void) -{ - void __iomem *jz_emc_base; - u32 ctrl, bus, bank, rows, cols; - phys_t size; - - jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100); - ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL); - bus = 2 - ((ctrl >> 31) & 1); - bank = 1 + ((ctrl >> 19) & 1); - cols = 8 + ((ctrl >> 26) & 7); - rows = 11 + ((ctrl >> 20) & 3); - printk(KERN_DEBUG - "SDRAM preconfigured: bus:%u bank:%u rows:%u cols:%u\n", - bus, bank, rows, cols); - iounmap(jz_emc_base); - - size = 1 << (bus + bank + cols + rows); - add_memory_region(0, size, BOOT_MEM_RAM); -} - void __init plat_mem_setup(void) { jz4740_reset_init(); - jz4740_detect_mem(); } const char *get_system_type(void) diff --git a/trunk/arch/mips/jz4740/time.c b/trunk/arch/mips/jz4740/time.c index f83c2dd07a27..fe01678d94fd 100644 --- a/trunk/arch/mips/jz4740/time.c +++ b/trunk/arch/mips/jz4740/time.c @@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt, static struct clock_event_device jz4740_clockevent = { .name = "jz4740-timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .features = CLOCK_EVT_FEAT_PERIODIC, .set_next_event = jz4740_clockevent_set_next, .set_mode = jz4740_clockevent_set_mode, .rating = 200, @@ -121,7 +121,8 @@ void __init plat_time_init(void) clockevents_register_device(&jz4740_clockevent); - ret = clocksource_register_hz(&jz4740_clocksource, clk_rate); + clocksource_set_clock(&jz4740_clocksource, clk_rate); + ret = clocksource_register(&jz4740_clocksource); if (ret) printk(KERN_ERR "Failed to register clocksource: %d\n", ret); diff --git a/trunk/arch/mips/jz4740/timer.c b/trunk/arch/mips/jz4740/timer.c index 654d5c3900b6..b2c015129055 100644 --- a/trunk/arch/mips/jz4740/timer.c +++ b/trunk/arch/mips/jz4740/timer.c @@ -27,13 +27,11 @@ void jz4740_timer_enable_watchdog(void) { writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); } -EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog); void jz4740_timer_disable_watchdog(void) { writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); } -EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog); void __init jz4740_timer_init(void) { diff --git a/trunk/arch/mips/kernel/Makefile b/trunk/arch/mips/kernel/Makefile index 83bba332bbfc..cedee2bcbd18 100644 --- a/trunk/arch/mips/kernel/Makefile +++ b/trunk/arch/mips/kernel/Makefile @@ -52,7 +52,6 @@ obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o -obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP_UP) += smp-up.o diff --git a/trunk/arch/mips/kernel/cevt-txx9.c b/trunk/arch/mips/kernel/cevt-txx9.c index f0ab92a1b057..0b7377361e22 100644 --- a/trunk/arch/mips/kernel/cevt-txx9.c +++ b/trunk/arch/mips/kernel/cevt-txx9.c @@ -51,7 +51,8 @@ void __init txx9_clocksource_init(unsigned long baseaddr, { struct txx9_tmr_reg __iomem *tmrptr; - clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); + clocksource_set_clock(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); + clocksource_register(&txx9_clocksource.cs); tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); __raw_writel(TCR_BASE, &tmrptr->tcr); diff --git a/trunk/arch/mips/kernel/cpu-probe.c b/trunk/arch/mips/kernel/cpu-probe.c index bb133d10b145..f65d4c8c65a6 100644 --- 
a/trunk/arch/mips/kernel/cpu-probe.c +++ b/trunk/arch/mips/kernel/cpu-probe.c @@ -291,12 +291,6 @@ static inline int cpu_has_confreg(void) #endif } -static inline void set_elf_platform(int cpu, const char *plat) -{ - if (cpu == 0) - __elf_platform = plat; -} - /* * Get the FPU Implementation/Revision. */ @@ -620,16 +614,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_LOONGSON2: c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; - - switch (c->processor_id & PRID_REV_MASK) { - case PRID_REV_LOONGSON2E: - set_elf_platform(cpu, "loongson2e"); - break; - case PRID_REV_LOONGSON2F: - set_elf_platform(cpu, "loongson2f"); - break; - } - c->isa_level = MIPS_CPU_ISA_III; c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC | @@ -927,14 +911,12 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_BMIPS32_REV8: c->cputype = CPU_BMIPS32; __cpu_name[cpu] = "Broadcom BMIPS32"; - set_elf_platform(cpu, "bmips32"); break; case PRID_IMP_BMIPS3300: case PRID_IMP_BMIPS3300_ALT: case PRID_IMP_BMIPS3300_BUG: c->cputype = CPU_BMIPS3300; __cpu_name[cpu] = "Broadcom BMIPS3300"; - set_elf_platform(cpu, "bmips3300"); break; case PRID_IMP_BMIPS43XX: { int rev = c->processor_id & 0xff; @@ -943,18 +925,15 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) rev <= PRID_REV_BMIPS4380_HI) { c->cputype = CPU_BMIPS4380; __cpu_name[cpu] = "Broadcom BMIPS4380"; - set_elf_platform(cpu, "bmips4380"); } else { c->cputype = CPU_BMIPS4350; __cpu_name[cpu] = "Broadcom BMIPS4350"; - set_elf_platform(cpu, "bmips4350"); } break; } case PRID_IMP_BMIPS5000: c->cputype = CPU_BMIPS5000; __cpu_name[cpu] = "Broadcom BMIPS5000"; - set_elf_platform(cpu, "bmips5000"); c->options |= MIPS_CPU_ULRI; break; } @@ -977,12 +956,14 @@ static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_CAVIUM_OCTEON_PLUS; __cpu_name[cpu] = "Cavium Octeon+"; platform: - set_elf_platform(cpu, "octeon"); + if (cpu == 0) + __elf_platform = "octeon"; break; case PRID_IMP_CAVIUM_CN63XX: c->cputype = CPU_CAVIUM_OCTEON2; __cpu_name[cpu] = "Cavium Octeon II"; - set_elf_platform(cpu, "octeon2"); + if (cpu == 0) + __elf_platform = "octeon2"; break; default: printk(KERN_INFO "Unknown Octeon chip!\n"); @@ -1007,59 +988,6 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) } } -static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) -{ - decode_configs(c); - - c->options = (MIPS_CPU_TLB | - MIPS_CPU_4KEX | - MIPS_CPU_COUNTER | - MIPS_CPU_DIVEC | - MIPS_CPU_WATCH | - MIPS_CPU_EJTAG | - MIPS_CPU_LLSC); - - switch (c->processor_id & 0xff00) { - case PRID_IMP_NETLOGIC_XLR732: - case PRID_IMP_NETLOGIC_XLR716: - case PRID_IMP_NETLOGIC_XLR532: - case PRID_IMP_NETLOGIC_XLR308: - case PRID_IMP_NETLOGIC_XLR532C: - case PRID_IMP_NETLOGIC_XLR516C: - case PRID_IMP_NETLOGIC_XLR508C: - case PRID_IMP_NETLOGIC_XLR308C: - c->cputype = CPU_XLR; - __cpu_name[cpu] = "Netlogic XLR"; - break; - - case PRID_IMP_NETLOGIC_XLS608: - case PRID_IMP_NETLOGIC_XLS408: - case PRID_IMP_NETLOGIC_XLS404: - case PRID_IMP_NETLOGIC_XLS208: - case PRID_IMP_NETLOGIC_XLS204: - case PRID_IMP_NETLOGIC_XLS108: - case PRID_IMP_NETLOGIC_XLS104: - case PRID_IMP_NETLOGIC_XLS616B: - case PRID_IMP_NETLOGIC_XLS608B: - case PRID_IMP_NETLOGIC_XLS416B: - case PRID_IMP_NETLOGIC_XLS412B: - case PRID_IMP_NETLOGIC_XLS408B: - case PRID_IMP_NETLOGIC_XLS404B: - c->cputype = CPU_XLR; - __cpu_name[cpu] = "Netlogic XLS"; - 
break; - - default: - printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n", - c->processor_id); - c->cputype = CPU_XLR; - break; - } - - c->isa_level = MIPS_CPU_ISA_M64R1; - c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; -} - #ifdef CONFIG_64BIT /* For use by uaccess.h */ u64 __ua_limit; @@ -1107,9 +1035,6 @@ __cpuinit void cpu_probe(void) case PRID_COMP_INGENIC: cpu_probe_ingenic(c, cpu); break; - case PRID_COMP_NETLOGIC: - cpu_probe_netlogic(c, cpu); - break; } BUG_ON(!__cpu_name[cpu]); diff --git a/trunk/arch/mips/kernel/csrc-bcm1480.c b/trunk/arch/mips/kernel/csrc-bcm1480.c index f96f99c794a3..51489f8a825e 100644 --- a/trunk/arch/mips/kernel/csrc-bcm1480.c +++ b/trunk/arch/mips/kernel/csrc-bcm1480.c @@ -49,5 +49,6 @@ void __init sb1480_clocksource_init(void) plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000); - clocksource_register_hz(cs, zbbus); + clocksource_set_clock(cs, zbbus); + clocksource_register(cs); } diff --git a/trunk/arch/mips/kernel/csrc-ioasic.c b/trunk/arch/mips/kernel/csrc-ioasic.c index 46bd7fa98d6c..23da108506b0 100644 --- a/trunk/arch/mips/kernel/csrc-ioasic.c +++ b/trunk/arch/mips/kernel/csrc-ioasic.c @@ -59,5 +59,7 @@ void __init dec_ioasic_clocksource_init(void) printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); clocksource_dec.rating = 200 + freq / 10000000; - clocksource_register_hz(&clocksource_dec, freq); + clocksource_set_clock(&clocksource_dec, freq); + + clocksource_register(&clocksource_dec); } diff --git a/trunk/arch/mips/kernel/csrc-powertv.c b/trunk/arch/mips/kernel/csrc-powertv.c index 2e7c5232da8d..a27c16c8690e 100644 --- a/trunk/arch/mips/kernel/csrc-powertv.c +++ b/trunk/arch/mips/kernel/csrc-powertv.c @@ -78,7 +78,9 @@ static void __init powertv_c0_hpt_clocksource_init(void) clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; - clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); + clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); + + clocksource_register(&clocksource_mips); } /** @@ -128,16 +130,43 @@ static struct clocksource clocksource_tim_c = { /** * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock * + * The hard part here is coming up with a constant k and shift s such that + * the 48-bit TIM_C value multiplied by k doesn't overflow and that value, + * when shifted right by s, yields the corresponding number of nanoseconds. * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to - * 1 / (27,000,000/8) seconds. + * 1 / (27,000,000/8) seconds. Multiply that by a billion and you get the + * number of nanoseconds. Since the TIM_C value has 48 bits and the math is + * done in 64 bits, avoiding an overflow means that k must be less than + * 64 - 48 = 16 bits. 
*/ static void __init powertv_tim_c_clocksource_init(void) { + int prescale; + unsigned long dividend; + unsigned long k; + int s; + const int max_k_bits = (64 - 48) - 1; + const unsigned long billion = 1000000000; const unsigned long counts_per_second = 27000000 / 8; + prescale = BITS_PER_LONG - ilog2(billion) - 1; + dividend = billion << prescale; + k = dividend / counts_per_second; + s = ilog2(k) - max_k_bits; + + if (s < 0) + s = prescale; + + else { + k >>= s; + s += prescale; + } + + clocksource_tim_c.mult = k; + clocksource_tim_c.shift = s; clocksource_tim_c.rating = 200; - clocksource_register_hz(&clocksource_tim_c, counts_per_second); + clocksource_register(&clocksource_tim_c); tim_c = (struct tim_c *) asic_reg_addr(tim_ch); } diff --git a/trunk/arch/mips/kernel/csrc-r4k.c b/trunk/arch/mips/kernel/csrc-r4k.c index decd1fa38d55..e95a3cd48eea 100644 --- a/trunk/arch/mips/kernel/csrc-r4k.c +++ b/trunk/arch/mips/kernel/csrc-r4k.c @@ -30,7 +30,9 @@ int __init init_r4k_clocksource(void) /* Calculate a somewhat reasonable rating value */ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; - clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); + clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); + + clocksource_register(&clocksource_mips); return 0; } diff --git a/trunk/arch/mips/kernel/csrc-sb1250.c b/trunk/arch/mips/kernel/csrc-sb1250.c index e9606d907685..d14d3d1907fa 100644 --- a/trunk/arch/mips/kernel/csrc-sb1250.c +++ b/trunk/arch/mips/kernel/csrc-sb1250.c @@ -65,5 +65,6 @@ void __init sb1250_clocksource_init(void) IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG))); - clocksource_register_hz(cs, V_SCD_TIMER_FREQ); + clocksource_set_clock(cs, V_SCD_TIMER_FREQ); + clocksource_register(cs); } diff --git a/trunk/arch/mips/kernel/entry.S b/trunk/arch/mips/kernel/entry.S index 37acfa036d44..ffa331029e08 100644 --- a/trunk/arch/mips/kernel/entry.S +++ b/trunk/arch/mips/kernel/entry.S @@ -167,13 +167,14 @@ work_notifysig: # deal with pending signals and FEXPORT(syscall_exit_work_partial) SAVE_STATIC syscall_exit_work: - li t0, _TIF_WORK_SYSCALL_EXIT + li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT and t0, a2 # a2 is preloaded with TI_FLAGS beqz t0, work_pending # trace bit set? 
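The powertv_tim_c_clocksource_init() hunk above derives a mult/shift pair for the 27 MHz/8 TIM_C counter exactly as its comment describes. The sketch below redoes that computation in user space, assuming the 32-bit configuration (BITS_PER_LONG == 32) this platform uses; the tick count used for the final comparison is arbitrary:

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const int bits_per_long = 32;			/* 32-bit MIPS build */
	const int max_k_bits = (64 - 48) - 1;
	const unsigned long billion = 1000000000UL;
	const unsigned long counts_per_second = 27000000 / 8;
	int prescale = bits_per_long - ilog2_u(billion) - 1;
	unsigned long dividend = billion << prescale;
	unsigned long k = dividend / counts_per_second;
	int s = (int)ilog2_u(k) - max_k_bits;
	uint64_t ticks, ns_exact, ns_approx;

	if (s < 0) {
		s = prescale;
	} else {
		k >>= s;
		s += prescale;
	}

	/* Ten seconds worth of ticks: the mult/shift pair should give ~1e10 ns,
	 * and a full 48-bit count times k still fits in 64 bits. */
	ticks = counts_per_second * 10ULL;
	ns_exact = ticks * 1000000000ULL / counts_per_second;
	ns_approx = (ticks * k) >> s;
	printf("mult=%lu shift=%d exact=%lluns approx=%lluns\n", k, s,
	       (unsigned long long)ns_exact, (unsigned long long)ns_approx);
	return 0;
}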
- local_irq_enable # could let syscall_trace_leave() + local_irq_enable # could let do_syscall_trace() # call schedule() instead move a0, sp - jal syscall_trace_leave + li a1, 1 + jal do_syscall_trace b resume_userspace #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) diff --git a/trunk/arch/mips/kernel/ftrace.c b/trunk/arch/mips/kernel/ftrace.c index feb8021a305f..94ca2b018af7 100644 --- a/trunk/arch/mips/kernel/ftrace.c +++ b/trunk/arch/mips/kernel/ftrace.c @@ -23,7 +23,6 @@ #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ -#define JUMP_RANGE_MASK ((1UL << 28) - 1) #define INSN_NOP 0x00000000 /* nop */ #define INSN_JAL(addr) \ @@ -45,12 +44,12 @@ static inline void ftrace_dyn_arch_init_insns(void) /* jal (ftrace_caller + 8), jump over the first two instruction */ buf = (u32 *)&insn_jal_ftrace_caller; - uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); + uasm_i_jal(&buf, (FTRACE_ADDR + 8)); #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* j ftrace_graph_caller */ buf = (u32 *)&insn_j_ftrace_graph_caller; - uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); + uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); #endif } diff --git a/trunk/arch/mips/kernel/i8253.c b/trunk/arch/mips/kernel/i8253.c index 391221b6a6aa..2392a7a296d4 100644 --- a/trunk/arch/mips/kernel/i8253.c +++ b/trunk/arch/mips/kernel/i8253.c @@ -125,11 +125,87 @@ void __init setup_pit_timer(void) setup_irq(0, &irq0); } +/* + * Since the PIT overflows every tick, its not very useful + * to just read by itself. So use jiffies to emulate a free + * running counter: + */ +static cycle_t pit_read(struct clocksource *cs) +{ + unsigned long flags; + int count; + u32 jifs; + static int old_count; + static u32 old_jifs; + + raw_spin_lock_irqsave(&i8253_lock, flags); + /* + * Although our caller may have the read side of xtime_lock, + * this is now a seqlock, and we are cheating in this routine + * by having side effects on state that we cannot undo if + * there is a collision on the seqlock and our caller has to + * retry. (Namely, old_jifs and old_count.) So we must treat + * jiffies as volatile despite the lock. We read jiffies + * before latching the timer count to guarantee that although + * the jiffies value might be older than the count (that is, + * the counter may underflow between the last point where + * jiffies was incremented and the point where we latch the + * count), it cannot be newer. + */ + jifs = jiffies; + outb_p(0x00, PIT_MODE); /* latch the count ASAP */ + count = inb_p(PIT_CH0); /* read the latched count */ + count |= inb_p(PIT_CH0) << 8; + + /* VIA686a test code... reset the latch if count > max + 1 */ + if (count > LATCH) { + outb_p(0x34, PIT_MODE); + outb_p(LATCH & 0xff, PIT_CH0); + outb(LATCH >> 8, PIT_CH0); + count = LATCH - 1; + } + + /* + * It's possible for count to appear to go the wrong way for a + * couple of reasons: + * + * 1. The timer counter underflows, but we haven't handled the + * resulting interrupt and incremented jiffies yet. + * 2. Hardware problem with the timer, not giving us continuous time, + * the counter does small "jumps" upwards on some Pentium systems, + * (see c't 95/10 page 335 for Neptun bug.) + * + * Previous attempts to handle these cases intelligently were + * buggy, so we just do the simple thing now. 
+ */ + if (count > old_count && jifs == old_jifs) { + count = old_count; + } + old_count = count; + old_jifs = jifs; + + raw_spin_unlock_irqrestore(&i8253_lock, flags); + + count = (LATCH - 1) - count; + + return (cycle_t)(jifs * LATCH) + count; +} + +static struct clocksource clocksource_pit = { + .name = "pit", + .rating = 110, + .read = pit_read, + .mask = CLOCKSOURCE_MASK(32), + .mult = 0, + .shift = 20, +}; + static int __init init_pit_clocksource(void) { if (num_possible_cpus() > 1) /* PIT does not scale! */ return 0; - return clocksource_i8253_init(); + clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); + return clocksource_register(&clocksource_pit); } arch_initcall(init_pit_clocksource); diff --git a/trunk/arch/mips/kernel/ptrace.c b/trunk/arch/mips/kernel/ptrace.c index 4e6ea1ffad46..d21c388c0116 100644 --- a/trunk/arch/mips/kernel/ptrace.c +++ b/trunk/arch/mips/kernel/ptrace.c @@ -533,10 +533,15 @@ static inline int audit_arch(void) * Notification of system call entry/exit * - triggered by current->work.syscall_trace */ -asmlinkage void syscall_trace_enter(struct pt_regs *regs) +asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) { /* do the secure computing check first */ - secure_computing(regs->regs[2]); + if (!entryexit) + secure_computing(regs->regs[2]); + + if (unlikely(current->audit_context) && entryexit) + audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), + regs->regs[2]); if (!(current->ptrace & PT_PTRACED)) goto out; @@ -560,40 +565,8 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) } out: - if (unlikely(current->audit_context)) + if (unlikely(current->audit_context) && !entryexit) audit_syscall_entry(audit_arch(), regs->regs[2], regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); } - -/* - * Notification of system call entry/exit - * - triggered by current->work.syscall_trace - */ -asmlinkage void syscall_trace_leave(struct pt_regs *regs) -{ - if (unlikely(current->audit_context)) - audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]), - -regs->regs[2]); - - if (!(current->ptrace & PT_PTRACED)) - return; - - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; - - /* The 0x80 provides a way for the tracing parent to distinguish - between a syscall stop and SIGTRAP delivery */ - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? - 0x80 : 0)); - - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. 
-brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } -} diff --git a/trunk/arch/mips/kernel/scall32-o32.S b/trunk/arch/mips/kernel/scall32-o32.S index 7a8e1dd7f6f2..7f5468b38d4c 100644 --- a/trunk/arch/mips/kernel/scall32-o32.S +++ b/trunk/arch/mips/kernel/scall32-o32.S @@ -88,7 +88,8 @@ syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - jal syscall_trace_enter + li a1, 0 + jal do_syscall_trace move t0, s0 RESTORE_STATIC @@ -564,7 +565,7 @@ einval: li v0, -ENOSYS sys sys_ioprio_get 2 /* 4315 */ sys sys_utimensat 4 sys sys_signalfd 3 - sys sys_ni_syscall 0 /* was timerfd */ + sys sys_ni_syscall 0 sys sys_eventfd 1 sys sys_fallocate 6 /* 4320 */ sys sys_timerfd_create 2 diff --git a/trunk/arch/mips/kernel/scall64-64.S b/trunk/arch/mips/kernel/scall64-64.S index 2d31c83224f9..a2e1fcbc41dc 100644 --- a/trunk/arch/mips/kernel/scall64-64.S +++ b/trunk/arch/mips/kernel/scall64-64.S @@ -91,7 +91,8 @@ syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - jal syscall_trace_enter + li a1, 0 + jal do_syscall_trace move t0, s0 RESTORE_STATIC @@ -403,7 +404,7 @@ sys_call_table: PTR sys_ioprio_get PTR sys_utimensat /* 5275 */ PTR sys_signalfd - PTR sys_ni_syscall /* was timerfd */ + PTR sys_ni_syscall PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create /* 5280 */ diff --git a/trunk/arch/mips/kernel/scall64-n32.S b/trunk/arch/mips/kernel/scall64-n32.S index 38a0503b9a4a..b2c7624995b8 100644 --- a/trunk/arch/mips/kernel/scall64-n32.S +++ b/trunk/arch/mips/kernel/scall64-n32.S @@ -89,7 +89,8 @@ n32_syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - jal syscall_trace_enter + li a1, 0 + jal do_syscall_trace move t0, s0 RESTORE_STATIC @@ -402,7 +403,7 @@ EXPORT(sysn32_call_table) PTR sys_ioprio_get PTR compat_sys_utimensat PTR compat_sys_signalfd /* 6280 */ - PTR sys_ni_syscall /* was timerfd */ + PTR sys_ni_syscall PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create diff --git a/trunk/arch/mips/kernel/scall64-o32.S b/trunk/arch/mips/kernel/scall64-o32.S index 91ea5e4041dd..049a9c8c49a0 100644 --- a/trunk/arch/mips/kernel/scall64-o32.S +++ b/trunk/arch/mips/kernel/scall64-o32.S @@ -123,7 +123,8 @@ trace_a_syscall: move s0, t2 # Save syscall pointer move a0, sp - jal syscall_trace_enter + li a1, 0 + jal do_syscall_trace move t0, s0 RESTORE_STATIC @@ -521,7 +522,7 @@ sys_call_table: PTR sys_ioprio_get /* 4315 */ PTR compat_sys_utimensat PTR compat_sys_signalfd - PTR sys_ni_syscall /* was timerfd */ + PTR sys_ni_syscall PTR sys_eventfd PTR sys32_fallocate /* 4320 */ PTR sys_timerfd_create diff --git a/trunk/arch/mips/kernel/smtc.c b/trunk/arch/mips/kernel/smtc.c index cedac4633741..5a88cc4ccd5a 100644 --- a/trunk/arch/mips/kernel/smtc.c +++ b/trunk/arch/mips/kernel/smtc.c @@ -929,7 +929,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) static void ipi_resched_interrupt(void) { - scheduler_ipi(); + /* Return from interrupt should be enough to cause scheduler check */ } static void ipi_call_interrupt(void) diff --git a/trunk/arch/mips/kernel/syscall.c b/trunk/arch/mips/kernel/syscall.c index d02765708ddb..58beabf50b3c 100644 --- a/trunk/arch/mips/kernel/syscall.c +++ b/trunk/arch/mips/kernel/syscall.c @@ -10,9 +10,12 @@ #include #include #include +#include #include #include +#include #include +#include #include #include #include @@ -22,9 +25,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -61,6 +66,121 @@ asmlinkage int sysm_pipe(nabi_no_regargs volatile 
struct pt_regs regs) return res; } +unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ + +EXPORT_SYMBOL(shm_align_mask); + +#define COLOUR_ALIGN(addr,pgoff) \ + ((((addr) + shm_align_mask) & ~shm_align_mask) + \ + (((pgoff) << PAGE_SHIFT) & shm_align_mask)) + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct vm_area_struct * vmm; + int do_color_align; + unsigned long task_size; + +#ifdef CONFIG_32BIT + task_size = TASK_SIZE; +#else /* Must be CONFIG_64BIT*/ + task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE; +#endif + + if (len > task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) { + /* Even MAP_FIXED mappings must reside within task_size. */ + if (task_size - len < addr) + return -EINVAL; + + /* + * We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) + return -EINVAL; + return addr; + } + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + vmm = find_vma(current->mm, addr); + if (task_size - len >= addr && + (!vmm || addr + len <= vmm->vm_start)) + return addr; + } + addr = current->mm->mmap_base; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { + /* At this point: (!vmm || addr < vmm->vm_end). */ + if (task_size - len < addr) + return -ENOMEM; + if (!vmm || addr + len <= vmm->vm_start) + return addr; + addr = vmm->vm_end; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + } +} + +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) { + random_factor = get_random_int(); + random_factor = random_factor << PAGE_SHIFT; + if (TASK_IS_32BIT_ADDR) + random_factor &= 0xfffffful; + else + random_factor &= 0xffffffful; + } + + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; +} + +static inline unsigned long brk_rnd(void) +{ + unsigned long rnd = get_random_int(); + + rnd = rnd << PAGE_SHIFT; + /* 8MB for 32bit, 256MB for 64bit */ + if (TASK_IS_32BIT_ADDR) + rnd = rnd & 0x7ffffful; + else + rnd = rnd & 0xffffffful; + + return rnd; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long base = mm->brk; + unsigned long ret; + + ret = PAGE_ALIGN(base + brk_rnd()); + + if (ret < mm->brk) + return mm->brk; + + return ret; +} + SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) diff --git a/trunk/arch/mips/kernel/traps.c b/trunk/arch/mips/kernel/traps.c index e9b3af27d844..71350f7f2d88 100644 --- a/trunk/arch/mips/kernel/traps.c +++ b/trunk/arch/mips/kernel/traps.c @@ -374,8 +374,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) unsigned long dvpret = dvpe(); #endif /* CONFIG_MIPS_MT_SMTC */ - if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) - sig = 0; + notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); console_verbose(); spin_lock_irq(&die_lock); @@ -384,6 +383,9 @@ void __noreturn die(const char *str, struct pt_regs *regs) mips_mt_regdump(dvpret); 
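The arch_get_unmapped_area() hunk above colour-aligns shared mappings so that the low bits of the chosen virtual address match the cache colour of the file offset, which is what its COLOUR_ALIGN() macro computes. A self-contained sketch of that arithmetic with an assumed 32 KiB aliasing boundary (the kernel derives shm_align_mask from the real cache geometry):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12

/* Assumed 32 KiB aliasing boundary, for illustration only. */
static unsigned long shm_align_mask = 0x7fffUL;

/* Same arithmetic as COLOUR_ALIGN(): round the hint up to the aliasing
 * boundary, then add the colour of the requested file offset. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + shm_align_mask) & ~shm_align_mask) +
	       ((pgoff << PAGE_SHIFT) & shm_align_mask);
}

int main(void)
{
	unsigned long hint = 0x20001234UL;
	unsigned long pgoff = 3;
	unsigned long addr = colour_align(hint, pgoff);

	assert(addr >= hint);
	assert((addr & shm_align_mask) == ((pgoff << PAGE_SHIFT) & shm_align_mask));
	printf("hint 0x%lx, pgoff %lu -> colour-aligned 0x%lx\n", hint, pgoff, addr);
	return 0;
}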
#endif /* CONFIG_MIPS_MT_SMTC */ + if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) + sig = 0; + printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); add_taint(TAINT_DIE); diff --git a/trunk/arch/mips/kernel/vmlinux.lds.S b/trunk/arch/mips/kernel/vmlinux.lds.S index cd2ca544454b..832afbb87588 100644 --- a/trunk/arch/mips/kernel/vmlinux.lds.S +++ b/trunk/arch/mips/kernel/vmlinux.lds.S @@ -68,14 +68,12 @@ SECTIONS RODATA /* writeable */ - _sdata = .; /* Start of data section */ .data : { /* Data */ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ INIT_TASK_DATA(PAGE_SIZE) NOSAVE_DATA CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) - READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA CONSTRUCTORS } diff --git a/trunk/arch/mips/lantiq/Kconfig b/trunk/arch/mips/lantiq/Kconfig deleted file mode 100644 index 3fccf2104513..000000000000 --- a/trunk/arch/mips/lantiq/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ -if LANTIQ - -config SOC_TYPE_XWAY - bool - default n - -choice - prompt "SoC Type" - default SOC_XWAY - -config SOC_AMAZON_SE - bool "Amazon SE" - select SOC_TYPE_XWAY - -config SOC_XWAY - bool "XWAY" - select SOC_TYPE_XWAY - select HW_HAS_PCI -endchoice - -source "arch/mips/lantiq/xway/Kconfig" - -endif diff --git a/trunk/arch/mips/lantiq/Makefile b/trunk/arch/mips/lantiq/Makefile deleted file mode 100644 index e5dae0e24b00..000000000000 --- a/trunk/arch/mips/lantiq/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (C) 2010 John Crispin -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 as published -# by the Free Software Foundation. - -obj-y := irq.o setup.o clk.o prom.o devices.o - -obj-$(CONFIG_EARLY_PRINTK) += early_printk.o - -obj-$(CONFIG_SOC_TYPE_XWAY) += xway/ diff --git a/trunk/arch/mips/lantiq/Platform b/trunk/arch/mips/lantiq/Platform deleted file mode 100644 index f3dff05722de..000000000000 --- a/trunk/arch/mips/lantiq/Platform +++ /dev/null @@ -1,8 +0,0 @@ -# -# Lantiq -# - -platform-$(CONFIG_LANTIQ) += lantiq/ -cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq -load-$(CONFIG_LANTIQ) = 0xffffffff80002000 -cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway diff --git a/trunk/arch/mips/lantiq/clk.c b/trunk/arch/mips/lantiq/clk.c deleted file mode 100644 index 94560899d13e..000000000000 --- a/trunk/arch/mips/lantiq/clk.c +++ /dev/null @@ -1,140 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 Thomas Langer - * Copyright (C) 2010 John Crispin - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#include "clk.h" - -struct clk { - const char *name; - unsigned long rate; - unsigned long (*get_rate) (void); -}; - -static struct clk *cpu_clk; -static int cpu_clk_cnt; - -/* lantiq socs have 3 static clocks */ -static struct clk cpu_clk_generic[] = { - { - .name = "cpu", - .get_rate = ltq_get_cpu_hz, - }, { - .name = "fpi", - .get_rate = ltq_get_fpi_hz, - }, { - .name = "io", - .get_rate = ltq_get_io_region_clock, - }, -}; - -static struct resource ltq_cgu_resource = { - .name = "cgu", - .start = LTQ_CGU_BASE_ADDR, - .end = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -/* remapped clock register range */ -void __iomem *ltq_cgu_membase; - -void clk_init(void) -{ - cpu_clk = cpu_clk_generic; - cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic); -} - -static inline int clk_good(struct clk *clk) -{ - return clk && !IS_ERR(clk); -} - -unsigned long clk_get_rate(struct clk *clk) -{ - if (unlikely(!clk_good(clk))) - return 0; - - if (clk->rate != 0) - return clk->rate; - - if (clk->get_rate != NULL) - return clk->get_rate(); - - return 0; -} -EXPORT_SYMBOL(clk_get_rate); - -struct clk *clk_get(struct device *dev, const char *id) -{ - int i; - - for (i = 0; i < cpu_clk_cnt; i++) - if (!strcmp(id, cpu_clk[i].name)) - return &cpu_clk[i]; - BUG(); - return ERR_PTR(-ENOENT); -} -EXPORT_SYMBOL(clk_get); - -void clk_put(struct clk *clk) -{ - /* not used */ -} -EXPORT_SYMBOL(clk_put); - -static inline u32 ltq_get_counter_resolution(void) -{ - u32 res; - - __asm__ __volatile__( - ".set push\n" - ".set mips32r2\n" - "rdhwr %0, $3\n" - ".set pop\n" - : "=&r" (res) - : /* no input */ - : "memory"); - - return res; -} - -void __init plat_time_init(void) -{ - struct clk *clk; - - if (insert_resource(&iomem_resource, <q_cgu_resource) < 0) - panic("Failed to insert cgu memory\n"); - - if (request_mem_region(ltq_cgu_resource.start, - resource_size(<q_cgu_resource), "cgu") < 0) - panic("Failed to request cgu memory\n"); - - ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start, - resource_size(<q_cgu_resource)); - if (!ltq_cgu_membase) { - pr_err("Failed to remap cgu memory\n"); - unreachable(); - } - clk = clk_get(0, "cpu"); - mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution(); - write_c0_compare(read_c0_count()); - clk_put(clk); -} diff --git a/trunk/arch/mips/lantiq/clk.h b/trunk/arch/mips/lantiq/clk.h deleted file mode 100644 index 3328925f2c3f..000000000000 --- a/trunk/arch/mips/lantiq/clk.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LTQ_CLK_H__ -#define _LTQ_CLK_H__ - -extern void clk_init(void); - -extern unsigned long ltq_get_cpu_hz(void); -extern unsigned long ltq_get_fpi_hz(void); -extern unsigned long ltq_get_io_region_clock(void); - -#endif diff --git a/trunk/arch/mips/lantiq/devices.c b/trunk/arch/mips/lantiq/devices.c deleted file mode 100644 index 7b82c34cb169..000000000000 --- a/trunk/arch/mips/lantiq/devices.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include "devices.h" - -/* nor flash */ -static struct resource ltq_nor_resource = { - .name = "nor", - .start = LTQ_FLASH_START, - .end = LTQ_FLASH_START + LTQ_FLASH_MAX - 1, - .flags = IORESOURCE_MEM, -}; - -static struct platform_device ltq_nor = { - .name = "ltq_nor", - .resource = <q_nor_resource, - .num_resources = 1, -}; - -void __init ltq_register_nor(struct physmap_flash_data *data) -{ - ltq_nor.dev.platform_data = data; - platform_device_register(<q_nor); -} - -/* watchdog */ -static struct resource ltq_wdt_resource = { - .name = "watchdog", - .start = LTQ_WDT_BASE_ADDR, - .end = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -void __init ltq_register_wdt(void) -{ - platform_device_register_simple("ltq_wdt", 0, <q_wdt_resource, 1); -} - -/* asc ports */ -static struct resource ltq_asc0_resources[] = { - { - .name = "asc0", - .start = LTQ_ASC0_BASE_ADDR, - .end = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1, - .flags = IORESOURCE_MEM, - }, - IRQ_RES(tx, LTQ_ASC_TIR(0)), - IRQ_RES(rx, LTQ_ASC_RIR(0)), - IRQ_RES(err, LTQ_ASC_EIR(0)), -}; - -static struct resource ltq_asc1_resources[] = { - { - .name = "asc1", - .start = LTQ_ASC1_BASE_ADDR, - .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1, - .flags = IORESOURCE_MEM, - }, - IRQ_RES(tx, LTQ_ASC_TIR(1)), - IRQ_RES(rx, LTQ_ASC_RIR(1)), - IRQ_RES(err, LTQ_ASC_EIR(1)), -}; - -void __init ltq_register_asc(int port) -{ - switch (port) { - case 0: - platform_device_register_simple("ltq_asc", 0, - ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources)); - break; - case 1: - platform_device_register_simple("ltq_asc", 1, - ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources)); - break; - default: - break; - } -} - -#ifdef CONFIG_PCI -/* pci */ -static struct platform_device ltq_pci = { - .name = "ltq_pci", - .num_resources = 0, -}; - -void __init ltq_register_pci(struct ltq_pci_data *data) -{ - ltq_pci.dev.platform_data = data; - platform_device_register(<q_pci); -} -#else -void __init ltq_register_pci(struct ltq_pci_data *data) -{ - pr_err("kernel is compiled without PCI support\n"); -} -#endif diff --git a/trunk/arch/mips/lantiq/devices.h b/trunk/arch/mips/lantiq/devices.h deleted file mode 100644 index 2947bb19a528..000000000000 --- a/trunk/arch/mips/lantiq/devices.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LTQ_DEVICES_H__ -#define _LTQ_DEVICES_H__ - -#include -#include - -#define IRQ_RES(resname, irq) \ - {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ} - -extern void ltq_register_nor(struct physmap_flash_data *data); -extern void ltq_register_wdt(void); -extern void ltq_register_asc(int port); -extern void ltq_register_pci(struct ltq_pci_data *data); - -#endif diff --git a/trunk/arch/mips/lantiq/early_printk.c b/trunk/arch/mips/lantiq/early_printk.c deleted file mode 100644 index 972e05f87631..000000000000 --- a/trunk/arch/mips/lantiq/early_printk.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include - -#include -#include - -/* no ioremap possible at this early stage, lets use KSEG1 instead */ -#define LTQ_ASC_BASE KSEG1ADDR(LTQ_ASC1_BASE_ADDR) -#define ASC_BUF 1024 -#define LTQ_ASC_FSTAT ((u32 *)(LTQ_ASC_BASE + 0x0048)) -#define LTQ_ASC_TBUF ((u32 *)(LTQ_ASC_BASE + 0x0020)) -#define TXMASK 0x3F00 -#define TXOFFSET 8 - -void prom_putchar(char c) -{ - unsigned long flags; - - local_irq_save(flags); - do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET); - if (c == '\n') - ltq_w32('\r', LTQ_ASC_TBUF); - ltq_w32(c, LTQ_ASC_TBUF); - local_irq_restore(flags); -} diff --git a/trunk/arch/mips/lantiq/irq.c b/trunk/arch/mips/lantiq/irq.c deleted file mode 100644 index fc89795cafdb..000000000000 --- a/trunk/arch/mips/lantiq/irq.c +++ /dev/null @@ -1,326 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - * Copyright (C) 2010 Thomas Langer - */ - -#include -#include - -#include -#include - -#include -#include - -/* register definitions */ -#define LTQ_ICU_IM0_ISR 0x0000 -#define LTQ_ICU_IM0_IER 0x0008 -#define LTQ_ICU_IM0_IOSR 0x0010 -#define LTQ_ICU_IM0_IRSR 0x0018 -#define LTQ_ICU_IM0_IMR 0x0020 -#define LTQ_ICU_IM1_ISR 0x0028 -#define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR) - -#define LTQ_EIU_EXIN_C 0x0000 -#define LTQ_EIU_EXIN_INIC 0x0004 -#define LTQ_EIU_EXIN_INEN 0x000C - -/* irq numbers used by the external interrupt unit (EIU) */ -#define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30) -#define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31) -#define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26) -#define LTQ_EIU_IR3 INT_NUM_IM1_IRL0 -#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) -#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) -#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) - -#define MAX_EIU 6 - -/* irqs generated by device attached to the EBU need to be acked in - * a special manner - */ -#define LTQ_ICU_EBU_IRQ 22 - -#define ltq_icu_w32(x, y) ltq_w32((x), ltq_icu_membase + (y)) -#define ltq_icu_r32(x) ltq_r32(ltq_icu_membase + (x)) - -#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y)) -#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x)) - -static unsigned short ltq_eiu_irq[MAX_EIU] = { - LTQ_EIU_IR0, - LTQ_EIU_IR1, - LTQ_EIU_IR2, - LTQ_EIU_IR3, - LTQ_EIU_IR4, - LTQ_EIU_IR5, -}; - -static struct resource ltq_icu_resource = { - .name = "icu", - .start = LTQ_ICU_BASE_ADDR, - .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -static struct resource ltq_eiu_resource = { - .name = "eiu", - .start = LTQ_EIU_BASE_ADDR, - .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -static void __iomem *ltq_icu_membase; -static void __iomem *ltq_eiu_membase; - -void ltq_disable_irq(struct irq_data *d) -{ - u32 ier = LTQ_ICU_IM0_IER; - int irq_nr = d->irq - INT_NUM_IRQ0; - - ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); - irq_nr %= INT_NUM_IM_OFFSET; - ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); -} - -void ltq_mask_and_ack_irq(struct irq_data *d) -{ - u32 ier = LTQ_ICU_IM0_IER; - u32 isr = LTQ_ICU_IM0_ISR; - int irq_nr = d->irq - INT_NUM_IRQ0; - - ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); - isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); - irq_nr %= INT_NUM_IM_OFFSET; - ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); - ltq_icu_w32((1 << irq_nr), isr); -} - -static void ltq_ack_irq(struct irq_data *d) -{ - u32 isr = LTQ_ICU_IM0_ISR; - int irq_nr = d->irq - INT_NUM_IRQ0; - - isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); - irq_nr %= INT_NUM_IM_OFFSET; - ltq_icu_w32((1 << irq_nr), isr); -} - -void ltq_enable_irq(struct irq_data *d) -{ - u32 ier = LTQ_ICU_IM0_IER; - int irq_nr = d->irq - INT_NUM_IRQ0; - - ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); - irq_nr %= INT_NUM_IM_OFFSET; - ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier); -} - -static unsigned int ltq_startup_eiu_irq(struct irq_data *d) -{ - int i; - int irq_nr = d->irq - INT_NUM_IRQ0; - - ltq_enable_irq(d); - for (i = 0; i < MAX_EIU; i++) { - if (irq_nr == ltq_eiu_irq[i]) { - /* low level - we should really handle set_type */ - ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | - (0x6 << (i * 4)), LTQ_EIU_EXIN_C); - /* clear all pending */ - ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i), - LTQ_EIU_EXIN_INIC); - /* enable */ - ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i), - LTQ_EIU_EXIN_INEN); - break; - 
} - } - - return 0; -} - -static void ltq_shutdown_eiu_irq(struct irq_data *d) -{ - int i; - int irq_nr = d->irq - INT_NUM_IRQ0; - - ltq_disable_irq(d); - for (i = 0; i < MAX_EIU; i++) { - if (irq_nr == ltq_eiu_irq[i]) { - /* disable */ - ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), - LTQ_EIU_EXIN_INEN); - break; - } - } -} - -static struct irq_chip ltq_irq_type = { - "icu", - .irq_enable = ltq_enable_irq, - .irq_disable = ltq_disable_irq, - .irq_unmask = ltq_enable_irq, - .irq_ack = ltq_ack_irq, - .irq_mask = ltq_disable_irq, - .irq_mask_ack = ltq_mask_and_ack_irq, -}; - -static struct irq_chip ltq_eiu_type = { - "eiu", - .irq_startup = ltq_startup_eiu_irq, - .irq_shutdown = ltq_shutdown_eiu_irq, - .irq_enable = ltq_enable_irq, - .irq_disable = ltq_disable_irq, - .irq_unmask = ltq_enable_irq, - .irq_ack = ltq_ack_irq, - .irq_mask = ltq_disable_irq, - .irq_mask_ack = ltq_mask_and_ack_irq, -}; - -static void ltq_hw_irqdispatch(int module) -{ - u32 irq; - - irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET)); - if (irq == 0) - return; - - /* silicon bug causes only the msb set to 1 to be valid. all - * other bits might be bogus - */ - irq = __fls(irq); - do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module)); - - /* if this is a EBU irq, we need to ack it or get a deadlock */ - if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0)) - ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, - LTQ_EBU_PCC_ISTAT); -} - -#define DEFINE_HWx_IRQDISPATCH(x) \ - static void ltq_hw ## x ## _irqdispatch(void) \ - { \ - ltq_hw_irqdispatch(x); \ - } -DEFINE_HWx_IRQDISPATCH(0) -DEFINE_HWx_IRQDISPATCH(1) -DEFINE_HWx_IRQDISPATCH(2) -DEFINE_HWx_IRQDISPATCH(3) -DEFINE_HWx_IRQDISPATCH(4) - -static void ltq_hw5_irqdispatch(void) -{ - do_IRQ(MIPS_CPU_TIMER_IRQ); -} - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; - unsigned int i; - - if (pending & CAUSEF_IP7) { - do_IRQ(MIPS_CPU_TIMER_IRQ); - goto out; - } else { - for (i = 0; i < 5; i++) { - if (pending & (CAUSEF_IP2 << i)) { - ltq_hw_irqdispatch(i); - goto out; - } - } - } - pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status()); - -out: - return; -} - -static struct irqaction cascade = { - .handler = no_action, - .flags = IRQF_DISABLED, - .name = "cascade", -}; - -void __init arch_init_irq(void) -{ - int i; - - if (insert_resource(&iomem_resource, <q_icu_resource) < 0) - panic("Failed to insert icu memory\n"); - - if (request_mem_region(ltq_icu_resource.start, - resource_size(<q_icu_resource), "icu") < 0) - panic("Failed to request icu memory\n"); - - ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start, - resource_size(<q_icu_resource)); - if (!ltq_icu_membase) - panic("Failed to remap icu memory\n"); - - if (insert_resource(&iomem_resource, <q_eiu_resource) < 0) - panic("Failed to insert eiu memory\n"); - - if (request_mem_region(ltq_eiu_resource.start, - resource_size(<q_eiu_resource), "eiu") < 0) - panic("Failed to request eiu memory\n"); - - ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start, - resource_size(<q_eiu_resource)); - if (!ltq_eiu_membase) - panic("Failed to remap eiu memory\n"); - - /* make sure all irqs are turned off by default */ - for (i = 0; i < 5; i++) - ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET)); - - /* clear all possibly pending interrupts */ - ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET)); - - mips_cpu_irq_init(); - - for (i = 2; i <= 6; i++) - setup_irq(i, &cascade); - - if (cpu_has_vint) { - pr_info("Setting 
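The dispatch path above relies on __fls() because of the noted silicon bug: only the most significant set bit of the IOSR snapshot is trustworthy. A minimal user-space equivalent of that "take the highest bit only" step, with a made-up pending mask:

#include <stdio.h>

/* plain-C stand-in for the kernel's __fls(): index of the highest set bit */
static unsigned int highest_bit(unsigned int x)
{
	unsigned int i = 31;

	while (!(x & (1u << i)))
		i--;
	return i;
}

int main(void)
{
	unsigned int iosr = 0x00400012;		/* hypothetical IOSR snapshot */

	if (iosr)
		printf("pending 0x%08x -> dispatch bit %u only\n",
		       iosr, highest_bit(iosr));
	return 0;
}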
up vectored interrupts\n"); - set_vi_handler(2, ltq_hw0_irqdispatch); - set_vi_handler(3, ltq_hw1_irqdispatch); - set_vi_handler(4, ltq_hw2_irqdispatch); - set_vi_handler(5, ltq_hw3_irqdispatch); - set_vi_handler(6, ltq_hw4_irqdispatch); - set_vi_handler(7, ltq_hw5_irqdispatch); - } - - for (i = INT_NUM_IRQ0; - i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++) - if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) || - (i == LTQ_EIU_IR2)) - irq_set_chip_and_handler(i, <q_eiu_type, - handle_level_irq); - /* EIU3-5 only exist on ar9 and vr9 */ - else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) || - (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9())) - irq_set_chip_and_handler(i, <q_eiu_type, - handle_level_irq); - else - irq_set_chip_and_handler(i, <q_irq_type, - handle_level_irq); - -#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) - set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | - IE_IRQ3 | IE_IRQ4 | IE_IRQ5); -#else - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | - IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); -#endif -} - -unsigned int __cpuinit get_c0_compare_int(void) -{ - return CP0_LEGACY_COMPARE_IRQ; -} diff --git a/trunk/arch/mips/lantiq/machtypes.h b/trunk/arch/mips/lantiq/machtypes.h deleted file mode 100644 index 7e01b8c484eb..000000000000 --- a/trunk/arch/mips/lantiq/machtypes.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LANTIQ_MACH_H__ -#define _LANTIQ_MACH_H__ - -#include - -enum lantiq_mach_type { - LTQ_MACH_GENERIC = 0, - LTQ_MACH_EASY50712, /* Danube evaluation board */ - LTQ_MACH_EASY50601, /* Amazon SE evaluation board */ -}; - -#endif diff --git a/trunk/arch/mips/lantiq/prom.c b/trunk/arch/mips/lantiq/prom.c deleted file mode 100644 index 56ba007bf1e5..000000000000 --- a/trunk/arch/mips/lantiq/prom.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include - -#include - -#include "prom.h" -#include "clk.h" - -static struct ltq_soc_info soc_info; - -unsigned int ltq_get_cpu_ver(void) -{ - return soc_info.rev; -} -EXPORT_SYMBOL(ltq_get_cpu_ver); - -unsigned int ltq_get_soc_type(void) -{ - return soc_info.type; -} -EXPORT_SYMBOL(ltq_get_soc_type); - -const char *get_system_type(void) -{ - return soc_info.sys_type; -} - -void prom_free_prom_memory(void) -{ -} - -static void __init prom_init_cmdline(void) -{ - int argc = fw_arg0; - char **argv = (char **) KSEG1ADDR(fw_arg1); - int i; - - for (i = 0; i < argc; i++) { - char *p = (char *) KSEG1ADDR(argv[i]); - - if (p && *p) { - strlcat(arcs_cmdline, p, sizeof(arcs_cmdline)); - strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); - } - } -} - -void __init prom_init(void) -{ - struct clk *clk; - - ltq_soc_detect(&soc_info); - clk_init(); - clk = clk_get(0, "cpu"); - snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d", - soc_info.name, soc_info.rev); - clk_put(clk); - soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0'; - pr_info("SoC: %s\n", soc_info.sys_type); - prom_init_cmdline(); -} diff --git a/trunk/arch/mips/lantiq/prom.h b/trunk/arch/mips/lantiq/prom.h deleted file mode 100644 index b4229d94280f..000000000000 --- a/trunk/arch/mips/lantiq/prom.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LTQ_PROM_H__ -#define _LTQ_PROM_H__ - -#define LTQ_SYS_TYPE_LEN 0x100 - -struct ltq_soc_info { - unsigned char *name; - unsigned int rev; - unsigned int partnum; - unsigned int type; - unsigned char sys_type[LTQ_SYS_TYPE_LEN]; -}; - -extern void ltq_soc_detect(struct ltq_soc_info *i); -extern void ltq_soc_setup(void); - -#endif diff --git a/trunk/arch/mips/lantiq/setup.c b/trunk/arch/mips/lantiq/setup.c deleted file mode 100644 index 9b8af77ed0f9..000000000000 --- a/trunk/arch/mips/lantiq/setup.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
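prom_init_cmdline() above walks the firmware argv[] (already translated to KSEG1 pointers) and concatenates each entry into the bounded arcs_cmdline buffer. A rough user-space sketch of that bounded concatenation; the buffer size and sample arguments are invented, and a small snprintf-based helper stands in for strlcat():

#include <stdio.h>
#include <string.h>

#define CMDLINE_LEN 128			/* stand-in for the arcs_cmdline size */

/* bounded append, playing the role of strlcat() into arcs_cmdline[] */
static void bounded_append(char *dst, const char *src, size_t size)
{
	size_t len = strlen(dst);

	if (len < size - 1)
		snprintf(dst + len, size - len, "%s", src);
}

int main(void)
{
	const char *argv[] = { "console=ttyS0,115200", "mem=64M", NULL };
	char cmdline[CMDLINE_LEN] = "";
	int i;

	for (i = 0; argv[i] && *argv[i]; i++) {
		bounded_append(cmdline, argv[i], sizeof(cmdline));
		bounded_append(cmdline, " ", sizeof(cmdline));
	}
	printf("arcs_cmdline: \"%s\"\n", cmdline);
	return 0;
}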
- * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include - -#include - -#include "machtypes.h" -#include "devices.h" -#include "prom.h" - -void __init plat_mem_setup(void) -{ - /* assume 16M as default incase uboot fails to pass proper ramsize */ - unsigned long memsize = 16; - char **envp = (char **) KSEG1ADDR(fw_arg2); - - ioport_resource.start = IOPORT_RESOURCE_START; - ioport_resource.end = IOPORT_RESOURCE_END; - iomem_resource.start = IOMEM_RESOURCE_START; - iomem_resource.end = IOMEM_RESOURCE_END; - - set_io_port_base((unsigned long) KSEG1); - - while (*envp) { - char *e = (char *)KSEG1ADDR(*envp); - if (!strncmp(e, "memsize=", 8)) { - e += 8; - if (strict_strtoul(e, 0, &memsize)) - pr_warn("bad memsize specified\n"); - } - envp++; - } - memsize *= 1024 * 1024; - add_memory_region(0x00000000, memsize, BOOT_MEM_RAM); -} - -static int __init -lantiq_setup(void) -{ - ltq_soc_setup(); - mips_machine_setup(); - return 0; -} - -arch_initcall(lantiq_setup); - -static void __init -lantiq_generic_init(void) -{ - /* Nothing to do */ -} - -MIPS_MACHINE(LTQ_MACH_GENERIC, - "Generic", - "Generic Lantiq based board", - lantiq_generic_init); diff --git a/trunk/arch/mips/lantiq/xway/Kconfig b/trunk/arch/mips/lantiq/xway/Kconfig deleted file mode 100644 index 2b857de36620..000000000000 --- a/trunk/arch/mips/lantiq/xway/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ -if SOC_XWAY - -menu "MIPS Machine" - -config LANTIQ_MACH_EASY50712 - bool "Easy50712 - Danube" - default y - -endmenu - -endif - -if SOC_AMAZON_SE - -menu "MIPS Machine" - -config LANTIQ_MACH_EASY50601 - bool "Easy50601 - Amazon SE" - default y - -endmenu - -endif diff --git a/trunk/arch/mips/lantiq/xway/Makefile b/trunk/arch/mips/lantiq/xway/Makefile deleted file mode 100644 index c517f2e77563..000000000000 --- a/trunk/arch/mips/lantiq/xway/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o - -obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o -obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o - -obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o -obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o diff --git a/trunk/arch/mips/lantiq/xway/clk-ase.c b/trunk/arch/mips/lantiq/xway/clk-ase.c deleted file mode 100644 index 22d823acd536..000000000000 --- a/trunk/arch/mips/lantiq/xway/clk-ase.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
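The memsize handling in plat_mem_setup() defaults to 16 MB and overrides it from a "memsize=" environment string. A stand-alone approximation of that parse, using strtoul() in place of the kernel's strict_strtoul() and a made-up environment:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *envp[] = { "ethaddr=00:11:22:33:44:55", "memsize=64", NULL };
	unsigned long memsize = 16;	/* default, in MB, as in the code above */
	int i;

	for (i = 0; envp[i]; i++) {
		if (!strncmp(envp[i], "memsize=", 8)) {
			char *end;
			unsigned long v = strtoul(envp[i] + 8, &end, 0);

			if (end != envp[i] + 8 && *end == '\0')
				memsize = v;
			else
				fprintf(stderr, "bad memsize specified\n");
		}
	}
	printf("add_memory_region(0, %lu, BOOT_MEM_RAM)\n",
	       memsize * 1024 * 1024);
	return 0;
}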
- * - * Copyright (C) 2011 John Crispin - */ - -#include -#include -#include -#include - -#include -#include -#include - -#include - -/* cgu registers */ -#define LTQ_CGU_SYS 0x0010 - -unsigned int ltq_get_io_region_clock(void) -{ - return CLOCK_133M; -} -EXPORT_SYMBOL(ltq_get_io_region_clock); - -unsigned int ltq_get_fpi_bus_clock(int fpi) -{ - return CLOCK_133M; -} -EXPORT_SYMBOL(ltq_get_fpi_bus_clock); - -unsigned int ltq_get_cpu_hz(void) -{ - if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5)) - return CLOCK_266M; - else - return CLOCK_133M; -} -EXPORT_SYMBOL(ltq_get_cpu_hz); - -unsigned int ltq_get_fpi_hz(void) -{ - return CLOCK_133M; -} -EXPORT_SYMBOL(ltq_get_fpi_hz); diff --git a/trunk/arch/mips/lantiq/xway/clk-xway.c b/trunk/arch/mips/lantiq/xway/clk-xway.c deleted file mode 100644 index ddd39593c581..000000000000 --- a/trunk/arch/mips/lantiq/xway/clk-xway.c +++ /dev/null @@ -1,223 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include - -#include -#include -#include - -#include - -static unsigned int ltq_ram_clocks[] = { - CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M }; -#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3] - -#define BASIC_FREQUENCY_1 35328000 -#define BASIC_FREQUENCY_2 36000000 -#define BASIS_REQUENCY_USB 12000000 - -#define GET_BITS(x, msb, lsb) \ - (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb)) - -#define LTQ_CGU_PLL0_CFG 0x0004 -#define LTQ_CGU_PLL1_CFG 0x0008 -#define LTQ_CGU_PLL2_CFG 0x000C -#define LTQ_CGU_SYS 0x0010 -#define LTQ_CGU_UPDATE 0x0014 -#define LTQ_CGU_IF_CLK 0x0018 -#define LTQ_CGU_OSC_CON 0x001C -#define LTQ_CGU_SMD 0x0020 -#define LTQ_CGU_CT1SR 0x0028 -#define LTQ_CGU_CT2SR 0x002C -#define LTQ_CGU_PCMCR 0x0030 -#define LTQ_CGU_PCI_CR 0x0034 -#define LTQ_CGU_PD_PC 0x0038 -#define LTQ_CGU_FMR 0x003C - -#define CGU_PLL0_PHASE_DIVIDER_ENABLE \ - (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31)) -#define CGU_PLL0_BYPASS \ - (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30)) -#define CGU_PLL0_CFG_DSMSEL \ - (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28)) -#define CGU_PLL0_CFG_FRAC_EN \ - (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27)) -#define CGU_PLL1_SRC \ - (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31)) -#define CGU_PLL2_PHASE_DIVIDER_ENABLE \ - (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20)) -#define CGU_SYS_FPI_SEL (1 << 6) -#define CGU_SYS_DDR_SEL 0x3 -#define CGU_PLL0_SRC (1 << 29) - -#define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17) -#define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6) -#define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2) -#define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17) -#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13) - -static unsigned int ltq_get_pll0_fdiv(void); - -static inline unsigned int get_input_clock(int pll) -{ - switch (pll) { - case 0: - if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC) - return BASIS_REQUENCY_USB; - else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) - return BASIC_FREQUENCY_1; - else - return BASIC_FREQUENCY_2; - case 1: - if (CGU_PLL1_SRC) - return BASIS_REQUENCY_USB; - else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) - return BASIC_FREQUENCY_1; - else - return BASIC_FREQUENCY_2; - case 2: - switch (CGU_PLL2_SRC) { - case 0: - return ltq_get_pll0_fdiv(); - case 1: - return 
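The Amazon SE clock code above is almost entirely fixed-frequency: one CGU_SYS bit selects the CPU clock, everything else reports 133 MHz. A small sketch of that decode; the exact Hz values behind CLOCK_133M/CLOCK_266M are assumptions (133.33/266.67 MHz) used only for the printout:

#include <stdio.h>

#define CLOCK_133M 133333333	/* assumed Hz values, for illustration only */
#define CLOCK_266M 266666666

static unsigned int ase_cpu_hz(unsigned int cgu_sys)
{
	return (cgu_sys & (1 << 5)) ? CLOCK_266M : CLOCK_133M;
}

int main(void)
{
	printf("CGU_SYS bit5=0 -> %u Hz\n", ase_cpu_hz(0x00));
	printf("CGU_SYS bit5=1 -> %u Hz\n", ase_cpu_hz(0x20));
	return 0;
}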
CGU_PLL2_PHASE_DIVIDER_ENABLE ? - BASIC_FREQUENCY_1 : - BASIC_FREQUENCY_2; - case 2: - return BASIS_REQUENCY_USB; - } - default: - return 0; - } -} - -static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den) -{ - u64 res, clock = get_input_clock(pll); - - res = num * clock; - do_div(res, den); - return res; -} - -static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N, - unsigned int K) -{ - unsigned int num = ((N + 1) << 10) + K; - unsigned int den = (M + 1) << 10; - - return cal_dsm(pll, num, den); -} - -static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N, - unsigned int K) -{ - unsigned int num = ((N + 1) << 11) + K + 512; - unsigned int den = (M + 1) << 11; - - return cal_dsm(pll, num, den); -} - -static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N, - unsigned int K) -{ - unsigned int num = K >= 512 ? - ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584; - unsigned int den = (M + 1) << 12; - - return cal_dsm(pll, num, den); -} - -static inline unsigned int dsm(int pll, unsigned int M, unsigned int N, - unsigned int K, unsigned int dsmsel, unsigned int phase_div_en) -{ - if (!dsmsel) - return mash_dsm(pll, M, N, K); - else if (!phase_div_en) - return mash_dsm(pll, M, N, K); - else - return ssff_dsm_2(pll, M, N, K); -} - -static inline unsigned int ltq_get_pll0_fosc(void) -{ - if (CGU_PLL0_BYPASS) - return get_input_clock(0); - else - return !CGU_PLL0_CFG_FRAC_EN - ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0, - CGU_PLL0_CFG_DSMSEL, - CGU_PLL0_PHASE_DIVIDER_ENABLE) - : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, - CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL, - CGU_PLL0_PHASE_DIVIDER_ENABLE); -} - -static unsigned int ltq_get_pll0_fdiv(void) -{ - unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1; - - return (ltq_get_pll0_fosc() + (div >> 1)) / div; -} - -unsigned int ltq_get_io_region_clock(void) -{ - unsigned int ret = ltq_get_pll0_fosc(); - - switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) { - default: - case 0: - return (ret + 1) / 2; - case 1: - return (ret * 2 + 2) / 5; - case 2: - return (ret + 1) / 3; - case 3: - return (ret + 2) / 4; - } -} -EXPORT_SYMBOL(ltq_get_io_region_clock); - -unsigned int ltq_get_fpi_bus_clock(int fpi) -{ - unsigned int ret = ltq_get_io_region_clock(); - - if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL)) - ret >>= 1; - return ret; -} -EXPORT_SYMBOL(ltq_get_fpi_bus_clock); - -unsigned int ltq_get_cpu_hz(void) -{ - switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) { - case 0: - return CLOCK_333M; - case 4: - return DDR_HZ; - case 8: - return DDR_HZ << 1; - default: - return DDR_HZ >> 1; - } -} -EXPORT_SYMBOL(ltq_get_cpu_hz); - -unsigned int ltq_get_fpi_hz(void) -{ - unsigned int ddr_clock = DDR_HZ; - - if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40) - return ddr_clock >> 1; - return ddr_clock; -} -EXPORT_SYMBOL(ltq_get_fpi_hz); diff --git a/trunk/arch/mips/lantiq/xway/devices.c b/trunk/arch/mips/lantiq/xway/devices.c deleted file mode 100644 index e09e789dfc27..000000000000 --- a/trunk/arch/mips/lantiq/xway/devices.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
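mash_dsm() above computes f_osc = f_in * (((N + 1) << 10) + K) / ((M + 1) << 10). A worked stand-alone version of just that formula; the 36 MHz reference matches BASIC_FREQUENCY_2, while the M/N/K values are made up to give a round result:

#include <stdio.h>
#include <stdint.h>

/* f_osc = f_in * (((N + 1) << 10) + K) / ((M + 1) << 10), as in mash_dsm() */
static unsigned int mash_dsm(uint64_t fin, unsigned int M, unsigned int N,
			     unsigned int K)
{
	uint64_t num = ((uint64_t)(N + 1) << 10) + K;
	uint64_t den = (uint64_t)(M + 1) << 10;

	return (unsigned int)(fin * num / den);
}

int main(void)
{
	/* 36 MHz reference (BASIC_FREQUENCY_2); M=0, N=9, K=0 -> 360 MHz */
	printf("fosc = %u Hz\n", mash_dsm(36000000, 0, 9, 0));
	return 0;
}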
- * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#include "devices.h" - -/* gpio */ -static struct resource ltq_gpio_resource[] = { - { - .name = "gpio0", - .start = LTQ_GPIO0_BASE_ADDR, - .end = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1, - .flags = IORESOURCE_MEM, - }, { - .name = "gpio1", - .start = LTQ_GPIO1_BASE_ADDR, - .end = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1, - .flags = IORESOURCE_MEM, - }, { - .name = "gpio2", - .start = LTQ_GPIO2_BASE_ADDR, - .end = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1, - .flags = IORESOURCE_MEM, - } -}; - -void __init ltq_register_gpio(void) -{ - platform_device_register_simple("ltq_gpio", 0, - <q_gpio_resource[0], 1); - platform_device_register_simple("ltq_gpio", 1, - <q_gpio_resource[1], 1); - - /* AR9 and VR9 have an extra gpio block */ - if (ltq_is_ar9() || ltq_is_vr9()) { - platform_device_register_simple("ltq_gpio", 2, - <q_gpio_resource[2], 1); - } -} - -/* serial to parallel conversion */ -static struct resource ltq_stp_resource = { - .name = "stp", - .start = LTQ_STP_BASE_ADDR, - .end = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -void __init ltq_register_gpio_stp(void) -{ - platform_device_register_simple("ltq_stp", 0, <q_stp_resource, 1); -} - -/* asc ports - amazon se has its own serial mapping */ -static struct resource ltq_ase_asc_resources[] = { - { - .name = "asc0", - .start = LTQ_ASC1_BASE_ADDR, - .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1, - .flags = IORESOURCE_MEM, - }, - IRQ_RES(tx, LTQ_ASC_ASE_TIR), - IRQ_RES(rx, LTQ_ASC_ASE_RIR), - IRQ_RES(err, LTQ_ASC_ASE_EIR), -}; - -void __init ltq_register_ase_asc(void) -{ - platform_device_register_simple("ltq_asc", 0, - ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources)); -} - -/* ethernet */ -static struct resource ltq_etop_resources = { - .name = "etop", - .start = LTQ_ETOP_BASE_ADDR, - .end = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -static struct platform_device ltq_etop = { - .name = "ltq_etop", - .resource = <q_etop_resources, - .num_resources = 1, -}; - -void __init -ltq_register_etop(struct ltq_eth_data *eth) -{ - if (eth) { - ltq_etop.dev.platform_data = eth; - platform_device_register(<q_etop); - } -} diff --git a/trunk/arch/mips/lantiq/xway/devices.h b/trunk/arch/mips/lantiq/xway/devices.h deleted file mode 100644 index e90493471bc1..000000000000 --- a/trunk/arch/mips/lantiq/xway/devices.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LTQ_DEVICES_XWAY_H__ -#define _LTQ_DEVICES_XWAY_H__ - -#include "../devices.h" -#include - -extern void ltq_register_gpio(void); -extern void ltq_register_gpio_stp(void); -extern void ltq_register_ase_asc(void); -extern void ltq_register_etop(struct ltq_eth_data *eth); - -#endif diff --git a/trunk/arch/mips/lantiq/xway/dma.c b/trunk/arch/mips/lantiq/xway/dma.c deleted file mode 100644 index 4278a459d6c4..000000000000 --- a/trunk/arch/mips/lantiq/xway/dma.c +++ /dev/null @@ -1,253 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) 2011 John Crispin - */ - -#include -#include -#include -#include - -#include -#include - -#define LTQ_DMA_CTRL 0x10 -#define LTQ_DMA_CPOLL 0x14 -#define LTQ_DMA_CS 0x18 -#define LTQ_DMA_CCTRL 0x1C -#define LTQ_DMA_CDBA 0x20 -#define LTQ_DMA_CDLEN 0x24 -#define LTQ_DMA_CIS 0x28 -#define LTQ_DMA_CIE 0x2C -#define LTQ_DMA_PS 0x40 -#define LTQ_DMA_PCTRL 0x44 -#define LTQ_DMA_IRNEN 0xf4 - -#define DMA_DESCPT BIT(3) /* descriptor complete irq */ -#define DMA_TX BIT(8) /* TX channel direction */ -#define DMA_CHAN_ON BIT(0) /* channel on / off bit */ -#define DMA_PDEN BIT(6) /* enable packet drop */ -#define DMA_CHAN_RST BIT(1) /* channel on / off bit */ -#define DMA_RESET BIT(0) /* channel on / off bit */ -#define DMA_IRQ_ACK 0x7e /* IRQ status register */ -#define DMA_POLL BIT(31) /* turn on channel polling */ -#define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ -#define DMA_2W_BURST BIT(1) /* 2 word burst length */ -#define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ -#define DMA_ETOP_ENDIANESS (0xf << 8) /* endianess swap etop channels */ -#define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel wheight */ - -#define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x)) -#define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y)) -#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \ - ltq_dma_membase + (z)) - -static struct resource ltq_dma_resource = { - .name = "dma", - .start = LTQ_DMA_BASE_ADDR, - .end = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -static void __iomem *ltq_dma_membase; - -void -ltq_dma_enable_irq(struct ltq_dma_channel *ch) -{ - unsigned long flags; - - local_irq_save(flags); - ltq_dma_w32(ch->nr, LTQ_DMA_CS); - ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(ltq_dma_enable_irq); - -void -ltq_dma_disable_irq(struct ltq_dma_channel *ch) -{ - unsigned long flags; - - local_irq_save(flags); - ltq_dma_w32(ch->nr, LTQ_DMA_CS); - ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN); - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(ltq_dma_disable_irq); - -void -ltq_dma_ack_irq(struct ltq_dma_channel *ch) -{ - unsigned long flags; - - local_irq_save(flags); - ltq_dma_w32(ch->nr, LTQ_DMA_CS); - ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS); - local_irq_restore(flags); -} 
-EXPORT_SYMBOL_GPL(ltq_dma_ack_irq); - -void -ltq_dma_open(struct ltq_dma_channel *ch) -{ - unsigned long flag; - - local_irq_save(flag); - ltq_dma_w32(ch->nr, LTQ_DMA_CS); - ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL); - ltq_dma_enable_irq(ch); - local_irq_restore(flag); -} -EXPORT_SYMBOL_GPL(ltq_dma_open); - -void -ltq_dma_close(struct ltq_dma_channel *ch) -{ - unsigned long flag; - - local_irq_save(flag); - ltq_dma_w32(ch->nr, LTQ_DMA_CS); - ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); - ltq_dma_disable_irq(ch); - local_irq_restore(flag); -} -EXPORT_SYMBOL_GPL(ltq_dma_close); - -static void -ltq_dma_alloc(struct ltq_dma_channel *ch) -{ - unsigned long flags; - - ch->desc = 0; - ch->desc_base = dma_alloc_coherent(NULL, - LTQ_DESC_NUM * LTQ_DESC_SIZE, - &ch->phys, GFP_ATOMIC); - memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE); - - local_irq_save(flags); - ltq_dma_w32(ch->nr, LTQ_DMA_CS); - ltq_dma_w32(ch->phys, LTQ_DMA_CDBA); - ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN); - ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); - wmb(); - ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL); - while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST) - ; - local_irq_restore(flags); -} - -void -ltq_dma_alloc_tx(struct ltq_dma_channel *ch) -{ - unsigned long flags; - - ltq_dma_alloc(ch); - - local_irq_save(flags); - ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); - ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); - ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL); - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx); - -void -ltq_dma_alloc_rx(struct ltq_dma_channel *ch) -{ - unsigned long flags; - - ltq_dma_alloc(ch); - - local_irq_save(flags); - ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); - ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); - ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL); - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx); - -void -ltq_dma_free(struct ltq_dma_channel *ch) -{ - if (!ch->desc_base) - return; - ltq_dma_close(ch); - dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, - ch->desc_base, ch->phys); -} -EXPORT_SYMBOL_GPL(ltq_dma_free); - -void -ltq_dma_init_port(int p) -{ - ltq_dma_w32(p, LTQ_DMA_PS); - switch (p) { - case DMA_PORT_ETOP: - /* - * Tell the DMA engine to swap the endianess of data frames and - * drop packets if the channel arbitration fails. 
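The open/close/alloc helpers above all follow the same access pattern: write the channel number to LTQ_DMA_CS, then operate on the shared per-channel register window (CCTRL and friends). A toy model of that select-then-touch idiom, with the hardware window replaced by an array of shadow values:

#include <stdio.h>
#include <string.h>

#define DMA_MAX_CHANNEL 20		/* from the defines above */
#define DMA_CHAN_ON	(1u << 0)

struct toy_dma {
	unsigned int cs;			/* channel select (LTQ_DMA_CS) */
	unsigned int cctrl[DMA_MAX_CHANNEL];	/* shadow of per-channel CCTRL */
};

/* select the channel first, then flip bits in "its" CCTRL window */
static void chan_on(struct toy_dma *d, unsigned int nr, int on)
{
	d->cs = nr;				/* ltq_dma_w32(ch->nr, LTQ_DMA_CS) */
	if (on)
		d->cctrl[d->cs] |= DMA_CHAN_ON;	/* w32_mask(0, DMA_CHAN_ON, ...) */
	else
		d->cctrl[d->cs] &= ~DMA_CHAN_ON;
}

int main(void)
{
	struct toy_dma d;
	unsigned int i;

	memset(&d, 0, sizeof(d));
	chan_on(&d, 3, 1);
	chan_on(&d, 7, 1);
	chan_on(&d, 3, 0);
	for (i = 0; i < DMA_MAX_CHANNEL; i++)
		if (d.cctrl[i] & DMA_CHAN_ON)
			printf("channel %u is on\n", i);
	return 0;
}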
- */ - ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN, - LTQ_DMA_PCTRL); - break; - - case DMA_PORT_DEU: - ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2), - LTQ_DMA_PCTRL); - break; - - default: - break; - } -} -EXPORT_SYMBOL_GPL(ltq_dma_init_port); - -int __init -ltq_dma_init(void) -{ - int i; - - /* insert and request the memory region */ - if (insert_resource(&iomem_resource, <q_dma_resource) < 0) - panic("Failed to insert dma memory\n"); - - if (request_mem_region(ltq_dma_resource.start, - resource_size(<q_dma_resource), "dma") < 0) - panic("Failed to request dma memory\n"); - - /* remap dma register range */ - ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start, - resource_size(<q_dma_resource)); - if (!ltq_dma_membase) - panic("Failed to remap dma memory\n"); - - /* power up and reset the dma engine */ - ltq_pmu_enable(PMU_DMA); - ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); - - /* disable all interrupts */ - ltq_dma_w32(0, LTQ_DMA_IRNEN); - - /* reset/configure each channel */ - for (i = 0; i < DMA_MAX_CHANNEL; i++) { - ltq_dma_w32(i, LTQ_DMA_CS); - ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); - ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); - ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); - } - return 0; -} - -postcore_initcall(ltq_dma_init); diff --git a/trunk/arch/mips/lantiq/xway/ebu.c b/trunk/arch/mips/lantiq/xway/ebu.c deleted file mode 100644 index 66eb52fa50a1..000000000000 --- a/trunk/arch/mips/lantiq/xway/ebu.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * EBU - the external bus unit attaches PCI, NOR and NAND - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include - -#include - -/* all access to the ebu must be locked */ -DEFINE_SPINLOCK(ebu_lock); -EXPORT_SYMBOL_GPL(ebu_lock); - -static struct resource ltq_ebu_resource = { - .name = "ebu", - .start = LTQ_EBU_BASE_ADDR, - .end = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -/* remapped base addr of the clock unit and external bus unit */ -void __iomem *ltq_ebu_membase; - -static int __init lantiq_ebu_init(void) -{ - /* insert and request the memory region */ - if (insert_resource(&iomem_resource, <q_ebu_resource) < 0) - panic("Failed to insert ebu memory\n"); - - if (request_mem_region(ltq_ebu_resource.start, - resource_size(<q_ebu_resource), "ebu") < 0) - panic("Failed to request ebu memory\n"); - - /* remap ebu register range */ - ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start, - resource_size(<q_ebu_resource)); - if (!ltq_ebu_membase) - panic("Failed to remap ebu memory\n"); - - /* make sure to unprotect the memory region where flash is located */ - ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0); - return 0; -} - -postcore_initcall(lantiq_ebu_init); diff --git a/trunk/arch/mips/lantiq/xway/gpio.c b/trunk/arch/mips/lantiq/xway/gpio.c deleted file mode 100644 index a321451a5455..000000000000 --- a/trunk/arch/mips/lantiq/xway/gpio.c +++ /dev/null @@ -1,195 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include - -#include - -#define LTQ_GPIO_OUT 0x00 -#define LTQ_GPIO_IN 0x04 -#define LTQ_GPIO_DIR 0x08 -#define LTQ_GPIO_ALTSEL0 0x0C -#define LTQ_GPIO_ALTSEL1 0x10 -#define LTQ_GPIO_OD 0x14 - -#define PINS_PER_PORT 16 -#define MAX_PORTS 3 - -#define ltq_gpio_getbit(m, r, p) (!!(ltq_r32(m + r) & (1 << p))) -#define ltq_gpio_setbit(m, r, p) ltq_w32_mask(0, (1 << p), m + r) -#define ltq_gpio_clearbit(m, r, p) ltq_w32_mask((1 << p), 0, m + r) - -struct ltq_gpio { - void __iomem *membase; - struct gpio_chip chip; -}; - -static struct ltq_gpio ltq_gpio_port[MAX_PORTS]; - -int gpio_to_irq(unsigned int gpio) -{ - return -EINVAL; -} -EXPORT_SYMBOL(gpio_to_irq); - -int irq_to_gpio(unsigned int gpio) -{ - return -EINVAL; -} -EXPORT_SYMBOL(irq_to_gpio); - -int ltq_gpio_request(unsigned int pin, unsigned int alt0, - unsigned int alt1, unsigned int dir, const char *name) -{ - int id = 0; - - if (pin >= (MAX_PORTS * PINS_PER_PORT)) - return -EINVAL; - if (gpio_request(pin, name)) { - pr_err("failed to setup lantiq gpio: %s\n", name); - return -EBUSY; - } - if (dir) - gpio_direction_output(pin, 1); - else - gpio_direction_input(pin); - while (pin >= PINS_PER_PORT) { - pin -= PINS_PER_PORT; - id++; - } - if (alt0) - ltq_gpio_setbit(ltq_gpio_port[id].membase, - LTQ_GPIO_ALTSEL0, pin); - else - ltq_gpio_clearbit(ltq_gpio_port[id].membase, - LTQ_GPIO_ALTSEL0, pin); - if (alt1) - ltq_gpio_setbit(ltq_gpio_port[id].membase, - LTQ_GPIO_ALTSEL1, pin); - else - ltq_gpio_clearbit(ltq_gpio_port[id].membase, - LTQ_GPIO_ALTSEL1, pin); - return 0; -} -EXPORT_SYMBOL(ltq_gpio_request); - -static void ltq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) -{ - struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); - - if (value) - ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset); - else - ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset); -} - -static int ltq_gpio_get(struct gpio_chip *chip, unsigned int offset) -{ - struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); - - return ltq_gpio_getbit(ltq_gpio->membase, LTQ_GPIO_IN, offset); -} - -static int ltq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) -{ - struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); - - ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OD, offset); - ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset); - - return 0; -} - -static int ltq_gpio_direction_output(struct gpio_chip *chip, - unsigned int offset, int value) -{ - struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); - - ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OD, offset); - ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset); - ltq_gpio_set(chip, offset, value); - - return 0; -} - -static int ltq_gpio_req(struct gpio_chip *chip, unsigned offset) -{ - struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); - - ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL0, offset); - ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL1, offset); - return 0; -} - -static int ltq_gpio_probe(struct platform_device *pdev) -{ - struct resource *res; - - if (pdev->id >= MAX_PORTS) { - dev_err(&pdev->dev, "invalid gpio port %d\n", - pdev->id); - return -EINVAL; - } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "failed to get memory for gpio port %d\n", - pdev->id); - return -ENOENT; - } - res = 
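ltq_gpio_request() above peels the port index off the global GPIO number with a subtract loop (16 pins per port, 3 ports). The same split, written as division/modulo in a stand-alone check:

#include <stdio.h>

#define PINS_PER_PORT 16
#define MAX_PORTS      3

static int split_gpio(unsigned int gpio, unsigned int *port, unsigned int *pin)
{
	if (gpio >= MAX_PORTS * PINS_PER_PORT)
		return -1;			/* same bound check as above */
	*port = gpio / PINS_PER_PORT;		/* the while-loop above, folded */
	*pin  = gpio % PINS_PER_PORT;
	return 0;
}

int main(void)
{
	unsigned int g, port, pin;

	for (g = 0; g < MAX_PORTS * PINS_PER_PORT; g += 11)
		if (!split_gpio(g, &port, &pin))
			printf("gpio %2u -> port %u, pin %2u\n", g, port, pin);
	return 0;
}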
devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, - "failed to request memory for gpio port %d\n", - pdev->id); - return -EBUSY; - } - ltq_gpio_port[pdev->id].membase = devm_ioremap_nocache(&pdev->dev, - res->start, resource_size(res)); - if (!ltq_gpio_port[pdev->id].membase) { - dev_err(&pdev->dev, "failed to remap memory for gpio port %d\n", - pdev->id); - return -ENOMEM; - } - ltq_gpio_port[pdev->id].chip.label = "ltq_gpio"; - ltq_gpio_port[pdev->id].chip.direction_input = ltq_gpio_direction_input; - ltq_gpio_port[pdev->id].chip.direction_output = - ltq_gpio_direction_output; - ltq_gpio_port[pdev->id].chip.get = ltq_gpio_get; - ltq_gpio_port[pdev->id].chip.set = ltq_gpio_set; - ltq_gpio_port[pdev->id].chip.request = ltq_gpio_req; - ltq_gpio_port[pdev->id].chip.base = PINS_PER_PORT * pdev->id; - ltq_gpio_port[pdev->id].chip.ngpio = PINS_PER_PORT; - platform_set_drvdata(pdev, <q_gpio_port[pdev->id]); - return gpiochip_add(<q_gpio_port[pdev->id].chip); -} - -static struct platform_driver -ltq_gpio_driver = { - .probe = ltq_gpio_probe, - .driver = { - .name = "ltq_gpio", - .owner = THIS_MODULE, - }, -}; - -int __init ltq_gpio_init(void) -{ - int ret = platform_driver_register(<q_gpio_driver); - - if (ret) - pr_info("ltq_gpio : Error registering platfom driver!"); - return ret; -} - -postcore_initcall(ltq_gpio_init); diff --git a/trunk/arch/mips/lantiq/xway/gpio_ebu.c b/trunk/arch/mips/lantiq/xway/gpio_ebu.c deleted file mode 100644 index a479355abdb9..000000000000 --- a/trunk/arch/mips/lantiq/xway/gpio_ebu.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -/* - * By attaching hardware latches to the EBU it is possible to create output - * only gpios. This driver configures a special memory address, which when - * written to outputs 16 bit to the latches. 
- */ - -#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */ -#define LTQ_EBU_WP 0x80000000 /* write protect bit */ - -/* we keep a shadow value of the last value written to the ebu */ -static int ltq_ebu_gpio_shadow = 0x0; -static void __iomem *ltq_ebu_gpio_membase; - -static void ltq_ebu_apply(void) -{ - unsigned long flags; - - spin_lock_irqsave(&ebu_lock, flags); - ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1); - *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow; - ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); - spin_unlock_irqrestore(&ebu_lock, flags); -} - -static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value) -{ - if (value) - ltq_ebu_gpio_shadow |= (1 << offset); - else - ltq_ebu_gpio_shadow &= ~(1 << offset); - ltq_ebu_apply(); -} - -static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset, - int value) -{ - ltq_ebu_set(chip, offset, value); - - return 0; -} - -static struct gpio_chip ltq_ebu_chip = { - .label = "ltq_ebu", - .direction_output = ltq_ebu_direction_output, - .set = ltq_ebu_set, - .base = 72, - .ngpio = 16, - .can_sleep = 1, - .owner = THIS_MODULE, -}; - -static int ltq_ebu_probe(struct platform_device *pdev) -{ - int ret = 0; - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - if (!res) { - dev_err(&pdev->dev, "failed to get memory resource\n"); - return -ENOENT; - } - - res = devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, "failed to request memory resource\n"); - return -EBUSY; - } - - ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); - if (!ltq_ebu_gpio_membase) { - dev_err(&pdev->dev, "Failed to ioremap mem region\n"); - return -ENOMEM; - } - - /* grab the default shadow value passed form the platform code */ - ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data; - - /* tell the ebu controller which memory address we will be using */ - ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1); - - /* write protect the region */ - ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); - - ret = gpiochip_add(<q_ebu_chip); - if (!ret) - ltq_ebu_apply(); - return ret; -} - -static struct platform_driver ltq_ebu_driver = { - .probe = ltq_ebu_probe, - .driver = { - .name = "ltq_ebu", - .owner = THIS_MODULE, - }, -}; - -static int __init ltq_ebu_init(void) -{ - int ret = platform_driver_register(<q_ebu_driver); - - if (ret) - pr_info("ltq_ebu : Error registering platfom driver!"); - return ret; -} - -postcore_initcall(ltq_ebu_init); diff --git a/trunk/arch/mips/lantiq/xway/gpio_stp.c b/trunk/arch/mips/lantiq/xway/gpio_stp.c deleted file mode 100644 index 67d59d690340..000000000000 --- a/trunk/arch/mips/lantiq/xway/gpio_stp.c +++ /dev/null @@ -1,157 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
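The EBU latch driver keeps every output state in ltq_ebu_gpio_shadow and rewrites the whole 16-bit word to the mapped address on each change. A minimal model of that shadow-plus-latch scheme, with the memory-mapped latch replaced by a plain variable:

#include <stdio.h>
#include <stdint.h>

static uint16_t shadow;		/* ltq_ebu_gpio_shadow */
static uint16_t latch;		/* stands in for the EBU-mapped latch address */

static void ebu_gpio_set(unsigned int offset, int value)
{
	if (value)
		shadow |= 1u << offset;
	else
		shadow &= ~(1u << offset);
	latch = shadow;		/* ltq_ebu_apply(): rewrite the whole word */
}

int main(void)
{
	ebu_gpio_set(3, 1);
	ebu_gpio_set(9, 1);
	ebu_gpio_set(3, 0);
	printf("latch = 0x%04x\n", latch);	/* 0x0200 */
	return 0;
}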
- * - * Copyright (C) 2007 John Crispin - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define LTQ_STP_CON0 0x00 -#define LTQ_STP_CON1 0x04 -#define LTQ_STP_CPU0 0x08 -#define LTQ_STP_CPU1 0x0C -#define LTQ_STP_AR 0x10 - -#define LTQ_STP_CON_SWU (1 << 31) -#define LTQ_STP_2HZ 0 -#define LTQ_STP_4HZ (1 << 23) -#define LTQ_STP_8HZ (2 << 23) -#define LTQ_STP_10HZ (3 << 23) -#define LTQ_STP_SPEED_MASK (0xf << 23) -#define LTQ_STP_UPD_FPI (1 << 31) -#define LTQ_STP_UPD_MASK (3 << 30) -#define LTQ_STP_ADSL_SRC (3 << 24) - -#define LTQ_STP_GROUP0 (1 << 0) - -#define LTQ_STP_RISING 0 -#define LTQ_STP_FALLING (1 << 26) -#define LTQ_STP_EDGE_MASK (1 << 26) - -#define ltq_stp_r32(reg) __raw_readl(ltq_stp_membase + reg) -#define ltq_stp_w32(val, reg) __raw_writel(val, ltq_stp_membase + reg) -#define ltq_stp_w32_mask(clear, set, reg) \ - ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \ - ltq_stp_membase + (reg)) - -static int ltq_stp_shadow = 0xffff; -static void __iomem *ltq_stp_membase; - -static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value) -{ - if (value) - ltq_stp_shadow |= (1 << offset); - else - ltq_stp_shadow &= ~(1 << offset); - ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0); -} - -static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset, - int value) -{ - ltq_stp_set(chip, offset, value); - - return 0; -} - -static struct gpio_chip ltq_stp_chip = { - .label = "ltq_stp", - .direction_output = ltq_stp_direction_output, - .set = ltq_stp_set, - .base = 48, - .ngpio = 24, - .can_sleep = 1, - .owner = THIS_MODULE, -}; - -static int ltq_stp_hw_init(void) -{ - /* the 3 pins used to control the external stp */ - ltq_gpio_request(4, 1, 0, 1, "stp-st"); - ltq_gpio_request(5, 1, 0, 1, "stp-d"); - ltq_gpio_request(6, 1, 0, 1, "stp-sh"); - - /* sane defaults */ - ltq_stp_w32(0, LTQ_STP_AR); - ltq_stp_w32(0, LTQ_STP_CPU0); - ltq_stp_w32(0, LTQ_STP_CPU1); - ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0); - ltq_stp_w32(0, LTQ_STP_CON1); - - /* rising or falling edge */ - ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0); - - /* per default stp 15-0 are set */ - ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1); - - /* stp are update periodically by the FPI bus */ - ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1); - - /* set stp update speed */ - ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1); - - /* tell the hardware that pin (led) 0 and 1 are controlled - * by the dsl arc - */ - ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0); - - ltq_pmu_enable(PMU_LED); - return 0; -} - -static int __devinit ltq_stp_probe(struct platform_device *pdev) -{ - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - int ret = 0; - - if (!res) - return -ENOENT; - res = devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, "failed to request STP memory\n"); - return -EBUSY; - } - ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); - if (!ltq_stp_membase) { - dev_err(&pdev->dev, "failed to remap STP memory\n"); - return -ENOMEM; - } - ret = gpiochip_add(<q_stp_chip); - if (!ret) - ret = ltq_stp_hw_init(); - - return ret; -} - -static struct platform_driver ltq_stp_driver = { - .probe = ltq_stp_probe, - .driver = { - .name = "ltq_stp", - .owner = THIS_MODULE, - }, -}; - -int __init ltq_stp_init(void) -{ - int ret = 
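The ltq_stp_w32_mask() helper above, like the other *_w32_mask() macros in this patch, implements the usual read-modify-write idiom new = (old & ~clear) | set. A quick stand-alone check of that identity, using the STP speed field from the defines above:

#include <stdio.h>
#include <stdint.h>

#define LTQ_STP_8HZ		(2u << 23)
#define LTQ_STP_SPEED_MASK	(0xfu << 23)

/* new = (old & ~clear) | set, the idiom behind ltq_stp_w32_mask() */
static uint32_t w32_mask(uint32_t old, uint32_t clear, uint32_t set)
{
	return (old & ~clear) | set;
}

int main(void)
{
	uint32_t con1 = 0xffffffff;	/* arbitrary starting register value */

	con1 = w32_mask(con1, LTQ_STP_SPEED_MASK, LTQ_STP_8HZ);
	printf("CON1 = 0x%08x, speed field = %u\n",
	       con1, (con1 >> 23) & 0xf);
	return 0;
}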
platform_driver_register(<q_stp_driver); - - if (ret) - pr_info("ltq_stp: error registering platfom driver"); - return ret; -} - -postcore_initcall(ltq_stp_init); diff --git a/trunk/arch/mips/lantiq/xway/mach-easy50601.c b/trunk/arch/mips/lantiq/xway/mach-easy50601.c deleted file mode 100644 index d5aaf637ab19..000000000000 --- a/trunk/arch/mips/lantiq/xway/mach-easy50601.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include - -#include - -#include "../machtypes.h" -#include "devices.h" - -static struct mtd_partition easy50601_partitions[] = { - { - .name = "uboot", - .offset = 0x0, - .size = 0x10000, - }, - { - .name = "uboot_env", - .offset = 0x10000, - .size = 0x10000, - }, - { - .name = "linux", - .offset = 0x20000, - .size = 0xE0000, - }, - { - .name = "rootfs", - .offset = 0x100000, - .size = 0x300000, - }, -}; - -static struct physmap_flash_data easy50601_flash_data = { - .nr_parts = ARRAY_SIZE(easy50601_partitions), - .parts = easy50601_partitions, -}; - -static void __init easy50601_init(void) -{ - ltq_register_nor(&easy50601_flash_data); -} - -MIPS_MACHINE(LTQ_MACH_EASY50601, - "EASY50601", - "EASY50601 Eval Board", - easy50601_init); diff --git a/trunk/arch/mips/lantiq/xway/mach-easy50712.c b/trunk/arch/mips/lantiq/xway/mach-easy50712.c deleted file mode 100644 index ea5027b3239d..000000000000 --- a/trunk/arch/mips/lantiq/xway/mach-easy50712.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "../machtypes.h" -#include "devices.h" - -static struct mtd_partition easy50712_partitions[] = { - { - .name = "uboot", - .offset = 0x0, - .size = 0x10000, - }, - { - .name = "uboot_env", - .offset = 0x10000, - .size = 0x10000, - }, - { - .name = "linux", - .offset = 0x20000, - .size = 0xe0000, - }, - { - .name = "rootfs", - .offset = 0x100000, - .size = 0x300000, - }, -}; - -static struct physmap_flash_data easy50712_flash_data = { - .nr_parts = ARRAY_SIZE(easy50712_partitions), - .parts = easy50712_partitions, -}; - -static struct ltq_pci_data ltq_pci_data = { - .clock = PCI_CLOCK_INT, - .gpio = PCI_GNT1 | PCI_REQ1, - .irq = { - [14] = INT_NUM_IM0_IRL0 + 22, - }, -}; - -static struct ltq_eth_data ltq_eth_data = { - .mii_mode = PHY_INTERFACE_MODE_MII, -}; - -static void __init easy50712_init(void) -{ - ltq_register_gpio_stp(); - ltq_register_nor(&easy50712_flash_data); - ltq_register_pci(<q_pci_data); - ltq_register_etop(<q_eth_data); -} - -MIPS_MACHINE(LTQ_MACH_EASY50712, - "EASY50712", - "EASY50712 Eval Board", - easy50712_init); diff --git a/trunk/arch/mips/lantiq/xway/pmu.c b/trunk/arch/mips/lantiq/xway/pmu.c deleted file mode 100644 index 9d69f01e352b..000000000000 --- a/trunk/arch/mips/lantiq/xway/pmu.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
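The two evaluation board files above share the same 4 MB NOR layout: u-boot, u-boot environment, kernel, rootfs. A small sanity check that the partition table is contiguous, using the offsets and sizes copied from those tables:

#include <stdio.h>

struct part {
	const char *name;
	unsigned long offset, size;
};

int main(void)
{
	/* offsets/sizes from the easy50601/easy50712 tables above */
	struct part parts[] = {
		{ "uboot",     0x000000, 0x010000 },
		{ "uboot_env", 0x010000, 0x010000 },
		{ "linux",     0x020000, 0x0e0000 },
		{ "rootfs",    0x100000, 0x300000 },
	};
	unsigned long expect = 0;
	unsigned int i;

	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		printf("%-10s 0x%06lx + 0x%06lx%s\n", parts[i].name,
		       parts[i].offset, parts[i].size,
		       parts[i].offset == expect ? "" : "  (gap!)");
		expect = parts[i].offset + parts[i].size;
	}
	printf("total: 0x%06lx bytes\n", expect);
	return 0;
}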
- * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include - -#include - -/* PMU - the power management unit allows us to turn part of the core - * on and off - */ - -/* the enable / disable registers */ -#define LTQ_PMU_PWDCR 0x1C -#define LTQ_PMU_PWDSR 0x20 - -#define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y)) -#define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x)) - -static struct resource ltq_pmu_resource = { - .name = "pmu", - .start = LTQ_PMU_BASE_ADDR, - .end = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -static void __iomem *ltq_pmu_membase; - -void ltq_pmu_enable(unsigned int module) -{ - int err = 1000000; - - ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR); - do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module)); - - if (!err) - panic("activating PMU module failed!\n"); -} -EXPORT_SYMBOL(ltq_pmu_enable); - -void ltq_pmu_disable(unsigned int module) -{ - ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR); -} -EXPORT_SYMBOL(ltq_pmu_disable); - -int __init ltq_pmu_init(void) -{ - if (insert_resource(&iomem_resource, <q_pmu_resource) < 0) - panic("Failed to insert pmu memory\n"); - - if (request_mem_region(ltq_pmu_resource.start, - resource_size(<q_pmu_resource), "pmu") < 0) - panic("Failed to request pmu memory\n"); - - ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start, - resource_size(<q_pmu_resource)); - if (!ltq_pmu_membase) - panic("Failed to remap pmu memory\n"); - return 0; -} - -core_initcall(ltq_pmu_init); diff --git a/trunk/arch/mips/lantiq/xway/prom-ase.c b/trunk/arch/mips/lantiq/xway/prom-ase.c deleted file mode 100644 index abe49f4db57f..000000000000 --- a/trunk/arch/mips/lantiq/xway/prom-ase.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include - -#include - -#include "../prom.h" - -#define SOC_AMAZON_SE "Amazon_SE" - -#define PART_SHIFT 12 -#define PART_MASK 0x0FFFFFFF -#define REV_SHIFT 28 -#define REV_MASK 0xF0000000 - -void __init ltq_soc_detect(struct ltq_soc_info *i) -{ - i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; - i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; - switch (i->partnum) { - case SOC_ID_AMAZON_SE: - i->name = SOC_AMAZON_SE; - i->type = SOC_TYPE_AMAZON_SE; - break; - - default: - unreachable(); - break; - } -} diff --git a/trunk/arch/mips/lantiq/xway/prom-xway.c b/trunk/arch/mips/lantiq/xway/prom-xway.c deleted file mode 100644 index 1686692ac24d..000000000000 --- a/trunk/arch/mips/lantiq/xway/prom-xway.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
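ltq_pmu_enable() above clears the module's power-down bit and then busy-waits on PWDSR with a bounded retry counter, panicking if the bit never clears. A user-space sketch of that bounded poll, with the status register faked by a countdown:

#include <stdio.h>

static unsigned int fake_pwdsr = 1u << 5;	/* module 5 still powered down */
static int settle = 3;				/* fake hardware: clears after 3 polls */

static unsigned int read_pwdsr(void)
{
	if (settle-- <= 0)
		fake_pwdsr = 0;
	return fake_pwdsr;
}

/* the same bounded do/while poll as ltq_pmu_enable(), minus the panic() */
static int pmu_enable(unsigned int module)
{
	int err = 1000000;

	do { } while (--err && (read_pwdsr() & module));
	return err ? 0 : -1;
}

int main(void)
{
	printf("enable module 5: %s\n",
	       pmu_enable(1u << 5) ? "timed out" : "ok");
	return 0;
}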
- * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include - -#include - -#include "../prom.h" - -#define SOC_DANUBE "Danube" -#define SOC_TWINPASS "Twinpass" -#define SOC_AR9 "AR9" - -#define PART_SHIFT 12 -#define PART_MASK 0x0FFFFFFF -#define REV_SHIFT 28 -#define REV_MASK 0xF0000000 - -void __init ltq_soc_detect(struct ltq_soc_info *i) -{ - i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; - i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; - switch (i->partnum) { - case SOC_ID_DANUBE1: - case SOC_ID_DANUBE2: - i->name = SOC_DANUBE; - i->type = SOC_TYPE_DANUBE; - break; - - case SOC_ID_TWINPASS: - i->name = SOC_TWINPASS; - i->type = SOC_TYPE_DANUBE; - break; - - case SOC_ID_ARX188: - case SOC_ID_ARX168: - case SOC_ID_ARX182: - i->name = SOC_AR9; - i->type = SOC_TYPE_AR9; - break; - - default: - unreachable(); - break; - } -} diff --git a/trunk/arch/mips/lantiq/xway/reset.c b/trunk/arch/mips/lantiq/xway/reset.c deleted file mode 100644 index a1be36d0e490..000000000000 --- a/trunk/arch/mips/lantiq/xway/reset.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include - -#include - -#define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y)) -#define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x)) - -/* register definitions */ -#define LTQ_RCU_RST 0x0010 -#define LTQ_RCU_RST_ALL 0x40000000 - -#define LTQ_RCU_RST_STAT 0x0014 -#define LTQ_RCU_STAT_SHIFT 26 - -static struct resource ltq_rcu_resource = { - .name = "rcu", - .start = LTQ_RCU_BASE_ADDR, - .end = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1, - .flags = IORESOURCE_MEM, -}; - -/* remapped base addr of the reset control unit */ -static void __iomem *ltq_rcu_membase; - -/* This function is used by the watchdog driver */ -int ltq_reset_cause(void) -{ - u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT); - return val >> LTQ_RCU_STAT_SHIFT; -} -EXPORT_SYMBOL_GPL(ltq_reset_cause); - -static void ltq_machine_restart(char *command) -{ - pr_notice("System restart\n"); - local_irq_disable(); - ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST); - unreachable(); -} - -static void ltq_machine_halt(void) -{ - pr_notice("System halted.\n"); - local_irq_disable(); - unreachable(); -} - -static void ltq_machine_power_off(void) -{ - pr_notice("Please turn off the power now.\n"); - local_irq_disable(); - unreachable(); -} - -static int __init mips_reboot_setup(void) -{ - /* insert and request the memory region */ - if (insert_resource(&iomem_resource, <q_rcu_resource) < 0) - panic("Failed to insert rcu memory\n"); - - if (request_mem_region(ltq_rcu_resource.start, - resource_size(<q_rcu_resource), "rcu") < 0) - panic("Failed to request rcu memory\n"); - - /* remap rcu register range */ - ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start, - resource_size(<q_rcu_resource)); - if (!ltq_rcu_membase) - panic("Failed to remap rcu memory\n"); - - _machine_restart = ltq_machine_restart; - _machine_halt = ltq_machine_halt; - pm_power_off = ltq_machine_power_off; - - return 0; -} - -arch_initcall(mips_reboot_setup); diff --git a/trunk/arch/mips/lantiq/xway/setup-ase.c b/trunk/arch/mips/lantiq/xway/setup-ase.c deleted file mode 100644 index f6f326798a39..000000000000 --- a/trunk/arch/mips/lantiq/xway/setup-ase.c +++ /dev/null @@ 
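Both SoC-detect variants decode LTQ_MPS_CHIPID the same way: bits 27..12 hold the part number, bits 31..28 the revision (which prom.c then reports as "rev1.N"). A stand-alone decode with a made-up chip-ID value:

#include <stdio.h>
#include <stdint.h>

#define PART_SHIFT 12
#define PART_MASK  0x0FFFFFFF
#define REV_SHIFT  28
#define REV_MASK   0xF0000000

int main(void)
{
	uint32_t chipid = 0x1012b083;		/* made-up register value */
	uint32_t partnum = (chipid & PART_MASK) >> PART_SHIFT;
	uint32_t rev = (chipid & REV_MASK) >> REV_SHIFT;

	printf("partnum 0x%04x, reported as rev1.%u\n", partnum, rev);
	return 0;
}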
-1,19 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2011 John Crispin - */ - -#include - -#include "../prom.h" -#include "devices.h" - -void __init ltq_soc_setup(void) -{ - ltq_register_ase_asc(); - ltq_register_gpio(); - ltq_register_wdt(); -} diff --git a/trunk/arch/mips/lantiq/xway/setup-xway.c b/trunk/arch/mips/lantiq/xway/setup-xway.c deleted file mode 100644 index c292f643a858..000000000000 --- a/trunk/arch/mips/lantiq/xway/setup-xway.c +++ /dev/null @@ -1,20 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2011 John Crispin - */ - -#include - -#include "../prom.h" -#include "devices.h" - -void __init ltq_soc_setup(void) -{ - ltq_register_asc(0); - ltq_register_asc(1); - ltq_register_gpio(); - ltq_register_wdt(); -} diff --git a/trunk/arch/mips/lib/Makefile b/trunk/arch/mips/lib/Makefile index b2cad4fd5fc4..2adead5a8a37 100644 --- a/trunk/arch/mips/lib/Makefile +++ b/trunk/arch/mips/lib/Makefile @@ -28,7 +28,6 @@ obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o -obj-$(CONFIG_CPU_XLR) += dump_tlb.o # libgcc-style stuff needed in the kernel obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o diff --git a/trunk/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/trunk/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c index 0cb1b9760e34..8c807c965199 100644 --- a/trunk/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c +++ b/trunk/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c @@ -201,6 +201,8 @@ static struct clocksource clocksource_mfgpt = { .rating = 120, /* Functional for real use, but not desired */ .read = mfgpt_read, .mask = CLOCKSOURCE_MASK(32), + .mult = 0, + .shift = 22, }; int __init init_mfgpt_clocksource(void) @@ -208,7 +210,8 @@ int __init init_mfgpt_clocksource(void) if (num_possible_cpus() > 1) /* MFGPT does not scale! 
*/ return 0; - return clocksource_register_hz(&clocksource_mfgpt, MFGPT_TICK_RATE); + clocksource_mfgpt.mult = clocksource_hz2mult(MFGPT_TICK_RATE, 22); + return clocksource_register(&clocksource_mfgpt); } arch_initcall(init_mfgpt_clocksource); diff --git a/trunk/arch/mips/loongson/common/env.c b/trunk/arch/mips/loongson/common/env.c index d93830ad6113..11b193f848f8 100644 --- a/trunk/arch/mips/loongson/common/env.c +++ b/trunk/arch/mips/loongson/common/env.c @@ -29,10 +29,9 @@ unsigned long memsize, highmemsize; #define parse_even_earlier(res, option, p) \ do { \ - unsigned int tmp __maybe_unused; \ - \ + int ret; \ if (strncmp(option, (char *)p, strlen(option)) == 0) \ - tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \ + ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ } while (0) void __init prom_init_env(void) diff --git a/trunk/arch/mips/mm/Makefile b/trunk/arch/mips/mm/Makefile index 4d8c1623eee2..d679c772d082 100644 --- a/trunk/arch/mips/mm/Makefile +++ b/trunk/arch/mips/mm/Makefile @@ -3,8 +3,7 @@ # obj-y += cache.o dma-default.o extable.o fault.o \ - init.o mmap.o tlbex.o tlbex-fault.o uasm.o \ - page.o + init.o tlbex.o tlbex-fault.o uasm.o page.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o @@ -30,7 +29,6 @@ obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o -obj-$(CONFIG_CPU_XLR) += c-r4k.o tlb-r4k.o cex-gen.o obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o diff --git a/trunk/arch/mips/mm/c-r4k.c b/trunk/arch/mips/mm/c-r4k.c index d9bc5d3593b6..b4923a75cb4b 100644 --- a/trunk/arch/mips/mm/c-r4k.c +++ b/trunk/arch/mips/mm/c-r4k.c @@ -1006,7 +1006,6 @@ static void __cpuinit probe_pcache(void) case CPU_25KF: case CPU_SB1: case CPU_SB1A: - case CPU_XLR: c->dcache.flags |= MIPS_CACHE_PINDEX; break; @@ -1076,6 +1075,7 @@ static int __cpuinit probe_scache(void) unsigned long flags, addr, begin, end, pow2; unsigned int config = read_c0_config(); struct cpuinfo_mips *c = ¤t_cpu_data; + int tmp; if (config & CONF_SC) return 0; @@ -1108,6 +1108,7 @@ static int __cpuinit probe_scache(void) /* Now search for the wrap around point. */ pow2 = (128 * 1024); + tmp = 0; for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { cache_op(Index_Load_Tag_SD, addr); __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ diff --git a/trunk/arch/mips/mm/mmap.c b/trunk/arch/mips/mm/mmap.c deleted file mode 100644 index ae3c20a9556e..000000000000 --- a/trunk/arch/mips/mm/mmap.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
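The MFGPT hunk above switches from clocksource_register_hz() to an explicit mult/shift pair, where mult is chosen so that ns = (cycles * mult) >> shift. A worked example of that conversion; the ~14.318 MHz tick rate is an assumption used only to make the numbers concrete:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* same rounding as clocksource_hz2mult(): mult = (NSEC_PER_SEC << shift) / hz */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;				/* round to nearest */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t shift = 22;
	uint32_t mult = hz2mult(14318000, shift);	/* assumed MFGPT tick rate */
	uint64_t cycles = 14318;			/* roughly one millisecond */

	printf("mult = %u\n", mult);
	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycles,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}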
- * - * Copyright (C) 2011 Wind River Systems, - * written by Ralf Baechle - */ -#include -#include -#include -#include -#include -#include - -unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ - -EXPORT_SYMBOL(shm_align_mask); - -#define COLOUR_ALIGN(addr,pgoff) \ - ((((addr) + shm_align_mask) & ~shm_align_mask) + \ - (((pgoff) << PAGE_SHIFT) & shm_align_mask)) - -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) -{ - struct vm_area_struct * vmm; - int do_color_align; - - if (len > TASK_SIZE) - return -ENOMEM; - - if (flags & MAP_FIXED) { - /* Even MAP_FIXED mappings must reside within TASK_SIZE. */ - if (TASK_SIZE - len < addr) - return -EINVAL; - - /* - * We do not accept a shared mapping if it would violate - * cache aliasing constraints. - */ - if ((flags & MAP_SHARED) && - ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) - return -EINVAL; - return addr; - } - - do_color_align = 0; - if (filp || (flags & MAP_SHARED)) - do_color_align = 1; - if (addr) { - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); - vmm = find_vma(current->mm, addr); - if (TASK_SIZE - len >= addr && - (!vmm || addr + len <= vmm->vm_start)) - return addr; - } - addr = current->mm->mmap_base; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); - - for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { - /* At this point: (!vmm || addr < vmm->vm_end). */ - if (TASK_SIZE - len < addr) - return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) - return addr; - addr = vmm->vm_end; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - } -} - -void arch_pick_mmap_layout(struct mm_struct *mm) -{ - unsigned long random_factor = 0UL; - - if (current->flags & PF_RANDOMIZE) { - random_factor = get_random_int(); - random_factor = random_factor << PAGE_SHIFT; - if (TASK_IS_32BIT_ADDR) - random_factor &= 0xfffffful; - else - random_factor &= 0xffffffful; - } - - mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; -} - -static inline unsigned long brk_rnd(void) -{ - unsigned long rnd = get_random_int(); - - rnd = rnd << PAGE_SHIFT; - /* 8MB for 32bit, 256MB for 64bit */ - if (TASK_IS_32BIT_ADDR) - rnd = rnd & 0x7ffffful; - else - rnd = rnd & 0xffffffful; - - return rnd; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long base = mm->brk; - unsigned long ret; - - ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - - return ret; -} diff --git a/trunk/arch/mips/mm/tlbex.c b/trunk/arch/mips/mm/tlbex.c index 424ed4b92e6d..5ef294fbb6e7 100644 --- a/trunk/arch/mips/mm/tlbex.c +++ b/trunk/arch/mips/mm/tlbex.c @@ -404,7 +404,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_5KC: case CPU_TX49XX: case CPU_PR4450: - case CPU_XLR: uasm_i_nop(p); tlbw(p); break; @@ -1152,8 +1151,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) struct uasm_reloc *r = relocs; u32 *f; unsigned int final_len; - struct mips_huge_tlb_info htlb_info __maybe_unused; - enum vmalloc64_mode vmalloc_mode __maybe_unused; + struct mips_huge_tlb_info htlb_info; + enum vmalloc64_mode vmalloc_mode; memset(tlb_handler, 0, sizeof(tlb_handler)); memset(labels, 0, sizeof(labels)); diff --git a/trunk/arch/mips/mti-malta/malta-init.c b/trunk/arch/mips/mti-malta/malta-init.c index 
31180c321a1a..414f0c99b196 100644 --- a/trunk/arch/mips/mti-malta/malta-init.c +++ b/trunk/arch/mips/mti-malta/malta-init.c @@ -193,6 +193,8 @@ extern struct plat_smp_ops msmtc_smp_ops; void __init prom_init(void) { + int result; + prom_argc = fw_arg0; _prom_argv = (int *) fw_arg1; _prom_envp = (int *) fw_arg2; @@ -358,14 +360,20 @@ void __init prom_init(void) #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif -#ifdef CONFIG_MIPS_CMP /* Early detection of CMP support */ - if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) + result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ); + +#ifdef CONFIG_MIPS_CMP + if (result) register_smp_ops(&cmp_smp_ops); - else #endif #ifdef CONFIG_MIPS_MT_SMP +#ifdef CONFIG_MIPS_CMP + if (!result) register_smp_ops(&vsmp_smp_ops); +#else + register_smp_ops(&vsmp_smp_ops); +#endif #endif #ifdef CONFIG_MIPS_MT_SMTC register_smp_ops(&msmtc_smp_ops); diff --git a/trunk/arch/mips/mti-malta/malta-int.c b/trunk/arch/mips/mti-malta/malta-int.c index 1d36c511a7a5..9027061f0ead 100644 --- a/trunk/arch/mips/mti-malta/malta-int.c +++ b/trunk/arch/mips/mti-malta/malta-int.c @@ -56,6 +56,7 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock); static inline int mips_pcibios_iack(void) { int irq; + u32 dummy; /* * Determine highest priority pending interrupt by performing @@ -82,7 +83,7 @@ static inline int mips_pcibios_iack(void) BONITO_PCIMAP_CFG = 0x20000; /* Flush Bonito register block */ - (void) BONITO_PCIMAP_CFG; + dummy = BONITO_PCIMAP_CFG; iob(); /* sync */ irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); @@ -308,8 +309,6 @@ static void ipi_call_dispatch(void) static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) { - scheduler_ipi(); - return IRQ_HANDLED; } diff --git a/trunk/arch/mips/netlogic/Kconfig b/trunk/arch/mips/netlogic/Kconfig deleted file mode 100644 index a5ca743613f2..000000000000 --- a/trunk/arch/mips/netlogic/Kconfig +++ /dev/null @@ -1,5 +0,0 @@ -config NLM_COMMON - bool - -config NLM_XLR - bool diff --git a/trunk/arch/mips/netlogic/xlr/Makefile b/trunk/arch/mips/netlogic/xlr/Makefile deleted file mode 100644 index 9bd3f731f62e..000000000000 --- a/trunk/arch/mips/netlogic/xlr/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -obj-y += setup.o platform.o irq.o setup.o time.o -obj-$(CONFIG_SMP) += smp.o smpboot.o -obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o - -EXTRA_CFLAGS += -Werror diff --git a/trunk/arch/mips/netlogic/xlr/irq.c b/trunk/arch/mips/netlogic/xlr/irq.c deleted file mode 100644 index 1446d58e364c..000000000000 --- a/trunk/arch/mips/netlogic/xlr/irq.c +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include -#include - -static u64 nlm_irq_mask; -static DEFINE_SPINLOCK(nlm_pic_lock); - -static void xlr_pic_enable(struct irq_data *d) -{ - nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); - unsigned long flags; - nlm_reg_t reg; - int irq = d->irq; - - WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); - - spin_lock_irqsave(&nlm_pic_lock, flags); - reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE); - netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE, - reg | (1 << 6) | (1 << 30) | (1 << 31)); - spin_unlock_irqrestore(&nlm_pic_lock, flags); -} - -static void xlr_pic_mask(struct irq_data *d) -{ - nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); - unsigned long flags; - nlm_reg_t reg; - int irq = d->irq; - - WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); - - spin_lock_irqsave(&nlm_pic_lock, flags); - reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE); - netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE, - reg | (1 << 6) | (1 << 30) | (0 << 31)); - spin_unlock_irqrestore(&nlm_pic_lock, flags); -} - -#ifdef CONFIG_PCI -/* Extra ACK needed for XLR on chip PCI controller */ -static void xlr_pci_ack(struct irq_data *d) -{ - nlm_reg_t *pci_mmio = netlogic_io_mmio(NETLOGIC_IO_PCIX_OFFSET); - - netlogic_read_reg(pci_mmio, (0x140 >> 2)); -} - -/* Extra ACK needed for XLS on chip PCIe controller */ -static void xls_pcie_ack(struct irq_data *d) -{ - nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET); - - switch (d->irq) { - case PIC_PCIE_LINK0_IRQ: - netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff); - break; - case PIC_PCIE_LINK1_IRQ: - netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff); - break; - case PIC_PCIE_LINK2_IRQ: - netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff); - break; - case PIC_PCIE_LINK3_IRQ: - netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff); - break; - } -} - -/* For XLS B silicon, the 3,4 PCI interrupts are different */ -static void xls_pcie_ack_b(struct irq_data *d) -{ - nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET); - - switch (d->irq) { - case PIC_PCIE_LINK0_IRQ: - netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff); - break; - case PIC_PCIE_LINK1_IRQ: - netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff); - break; - case PIC_PCIE_XLSB0_LINK2_IRQ: - netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff); - break; - case PIC_PCIE_XLSB0_LINK3_IRQ: - netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff); - break; - } -} -#endif - -static void xlr_pic_ack(struct irq_data *d) -{ - unsigned long flags; - nlm_reg_t *mmio; - int irq = d->irq; - void *hd = 
irq_data_get_irq_handler_data(d); - - WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); - - if (hd) { - void (*extra_ack)(void *) = hd; - extra_ack(d); - } - mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); - spin_lock_irqsave(&nlm_pic_lock, flags); - netlogic_write_reg(mmio, PIC_INT_ACK, (1 << (irq - PIC_IRQ_BASE))); - spin_unlock_irqrestore(&nlm_pic_lock, flags); -} - -/* - * This chip definition handles interrupts routed thru the XLR - * hardware PIC, currently IRQs 8-39 are mapped to hardware intr - * 0-31 wired the XLR PIC - */ -static struct irq_chip xlr_pic = { - .name = "XLR-PIC", - .irq_enable = xlr_pic_enable, - .irq_mask = xlr_pic_mask, - .irq_ack = xlr_pic_ack, -}; - -static void rsvd_irq_handler(struct irq_data *d) -{ - WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq); -} - -/* - * Chip definition for CPU originated interrupts(timer, msg) and - * IPIs - */ -struct irq_chip nlm_cpu_intr = { - .name = "XLR-CPU-INTR", - .irq_enable = rsvd_irq_handler, - .irq_mask = rsvd_irq_handler, - .irq_ack = rsvd_irq_handler, -}; - -void __init init_xlr_irqs(void) -{ - nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); - uint32_t thread_mask = 1; - int level, i; - - pr_info("Interrupt thread mask [%x]\n", thread_mask); - for (i = 0; i < PIC_NUM_IRTS; i++) { - level = PIC_IRQ_IS_EDGE_TRIGGERED(i); - - /* Bind all PIC irqs to boot cpu */ - netlogic_write_reg(mmio, PIC_IRT_0_BASE + i, thread_mask); - - /* - * Use local scheduling and high polarity for all IRTs - * Invalidate all IRTs, by default - */ - netlogic_write_reg(mmio, PIC_IRT_1_BASE + i, - (level << 30) | (1 << 6) | (PIC_IRQ_BASE + i)); - } - - /* Make all IRQs as level triggered by default */ - for (i = 0; i < NR_IRQS; i++) { - if (PIC_IRQ_IS_IRT(i)) - irq_set_chip_and_handler(i, &xlr_pic, handle_level_irq); - else - irq_set_chip_and_handler(i, &nlm_cpu_intr, - handle_level_irq); - } -#ifdef CONFIG_SMP - irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr, - nlm_smp_function_ipi_handler); - irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr, - nlm_smp_resched_ipi_handler); - nlm_irq_mask |= - ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE)); -#endif - -#ifdef CONFIG_PCI - /* - * For PCI interrupts, we need to ack the PIC controller too, overload - * irq handler data to do this - */ - if (nlm_chip_is_xls()) { - if (nlm_chip_is_xls_b()) { - irq_set_handler_data(PIC_PCIE_LINK0_IRQ, - xls_pcie_ack_b); - irq_set_handler_data(PIC_PCIE_LINK1_IRQ, - xls_pcie_ack_b); - irq_set_handler_data(PIC_PCIE_XLSB0_LINK2_IRQ, - xls_pcie_ack_b); - irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, - xls_pcie_ack_b); - } else { - irq_set_handler_data(PIC_PCIE_LINK0_IRQ, xls_pcie_ack); - irq_set_handler_data(PIC_PCIE_LINK1_IRQ, xls_pcie_ack); - irq_set_handler_data(PIC_PCIE_LINK2_IRQ, xls_pcie_ack); - irq_set_handler_data(PIC_PCIE_LINK3_IRQ, xls_pcie_ack); - } - } else { - /* XLR PCI controller ACK */ - irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack); - } -#endif - /* unmask all PIC related interrupts. 
If no handler is installed by the - * drivers, it'll just ack the interrupt and return - */ - for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) - nlm_irq_mask |= (1ULL << i); - - nlm_irq_mask |= (1ULL << IRQ_TIMER); -} - -void __init arch_init_irq(void) -{ - /* Initialize the irq descriptors */ - init_xlr_irqs(); - write_c0_eimr(nlm_irq_mask); -} - -void __cpuinit nlm_smp_irq_init(void) -{ - /* set interrupt mask for non-zero cpus */ - write_c0_eimr(nlm_irq_mask); -} - -asmlinkage void plat_irq_dispatch(void) -{ - uint64_t eirr; - int i; - - eirr = read_c0_eirr() & read_c0_eimr(); - if (!eirr) - return; - - /* no need of EIRR here, writing compare clears interrupt */ - if (eirr & (1 << IRQ_TIMER)) { - do_IRQ(IRQ_TIMER); - return; - } - - /* use dcltz: optimize below code */ - for (i = 63; i != -1; i--) { - if (eirr & (1ULL << i)) - break; - } - if (i == -1) { - pr_err("no interrupt !!\n"); - return; - } - - /* Ack eirr */ - write_c0_eirr(1ULL << i); - - do_IRQ(i); -} diff --git a/trunk/arch/mips/netlogic/xlr/platform.c b/trunk/arch/mips/netlogic/xlr/platform.c deleted file mode 100644 index 609ec2534642..000000000000 --- a/trunk/arch/mips/netlogic/xlr/platform.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2011, Netlogic Microsystems. - * Copyright 2004, Matt Porter - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset) -{ - nlm_reg_t *mmio; - unsigned int value; - - /* XLR uart does not need any mapping of regs */ - mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift)); - value = netlogic_read_reg(mmio, 0); - - /* See XLR/XLS errata */ - if (offset == UART_MSR) - value ^= 0xF0; - else if (offset == UART_MCR) - value ^= 0x3; - - return value; -} - -void nlm_xlr_uart_out(struct uart_port *p, int offset, int value) -{ - nlm_reg_t *mmio; - - /* XLR uart does not need any mapping of regs */ - mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift)); - - /* See XLR/XLS errata */ - if (offset == UART_MSR) - value ^= 0xF0; - else if (offset == UART_MCR) - value ^= 0x3; - - netlogic_write_reg(mmio, 0, value); -} - -#define PORT(_irq) \ - { \ - .irq = _irq, \ - .regshift = 2, \ - .iotype = UPIO_MEM32, \ - .flags = (UPF_SKIP_TEST | \ - UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\ - .uartclk = PIC_CLKS_PER_SEC, \ - .type = PORT_16550A, \ - .serial_in = nlm_xlr_uart_in, \ - .serial_out = nlm_xlr_uart_out, \ - } - -static struct plat_serial8250_port xlr_uart_data[] = { - PORT(PIC_UART_0_IRQ), - PORT(PIC_UART_1_IRQ), - {}, -}; - -static struct platform_device uart_device = { - .name = "serial8250", - .id = PLAT8250_DEV_PLATFORM, - .dev = { - .platform_data = xlr_uart_data, - }, -}; - -static int __init nlm_uart_init(void) -{ - nlm_reg_t *mmio; - - mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); - xlr_uart_data[0].membase = (void __iomem *)mmio; - xlr_uart_data[0].mapbase = CPHYSADDR((unsigned long)mmio); - - mmio = netlogic_io_mmio(NETLOGIC_IO_UART_1_OFFSET); - xlr_uart_data[1].membase = (void __iomem *)mmio; - xlr_uart_data[1].mapbase = CPHYSADDR((unsigned long)mmio); - - return platform_device_register(&uart_device); -} - -arch_initcall(nlm_uart_init); diff --git a/trunk/arch/mips/netlogic/xlr/setup.c b/trunk/arch/mips/netlogic/xlr/setup.c deleted file mode 100644 index 
482802569e74..000000000000 --- a/trunk/arch/mips/netlogic/xlr/setup.c +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include - -unsigned long netlogic_io_base = (unsigned long)(DEFAULT_NETLOGIC_IO_BASE); -unsigned long nlm_common_ebase = 0x0; -struct psb_info nlm_prom_info; - -static void nlm_early_serial_setup(void) -{ - struct uart_port s; - nlm_reg_t *uart_base; - - uart_base = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); - memset(&s, 0, sizeof(s)); - s.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; - s.iotype = UPIO_MEM32; - s.regshift = 2; - s.irq = PIC_UART_0_IRQ; - s.uartclk = PIC_CLKS_PER_SEC; - s.serial_in = nlm_xlr_uart_in; - s.serial_out = nlm_xlr_uart_out; - s.mapbase = (unsigned long)uart_base; - s.membase = (unsigned char __iomem *)uart_base; - early_serial_setup(&s); -} - -static void nlm_linux_exit(void) -{ - nlm_reg_t *mmio; - - mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET); - /* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */ - netlogic_write_reg(mmio, NETLOGIC_GPIO_SWRESET_REG, 1); - for ( ; ; ) - cpu_wait(); -} - -void __init plat_mem_setup(void) -{ - panic_timeout = 5; - _machine_restart = (void (*)(char *))nlm_linux_exit; - _machine_halt = nlm_linux_exit; - pm_power_off = nlm_linux_exit; -} - -const char *get_system_type(void) -{ - return "Netlogic XLR/XLS Series"; -} - -void __init prom_free_prom_memory(void) -{ - /* Nothing yet */ -} - -static void build_arcs_cmdline(int *argv) -{ - int i, remain, len; - char *arg; - - remain = sizeof(arcs_cmdline) - 1; - arcs_cmdline[0] = '\0'; - for (i = 0; argv[i] != 0; i++) { - arg = (char *)(long)argv[i]; - len = strlen(arg); - if (len + 1 > remain) - break; - strcat(arcs_cmdline, arg); - strcat(arcs_cmdline, " "); - remain -= len + 1; - } - - /* Add the default options here */ - if ((strstr(arcs_cmdline, "console=")) == NULL) { - arg = "console=ttyS0,38400 "; - len = strlen(arg); - if (len > remain) - goto fail; - strcat(arcs_cmdline, arg); - remain -= len; - } -#ifdef CONFIG_BLK_DEV_INITRD - if ((strstr(arcs_cmdline, "rdinit=")) == NULL) { - arg = "rdinit=/sbin/init "; - len = strlen(arg); - if (len > remain) - goto fail; - strcat(arcs_cmdline, arg); - remain -= len; - } -#endif - return; -fail: - panic("Cannot add %s, command line too big!", arg); -} - -static void prom_add_memory(void) -{ - struct nlm_boot_mem_map *bootm; - u64 start, size; - u64 pref_backup = 512; /* avoid pref walking beyond end */ - int i; - - bootm = (void *)(long)nlm_prom_info.psb_mem_map; - for (i = 0; i < bootm->nr_map; i++) { - if (bootm->map[i].type != BOOT_MEM_RAM) - continue; - start = bootm->map[i].addr; - size = bootm->map[i].size; - - /* Work around for using bootloader mem */ - if (i == 0 && start == 0 && size == 0x0c000000) - size = 0x0ff00000; - - add_memory_region(start, size - pref_backup, BOOT_MEM_RAM); - } -} - -void __init prom_init(void) -{ - int *argv, *envp; /* passed as 32 bit ptrs */ - struct psb_info *prom_infop; - - /* truncate to 32 bit and sign extend all args */ - argv = (int *)(long)(int)fw_arg1; - envp = (int *)(long)(int)fw_arg2; - prom_infop = (struct psb_info *)(long)(int)fw_arg3; - - nlm_prom_info = *prom_infop; - - nlm_early_serial_setup(); - build_arcs_cmdline(argv); - nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); - prom_add_memory(); - -#ifdef CONFIG_SMP - nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map); - register_smp_ops(&nlm_smp_ops); -#endif -} diff --git a/trunk/arch/mips/netlogic/xlr/smp.c b/trunk/arch/mips/netlogic/xlr/smp.c deleted file mode 100644 index 
b495a7f1433b..000000000000 --- a/trunk/arch/mips/netlogic/xlr/smp.c +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include -#include -#include - -void core_send_ipi(int logical_cpu, unsigned int action) -{ - int cpu = cpu_logical_map(logical_cpu); - u32 tid = cpu & 0x3; - u32 pid = (cpu >> 2) & 0x07; - u32 ipi = (tid << 16) | (pid << 20); - - if (action & SMP_CALL_FUNCTION) - ipi |= IRQ_IPI_SMP_FUNCTION; - else if (action & SMP_RESCHEDULE_YOURSELF) - ipi |= IRQ_IPI_SMP_RESCHEDULE; - else - return; - - pic_send_ipi(ipi); -} - -void nlm_send_ipi_single(int cpu, unsigned int action) -{ - core_send_ipi(cpu, action); -} - -void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) -{ - int cpu; - - for_each_cpu(cpu, mask) { - core_send_ipi(cpu, action); - } -} - -/* IRQ_IPI_SMP_FUNCTION Handler */ -void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) -{ - smp_call_function_interrupt(); -} - -/* IRQ_IPI_SMP_RESCHEDULE handler */ -void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc) -{ - set_need_resched(); -} - -void nlm_common_ipi_handler(int irq, struct pt_regs *regs) -{ - if (irq == IRQ_IPI_SMP_FUNCTION) { - smp_call_function_interrupt(); - } else { - /* Announce that we are for reschduling */ - set_need_resched(); - } -} - -/* - * Called before going into mips code, early cpu init - */ -void nlm_early_init_secondary(void) -{ - write_c0_ebase((uint32_t)nlm_common_ebase); - /* TLB partition here later */ -} - -/* - * Code to run on secondary just after probing the CPU - */ -static void __cpuinit nlm_init_secondary(void) -{ - nlm_smp_irq_init(); -} - -void nlm_smp_finish(void) -{ -#ifdef notyet - nlm_common_msgring_cpu_init(); -#endif -} - -void nlm_cpus_done(void) -{ -} - -/* - * Boot all other cpus in the system, 
initialize them, and bring them into - * the boot function - */ -int nlm_cpu_unblock[NR_CPUS]; -int nlm_cpu_ready[NR_CPUS]; -unsigned long nlm_next_gp; -unsigned long nlm_next_sp; -cpumask_t phys_cpu_present_map; - -void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) -{ - unsigned long gp = (unsigned long)task_thread_info(idle); - unsigned long sp = (unsigned long)__KSTK_TOS(idle); - int cpu = cpu_logical_map(logical_cpu); - - nlm_next_sp = sp; - nlm_next_gp = gp; - - /* barrier */ - __sync(); - nlm_cpu_unblock[cpu] = 1; -} - -void __init nlm_smp_setup(void) -{ - unsigned int boot_cpu; - int num_cpus, i; - - boot_cpu = hard_smp_processor_id(); - cpus_clear(phys_cpu_present_map); - - cpu_set(boot_cpu, phys_cpu_present_map); - __cpu_number_map[boot_cpu] = 0; - __cpu_logical_map[0] = boot_cpu; - cpu_set(0, cpu_possible_map); - - num_cpus = 1; - for (i = 0; i < NR_CPUS; i++) { - if (nlm_cpu_ready[i]) { - cpu_set(i, phys_cpu_present_map); - __cpu_number_map[i] = num_cpus; - __cpu_logical_map[num_cpus] = i; - cpu_set(num_cpus, cpu_possible_map); - ++num_cpus; - } - } - - pr_info("Phys CPU present map: %lx, possible map %lx\n", - (unsigned long)phys_cpu_present_map.bits[0], - (unsigned long)cpu_possible_map.bits[0]); - - pr_info("Detected %i Slave CPU(s)\n", num_cpus); -} - -void nlm_prepare_cpus(unsigned int max_cpus) -{ -} - -struct plat_smp_ops nlm_smp_ops = { - .send_ipi_single = nlm_send_ipi_single, - .send_ipi_mask = nlm_send_ipi_mask, - .init_secondary = nlm_init_secondary, - .smp_finish = nlm_smp_finish, - .cpus_done = nlm_cpus_done, - .boot_secondary = nlm_boot_secondary, - .smp_setup = nlm_smp_setup, - .prepare_cpus = nlm_prepare_cpus, -}; - -unsigned long secondary_entry_point; - -int nlm_wakeup_secondary_cpus(u32 wakeup_mask) -{ - unsigned int tid, pid, ipi, i, boot_cpu; - void *reset_vec; - - secondary_entry_point = (unsigned long)prom_pre_boot_secondary_cpus; - reset_vec = (void *)CKSEG1ADDR(0x1fc00000); - memcpy(reset_vec, nlm_boot_smp_nmi, 0x80); - boot_cpu = hard_smp_processor_id(); - - for (i = 0; i < NR_CPUS; i++) { - if (i == boot_cpu) - continue; - if (wakeup_mask & (1u << i)) { - tid = i & 0x3; - pid = (i >> 2) & 0x7; - ipi = (tid << 16) | (pid << 20) | (1 << 8); - pic_send_ipi(ipi); - } - } - - return 0; -} diff --git a/trunk/arch/mips/netlogic/xlr/smpboot.S b/trunk/arch/mips/netlogic/xlr/smpboot.S deleted file mode 100644 index b8e074402c99..000000000000 --- a/trunk/arch/mips/netlogic/xlr/smpboot.S +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include - - -/* Don't jump to linux function from Bootloader stack. Change it - * here. Kernel might allocate bootloader memory before all the CPUs are - * brought up (eg: Inode cache region) and we better don't overwrite this - * memory - */ -NESTED(prom_pre_boot_secondary_cpus, 16, sp) - .set mips64 - mfc0 t0, $15, 1 # read ebase - andi t0, 0x1f # t0 has the processor_id() - sll t0, 2 # offset in cpu array - - PTR_LA t1, nlm_cpu_ready # mark CPU ready - PTR_ADDU t1, t0 - li t2, 1 - sw t2, 0(t1) - - PTR_LA t1, nlm_cpu_unblock - PTR_ADDU t1, t0 -1: lw t2, 0(t1) # wait till unblocked - beqz t2, 1b - nop - - PTR_LA t1, nlm_next_sp - PTR_L sp, 0(t1) - PTR_LA t1, nlm_next_gp - PTR_L gp, 0(t1) - - PTR_LA t0, nlm_early_init_secondary - jalr t0 - nop - - PTR_LA t0, smp_bootstrap - jr t0 - nop -END(prom_pre_boot_secondary_cpus) - -NESTED(nlm_boot_smp_nmi, 0, sp) - .set push - .set noat - .set mips64 - .set noreorder - - /* Clear the NMI and BEV bits */ - MFC0 k0, CP0_STATUS - li k1, 0xffb7ffff - and k0, k0, k1 - MTC0 k0, CP0_STATUS - - PTR_LA k1, secondary_entry_point - PTR_L k0, 0(k1) - jr k0 - nop - .set pop -END(nlm_boot_smp_nmi) diff --git a/trunk/arch/mips/netlogic/xlr/time.c b/trunk/arch/mips/netlogic/xlr/time.c deleted file mode 100644 index 0d81b262593c..000000000000 --- a/trunk/arch/mips/netlogic/xlr/time.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include - -#include -#include -#include - -unsigned int __cpuinit get_c0_compare_int(void) -{ - return IRQ_TIMER; -} - -void __init plat_time_init(void) -{ - mips_hpt_frequency = nlm_prom_info.cpu_frequency; - pr_info("MIPS counter frequency [%ld]\n", - (unsigned long)mips_hpt_frequency); -} diff --git a/trunk/arch/mips/netlogic/xlr/xlr_console.c b/trunk/arch/mips/netlogic/xlr/xlr_console.c deleted file mode 100644 index 759df0692201..000000000000 --- a/trunk/arch/mips/netlogic/xlr/xlr_console.c +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include - -void prom_putchar(char c) -{ - nlm_reg_t *mmio; - - mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); - while (netlogic_read_reg(mmio, 0x5) == 0) - ; - netlogic_write_reg(mmio, 0x0, c); -} diff --git a/trunk/arch/mips/pci/Makefile b/trunk/arch/mips/pci/Makefile index 4df879937446..c9209ca6c8e7 100644 --- a/trunk/arch/mips/pci/Makefile +++ b/trunk/arch/mips/pci/Makefile @@ -41,7 +41,6 @@ obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o -obj-$(CONFIG_SOC_XWAY) += pci-lantiq.o ops-lantiq.o obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o @@ -56,7 +55,6 @@ obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o -obj-$(CONFIG_NLM_XLR) += pci-xlr.o ifdef CONFIG_PCI_MSI obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o diff --git a/trunk/arch/mips/pci/ops-lantiq.c b/trunk/arch/mips/pci/ops-lantiq.c deleted file mode 100644 index 1f2afb55cc71..000000000000 --- a/trunk/arch/mips/pci/ops-lantiq.c +++ /dev/null @@ -1,116 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "pci-lantiq.h" - -#define LTQ_PCI_CFG_BUSNUM_SHF 16 -#define LTQ_PCI_CFG_DEVNUM_SHF 11 -#define LTQ_PCI_CFG_FUNNUM_SHF 8 - -#define PCI_ACCESS_READ 0 -#define PCI_ACCESS_WRITE 1 - -static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus, - unsigned int devfn, unsigned int where, u32 *data) -{ - unsigned long cfg_base; - unsigned long flags; - u32 temp; - - /* we support slot from 0 to 15 dev_fn & 0x68 (AD29) is the - SoC itself */ - if ((bus->number != 0) || ((devfn & 0xf8) > 0x78) - || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68)) - return 1; - - spin_lock_irqsave(&ebu_lock, flags); - - cfg_base = (unsigned long) ltq_pci_mapped_cfg; - cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn << - LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3); - - /* Perform access */ - if (access_type == PCI_ACCESS_WRITE) { - ltq_w32(swab32(*data), ((u32 *)cfg_base)); - } else { - *data = ltq_r32(((u32 *)(cfg_base))); - *data = swab32(*data); - } - wmb(); - - /* clean possible Master abort */ - cfg_base = (unsigned long) ltq_pci_mapped_cfg; - cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; - temp = ltq_r32(((u32 *)(cfg_base))); - temp = swab32(temp); - cfg_base = (unsigned long) ltq_pci_mapped_cfg; - cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; - ltq_w32(temp, ((u32 *)cfg_base)); - - spin_unlock_irqrestore(&ebu_lock, flags); - - if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ)) - return 1; - - return 0; -} - -int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - u32 data = 0; - - if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (size == 1) - *val = (data >> ((where & 3) << 3)) & 0xff; - else if (size == 2) - *val = (data >> ((where & 
3) << 3)) & 0xffff; - else - *val = data; - - return PCIBIOS_SUCCESSFUL; -} - -int ltq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - u32 data = 0; - - if (size == 4) { - data = val; - } else { - if (ltq_pci_config_access(PCI_ACCESS_READ, bus, - devfn, where, &data)) - return PCIBIOS_DEVICE_NOT_FOUND; - - if (size == 1) - data = (data & ~(0xff << ((where & 3) << 3))) | - (val << ((where & 3) << 3)); - else if (size == 2) - data = (data & ~(0xffff << ((where & 3) << 3))) | - (val << ((where & 3) << 3)); - } - - if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) - return PCIBIOS_DEVICE_NOT_FOUND; - - return PCIBIOS_SUCCESSFUL; -} diff --git a/trunk/arch/mips/pci/pci-lantiq.c b/trunk/arch/mips/pci/pci-lantiq.c deleted file mode 100644 index 603d7493e966..000000000000 --- a/trunk/arch/mips/pci/pci-lantiq.c +++ /dev/null @@ -1,297 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include - -#include "pci-lantiq.h" - -#define LTQ_PCI_CFG_BASE 0x17000000 -#define LTQ_PCI_CFG_SIZE 0x00008000 -#define LTQ_PCI_MEM_BASE 0x18000000 -#define LTQ_PCI_MEM_SIZE 0x02000000 -#define LTQ_PCI_IO_BASE 0x1AE00000 -#define LTQ_PCI_IO_SIZE 0x00200000 - -#define PCI_CR_FCI_ADDR_MAP0 0x00C0 -#define PCI_CR_FCI_ADDR_MAP1 0x00C4 -#define PCI_CR_FCI_ADDR_MAP2 0x00C8 -#define PCI_CR_FCI_ADDR_MAP3 0x00CC -#define PCI_CR_FCI_ADDR_MAP4 0x00D0 -#define PCI_CR_FCI_ADDR_MAP5 0x00D4 -#define PCI_CR_FCI_ADDR_MAP6 0x00D8 -#define PCI_CR_FCI_ADDR_MAP7 0x00DC -#define PCI_CR_CLK_CTRL 0x0000 -#define PCI_CR_PCI_MOD 0x0030 -#define PCI_CR_PC_ARB 0x0080 -#define PCI_CR_FCI_ADDR_MAP11hg 0x00E4 -#define PCI_CR_BAR11MASK 0x0044 -#define PCI_CR_BAR12MASK 0x0048 -#define PCI_CR_BAR13MASK 0x004C -#define PCI_CS_BASE_ADDR1 0x0010 -#define PCI_CR_PCI_ADDR_MAP11 0x0064 -#define PCI_CR_FCI_BURST_LENGTH 0x00E8 -#define PCI_CR_PCI_EOI 0x002C -#define PCI_CS_STS_CMD 0x0004 - -#define PCI_MASTER0_REQ_MASK_2BITS 8 -#define PCI_MASTER1_REQ_MASK_2BITS 10 -#define PCI_MASTER2_REQ_MASK_2BITS 12 -#define INTERNAL_ARB_ENABLE_BIT 0 - -#define LTQ_CGU_IFCCR 0x0018 -#define LTQ_CGU_PCICR 0x0034 - -#define ltq_pci_w32(x, y) ltq_w32((x), ltq_pci_membase + (y)) -#define ltq_pci_r32(x) ltq_r32(ltq_pci_membase + (x)) - -#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y)) -#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x)) - -struct ltq_pci_gpio_map { - int pin; - int alt0; - int alt1; - int dir; - char *name; -}; - -/* the pci core can make use of the following gpios */ -static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = { - { 0, 1, 0, 0, "pci-exin0" }, - { 1, 1, 0, 0, "pci-exin1" }, - { 2, 1, 0, 0, "pci-exin2" }, - { 39, 1, 0, 0, "pci-exin3" }, - { 10, 1, 0, 0, "pci-exin4" }, - { 9, 1, 0, 0, "pci-exin5" }, - { 30, 1, 0, 1, "pci-gnt1" }, - { 23, 1, 0, 1, "pci-gnt2" }, - { 19, 1, 0, 1, "pci-gnt3" }, - { 38, 1, 0, 1, "pci-gnt4" }, - { 29, 1, 0, 0, "pci-req1" }, - { 31, 1, 0, 0, "pci-req2" }, - { 3, 1, 0, 0, "pci-req3" }, - { 37, 1, 0, 0, "pci-req4" }, -}; - -__iomem void *ltq_pci_mapped_cfg; -static __iomem void *ltq_pci_membase; - -int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL; - -/* Since the PCI REQ pins can be reused for 
other functionality, make it - possible to exclude those from interpretation by the PCI controller */ -static int ltq_pci_req_mask = 0xf; - -static int *ltq_pci_irq_map; - -struct pci_ops ltq_pci_ops = { - .read = ltq_pci_read_config_dword, - .write = ltq_pci_write_config_dword -}; - -static struct resource pci_io_resource = { - .name = "pci io space", - .start = LTQ_PCI_IO_BASE, - .end = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1, - .flags = IORESOURCE_IO -}; - -static struct resource pci_mem_resource = { - .name = "pci memory space", - .start = LTQ_PCI_MEM_BASE, - .end = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1, - .flags = IORESOURCE_MEM -}; - -static struct pci_controller ltq_pci_controller = { - .pci_ops = &ltq_pci_ops, - .mem_resource = &pci_mem_resource, - .mem_offset = 0x00000000UL, - .io_resource = &pci_io_resource, - .io_offset = 0x00000000UL, -}; - -int pcibios_plat_dev_init(struct pci_dev *dev) -{ - if (ltqpci_plat_dev_init) - return ltqpci_plat_dev_init(dev); - - return 0; -} - -static u32 ltq_calc_bar11mask(void) -{ - u32 mem, bar11mask; - - /* BAR11MASK value depends on available memory on system. */ - mem = num_physpages * PAGE_SIZE; - bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8; - - return bar11mask; -} - -static void ltq_pci_setup_gpio(int gpio) -{ - int i; - for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) { - if (gpio & (1 << i)) { - ltq_gpio_request(ltq_pci_gpio_map[i].pin, - ltq_pci_gpio_map[i].alt0, - ltq_pci_gpio_map[i].alt1, - ltq_pci_gpio_map[i].dir, - ltq_pci_gpio_map[i].name); - } - } - ltq_gpio_request(21, 0, 0, 1, "pci-reset"); - ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK; -} - -static int __devinit ltq_pci_startup(struct ltq_pci_data *conf) -{ - u32 temp_buffer; - - /* set clock to 33Mhz */ - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); - - /* external or internal clock ? */ - if (conf->clock) { - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16), - LTQ_CGU_IFCCR); - ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR); - } else { - ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16), - LTQ_CGU_IFCCR); - ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR); - } - - /* setup pci clock and gpis used by pci */ - ltq_pci_setup_gpio(conf->gpio); - - /* enable auto-switching between PCI and EBU */ - ltq_pci_w32(0xa, PCI_CR_CLK_CTRL); - - /* busy, i.e.
configuration is not done, PCI access has to be retried */ - ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD); - wmb(); - /* BUS Master/IO/MEM access */ - ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD); - - /* enable external 2 PCI masters */ - temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB); - temp_buffer &= (~(ltq_pci_req_mask << 16)); - /* enable internal arbiter */ - temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT); - /* enable internal PCI master reqest */ - temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS)); - - /* enable EBU request */ - temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS)); - - /* enable all external masters request */ - temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS)); - ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB); - wmb(); - - /* setup BAR memory regions */ - ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0); - ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1); - ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2); - ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3); - ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4); - ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5); - ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6); - ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7); - ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg); - ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK); - ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11); - ltq_pci_w32(0, PCI_CS_BASE_ADDR1); - /* both TX and RX endian swap are enabled */ - ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI); - wmb(); - ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000, - PCI_CR_BAR12MASK); - ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000, - PCI_CR_BAR13MASK); - /*use 8 dw burst length */ - ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH); - ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD); - wmb(); - - /* setup irq line */ - ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON); - ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN); - - /* toggle reset pin */ - __gpio_set_value(21, 0); - wmb(); - mdelay(1); - __gpio_set_value(21, 1); - return 0; -} - -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - if (ltq_pci_irq_map[slot]) - return ltq_pci_irq_map[slot]; - printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n", - slot); - - return 0; -} - -static int __devinit ltq_pci_probe(struct platform_device *pdev) -{ - struct ltq_pci_data *ltq_pci_data = - (struct ltq_pci_data *) pdev->dev.platform_data; - pci_probe_only = 0; - ltq_pci_irq_map = ltq_pci_data->irq; - ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE); - ltq_pci_mapped_cfg = - ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_BASE); - ltq_pci_controller.io_map_base = - (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1); - ltq_pci_startup(ltq_pci_data); - register_pci_controller(&ltq_pci_controller); - - return 0; -} - -static struct platform_driver -ltq_pci_driver = { - .probe = ltq_pci_probe, - .driver = { - .name = "ltq_pci", - .owner = THIS_MODULE, - }, -}; - -int __init pcibios_init(void) -{ - int ret = platform_driver_register(&ltq_pci_driver); - if (ret) - printk(KERN_INFO "ltq_pci: Error registering platfom driver!"); - return ret; -} - -arch_initcall(pcibios_init); diff --git a/trunk/arch/mips/pci/pci-lantiq.h b/trunk/arch/mips/pci/pci-lantiq.h deleted file mode 100644 index 66bf6cd6be3c..000000000000 --- a/trunk/arch/mips/pci/pci-lantiq.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or
modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - */ - -#ifndef _LTQ_PCI_H__ -#define _LTQ_PCI_H__ - -extern __iomem void *ltq_pci_mapped_cfg; -extern int ltq_pci_read_config_dword(struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 *val); -extern int ltq_pci_write_config_dword(struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 val); - -#endif diff --git a/trunk/arch/mips/pci/pci-xlr.c b/trunk/arch/mips/pci/pci-xlr.c deleted file mode 100644 index 38fece16c435..000000000000 --- a/trunk/arch/mips/pci/pci-xlr.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights - * reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the NetLogic - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - -static void *pci_config_base; - -#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off)) - -/* PCI ops */ -static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn, - int where) -{ - u32 data; - u32 *cfgaddr; - - cfgaddr = (u32 *)(pci_config_base + - pci_cfg_addr(bus->number, devfn, where & ~3)); - data = *cfgaddr; - return cpu_to_le32(data); -} - -static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn, - int where, u32 data) -{ - u32 *cfgaddr; - - cfgaddr = (u32 *)(pci_config_base + - pci_cfg_addr(bus->number, devfn, where & ~3)); - *cfgaddr = cpu_to_le32(data); -} - -static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 *val) -{ - u32 data; - - if ((size == 2) && (where & 1)) - return PCIBIOS_BAD_REGISTER_NUMBER; - else if ((size == 4) && (where & 3)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - data = pci_cfg_read_32bit(bus, devfn, where); - - if (size == 1) - *val = (data >> ((where & 3) << 3)) & 0xff; - else if (size == 2) - *val = (data >> ((where & 3) << 3)) & 0xffff; - else - *val = data; - - return PCIBIOS_SUCCESSFUL; -} - - -static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn, - int where, int size, u32 val) -{ - u32 data; - - if ((size == 2) && (where & 1)) - return PCIBIOS_BAD_REGISTER_NUMBER; - else if ((size == 4) && (where & 3)) - return PCIBIOS_BAD_REGISTER_NUMBER; - - data = pci_cfg_read_32bit(bus, devfn, where); - - if (size == 1) - data = (data & ~(0xff << ((where & 3) << 3))) | - (val << ((where & 3) << 3)); - else if (size == 2) - data = (data & ~(0xffff << ((where & 3) << 3))) | - (val << ((where & 3) << 3)); - else - data = val; - - pci_cfg_write_32bit(bus, devfn, where, data); - - return PCIBIOS_SUCCESSFUL; -} - -struct pci_ops nlm_pci_ops = { - .read = nlm_pcibios_read, - .write = nlm_pcibios_write -}; - -static struct resource nlm_pci_mem_resource = { - .name = "XLR PCI MEM", - .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */ - .end = 0xdfffffffUL, - .flags = IORESOURCE_MEM, -}; - -static struct resource nlm_pci_io_resource = { - .name = "XLR IO MEM", - .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */ - .end = 0x100fffffUL, - .flags = IORESOURCE_IO, -}; - -struct pci_controller nlm_pci_controller = { - .index = 0, - .pci_ops = &nlm_pci_ops, - .mem_resource = &nlm_pci_mem_resource, - .mem_offset = 0x00000000UL, - .io_resource = &nlm_pci_io_resource, - .io_offset = 0x00000000UL, -}; - -int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - if (!nlm_chip_is_xls()) - return PIC_PCIX_IRQ; /* for XLR just one IRQ*/ - - /* - * For XLS PCIe, there is an IRQ per Link, find out which - * link the device is on to assign interrupts - */ - if (dev->bus->self == NULL) - return 0; - - switch (dev->bus->self->devfn) { - case 0x0: - return PIC_PCIE_LINK0_IRQ; - case 0x8: - return PIC_PCIE_LINK1_IRQ; - case 0x10: - if (nlm_chip_is_xls_b()) - return PIC_PCIE_XLSB0_LINK2_IRQ; - else - return PIC_PCIE_LINK2_IRQ; - case 0x18: - if (nlm_chip_is_xls_b()) - return PIC_PCIE_XLSB0_LINK3_IRQ; - else - return PIC_PCIE_LINK3_IRQ; - } - WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn); - return 0; -} - -/* Do platform specific device initialization at pci_enable_device() time */ -int pcibios_plat_dev_init(struct pci_dev *dev) -{ - return 0; -} - -static int __init pcibios_init(void) -{ - /* PSB assigns PCI 
resources */ - pci_probe_only = 1; - pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20); - - /* Extend IO port for memory mapped io */ - ioport_resource.start = 0; - ioport_resource.end = ~0; - - set_io_port_base(CKSEG1); - nlm_pci_controller.io_map_base = CKSEG1; - - pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n"); - register_pci_controller(&nlm_pci_controller); - - return 0; -} - -arch_initcall(pcibios_init); - -struct pci_fixup pcibios_fixups[] = { - {0} -}; diff --git a/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c index 98fd0099d964..f9b9dcdfa9dd 100644 --- a/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c +++ b/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c @@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d, static struct irq_chip msp_per_irq_controller = { .name = "MSP_PER", - .irq_enable = unmask_per_irq, + .irq_enable = unmask_per_irq. .irq_disable = mask_per_irq, .irq_ack = msp_per_irq_ack, #ifdef CONFIG_SMP diff --git a/trunk/arch/mips/pmc-sierra/yosemite/smp.c b/trunk/arch/mips/pmc-sierra/yosemite/smp.c index 2608752898c0..efc9e889b349 100644 --- a/trunk/arch/mips/pmc-sierra/yosemite/smp.c +++ b/trunk/arch/mips/pmc-sierra/yosemite/smp.c @@ -55,8 +55,6 @@ void titan_mailbox_irq(void) if (status & 0x2) smp_call_function_interrupt(); - if (status & 0x4) - scheduler_ipi(); break; case 1: @@ -65,8 +63,6 @@ void titan_mailbox_irq(void) if (status & 0x2) smp_call_function_interrupt(); - if (status & 0x4) - scheduler_ipi(); break; } } diff --git a/trunk/arch/mips/power/hibernate.S b/trunk/arch/mips/power/hibernate.S index f8a751c03282..dbb5c7b4b70f 100644 --- a/trunk/arch/mips/power/hibernate.S +++ b/trunk/arch/mips/power/hibernate.S @@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume) 0: PTR_L t1, PBE_ADDRESS(t0) /* source */ PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ - PTR_ADDU t3, t1, PAGE_SIZE + PTR_ADDIU t3, t1, PAGE_SIZE 1: REG_L t8, (t1) REG_S t8, (t2) diff --git a/trunk/arch/mips/rb532/gpio.c b/trunk/arch/mips/rb532/gpio.c index 6c47dfeb7be3..37de05d595e7 100644 --- a/trunk/arch/mips/rb532/gpio.c +++ b/trunk/arch/mips/rb532/gpio.c @@ -185,7 +185,7 @@ int __init rb532_gpio_init(void) struct resource *r; r = rb532_gpio_reg0_res; - rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r)); + rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); if (!rb532_gpio_chip->regbase) { printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); diff --git a/trunk/arch/mips/sgi-ip22/ip22-platform.c b/trunk/arch/mips/sgi-ip22/ip22-platform.c index 698904daf901..deddbf0ebe5c 100644 --- a/trunk/arch/mips/sgi-ip22/ip22-platform.c +++ b/trunk/arch/mips/sgi-ip22/ip22-platform.c @@ -132,7 +132,7 @@ static struct platform_device eth1_device = { */ static int __init sgiseeq_devinit(void) { - unsigned int pbdma __maybe_unused; + unsigned int tmp; int res, i; eth0_pd.hpc = hpc3c0; @@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void) /* Second HPC is missing? 
*/ if (ip22_is_fullhouse() || - get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) + get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) return 0; sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | diff --git a/trunk/arch/mips/sgi-ip22/ip22-time.c b/trunk/arch/mips/sgi-ip22/ip22-time.c index 1a94c9894188..603fc91c1030 100644 --- a/trunk/arch/mips/sgi-ip22/ip22-time.c +++ b/trunk/arch/mips/sgi-ip22/ip22-time.c @@ -32,7 +32,7 @@ static unsigned long dosample(void) { u32 ct0, ct1; - u8 msb; + u8 msb, lsb; /* Start the counter. */ sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | @@ -46,7 +46,7 @@ static unsigned long dosample(void) /* Latch and spin until top byte of counter2 is zero */ do { writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); - (void) readb(&sgint->tcnt2); + lsb = readb(&sgint->tcnt2); msb = readb(&sgint->tcnt2); ct1 = read_c0_count(); } while (msb); diff --git a/trunk/arch/mips/sgi-ip27/ip27-hubio.c b/trunk/arch/mips/sgi-ip27/ip27-hubio.c index cd0d5b06cd83..a1fa4abb3f6a 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-hubio.c +++ b/trunk/arch/mips/sgi-ip27/ip27-hubio.c @@ -29,6 +29,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, unsigned long xtalk_addr, size_t size) { nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); + volatile hubreg_t junk; unsigned i; /* use small-window mapping if possible */ @@ -63,7 +64,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, * after we write it. */ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); - (void) HUB_L(IIO_ITTE_GET(nasid, i)); + junk = HUB_L(IIO_ITTE_GET(nasid, i)); return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); } diff --git a/trunk/arch/mips/sgi-ip27/ip27-irq.c b/trunk/arch/mips/sgi-ip27/ip27-irq.c index b18b04e48577..0a04603d577c 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-irq.c +++ b/trunk/arch/mips/sgi-ip27/ip27-irq.c @@ -147,10 +147,8 @@ static void ip27_do_irq_mask0(void) #ifdef CONFIG_SMP if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ); - scheduler_ipi(); } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ); - scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); smp_call_function_interrupt(); diff --git a/trunk/arch/mips/sgi-ip27/ip27-klnuma.c b/trunk/arch/mips/sgi-ip27/ip27-klnuma.c index 1d1919a44e88..c3d30a88daf3 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/trunk/arch/mips/sgi-ip27/ip27-klnuma.c @@ -54,8 +54,11 @@ void __init setup_replication_mask(void) static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) { + cnodeid_t client_cnode; kern_vars_t *kvp; + client_cnode = NASID_TO_COMPACT_NODEID(client_nasid); + kvp = &hub_data(client_nasid)->kern_vars; KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; diff --git a/trunk/arch/mips/sgi-ip27/ip27-timer.c b/trunk/arch/mips/sgi-ip27/ip27-timer.c index ef74f3267f91..a152538d3c97 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-timer.c +++ b/trunk/arch/mips/sgi-ip27/ip27-timer.c @@ -66,7 +66,18 @@ static int rt_next_event(unsigned long delta, struct clock_event_device *evt) static void rt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { - /* Nothing to do ... 
*/ + switch (mode) { + case CLOCK_EVT_MODE_ONESHOT: + /* The only mode supported */ + break; + + case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_RESUME: + /* Nothing to do */ + break; + } } int rt_timer_irq; @@ -163,7 +174,8 @@ static void __init hub_rt_clocksource_init(void) { struct clocksource *cs = &hub_rt_clocksource; - clocksource_register_hz(cs, CYCLES_PER_SEC); + clocksource_set_clock(cs, CYCLES_PER_SEC); + clocksource_register(cs); } void __init plat_time_init(void) diff --git a/trunk/arch/mips/sibyte/bcm1480/smp.c b/trunk/arch/mips/sibyte/bcm1480/smp.c index d667875be564..47b347c992ea 100644 --- a/trunk/arch/mips/sibyte/bcm1480/smp.c +++ b/trunk/arch/mips/sibyte/bcm1480/smp.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -190,8 +189,10 @@ void bcm1480_mailbox_interrupt(void) /* Clear the mailbox to clear the interrupt */ __raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]); - if (action & SMP_RESCHEDULE_YOURSELF) - scheduler_ipi(); + /* + * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the + * interrupt will do the reschedule for us + */ if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); diff --git a/trunk/arch/mips/sibyte/sb1250/smp.c b/trunk/arch/mips/sibyte/sb1250/smp.c index 38e7f6bd7922..c00a5cb1128d 100644 --- a/trunk/arch/mips/sibyte/sb1250/smp.c +++ b/trunk/arch/mips/sibyte/sb1250/smp.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -178,8 +177,10 @@ void sb1250_mailbox_interrupt(void) /* Clear the mailbox to clear the interrupt */ ____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]); - if (action & SMP_RESCHEDULE_YOURSELF) - scheduler_ipi(); + /* + * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the + * interrupt will do the reschedule for us + */ if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); diff --git a/trunk/arch/mips/sni/time.c b/trunk/arch/mips/sni/time.c index 0904d4d30cb3..c76151b56568 100644 --- a/trunk/arch/mips/sni/time.c +++ b/trunk/arch/mips/sni/time.c @@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void) static __init unsigned long dosample(void) { u32 ct0, ct1; - volatile u8 msb; + volatile u8 msb, lsb; /* Start the counter. */ outb_p(0x34, 0x43); @@ -108,7 +108,7 @@ static __init unsigned long dosample(void) /* Latch and spin until top byte of counter0 is zero */ do { outb(0x00, 0x43); - (void) inb(0x40); + lsb = inb(0x40); msb = inb(0x40); ct1 = read_c0_count(); } while (msb); diff --git a/trunk/arch/mn10300/kernel/smp.c b/trunk/arch/mn10300/kernel/smp.c index 83fb27912231..226c826a2194 100644 --- a/trunk/arch/mn10300/kernel/smp.c +++ b/trunk/arch/mn10300/kernel/smp.c @@ -494,11 +494,14 @@ void smp_send_stop(void) * @irq: The interrupt number. * @dev_id: The device ID. * + * We need do nothing here, since the scheduling will be effected on our way + * back through entry.S. + * * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. */ static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) { - scheduler_ipi(); + /* do nothing */ return IRQ_HANDLED; } diff --git a/trunk/arch/parisc/kernel/smp.c b/trunk/arch/parisc/kernel/smp.c index 828305f19cff..69d63d354ef0 100644 --- a/trunk/arch/parisc/kernel/smp.c +++ b/trunk/arch/parisc/kernel/smp.c @@ -155,7 +155,10 @@ ipi_interrupt(int irq, void *dev_id) case IPI_RESCHEDULE: smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu); - scheduler_ipi(); + /* + * Reschedule callback. 
Everything to be + * done is done by the interrupt return path. + */ break; case IPI_CALL_FUNC: diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c index b1d126258dee..b7ed8d7a9b33 100644 --- a/trunk/arch/parisc/mm/init.c +++ b/trunk/arch/parisc/mm/init.c @@ -266,10 +266,8 @@ static void __init setup_bootmem(void) } memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); - for (i = 0; i < npmem_ranges; i++) { - node_set_state(i, N_NORMAL_MEMORY); + for (i = 0; i < npmem_ranges; i++) node_set_online(i); - } #endif /* diff --git a/trunk/arch/powerpc/include/asm/8xx_immap.h b/trunk/arch/powerpc/include/asm/8xx_immap.h index bdf0563ba423..6b6dc20b0beb 100644 --- a/trunk/arch/powerpc/include/asm/8xx_immap.h +++ b/trunk/arch/powerpc/include/asm/8xx_immap.h @@ -393,8 +393,8 @@ typedef struct fec { uint fec_addr_low; /* lower 32 bits of station address */ ushort fec_addr_high; /* upper 16 bits of station address */ ushort res1; /* reserved */ - uint fec_grp_hash_table_high; /* upper 32-bits of hash table */ - uint fec_grp_hash_table_low; /* lower 32-bits of hash table */ + uint fec_hash_table_high; /* upper 32-bits of hash table */ + uint fec_hash_table_low; /* lower 32-bits of hash table */ uint fec_r_des_start; /* beginning of Rx descriptor ring */ uint fec_x_des_start; /* beginning of Tx descriptor ring */ uint fec_r_buff_size; /* Rx buffer size */ diff --git a/trunk/arch/powerpc/include/asm/mpic.h b/trunk/arch/powerpc/include/asm/mpic.h index 49baddcdd14e..7005ee0b074d 100644 --- a/trunk/arch/powerpc/include/asm/mpic.h +++ b/trunk/arch/powerpc/include/asm/mpic.h @@ -3,6 +3,7 @@ #ifdef __KERNEL__ #include +#include #include #include @@ -319,6 +320,8 @@ struct mpic /* link */ struct mpic *next; + struct sys_device sysdev; + #ifdef CONFIG_PM struct mpic_irq_save *save_data; #endif diff --git a/trunk/arch/powerpc/include/asm/uninorth.h b/trunk/arch/powerpc/include/asm/uninorth.h index d12b11d7641e..ae9c899c8a6d 100644 --- a/trunk/arch/powerpc/include/asm/uninorth.h +++ b/trunk/arch/powerpc/include/asm/uninorth.h @@ -60,7 +60,7 @@ * * Obviously, the GART is not cache coherent and so any change to it * must be flushed to memory (or maybe just make the GART space non - * cachable). AGP memory itself doesn't seem to be cache coherent neither. + * cachable). AGP memory itself does't seem to be cache coherent neither. 
* * In order to invalidate the GART (which is probably necessary to inval * the bridge internal TLBs), the following sequence has to be written, diff --git a/trunk/arch/powerpc/kernel/ptrace.c b/trunk/arch/powerpc/kernel/ptrace.c index a6ae1cfad86c..55613e33e263 100644 --- a/trunk/arch/powerpc/kernel/ptrace.c +++ b/trunk/arch/powerpc/kernel/ptrace.c @@ -933,16 +933,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, if (data && !(data & DABR_TRANSLATION)) return -EIO; #ifdef CONFIG_HAVE_HW_BREAKPOINT - if (ptrace_get_breakpoints(task) < 0) - return -ESRCH; - bp = thread->ptrace_bps[0]; if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; } - ptrace_put_breakpoints(task); return 0; } if (bp) { @@ -952,12 +948,9 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); ret = modify_user_hw_breakpoint(bp, &attr); - if (ret) { - ptrace_put_breakpoints(task); + if (ret) return ret; - } thread->ptrace_bps[0] = bp; - ptrace_put_breakpoints(task); thread->dabr = data; return 0; } @@ -972,12 +965,9 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, ptrace_triggered, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; - ptrace_put_breakpoints(task); return PTR_ERR(bp); } - ptrace_put_breakpoints(task); - #endif /* CONFIG_HAVE_HW_BREAKPOINT */ /* Move contents to the DABR register */ diff --git a/trunk/arch/powerpc/kernel/smp.c b/trunk/arch/powerpc/kernel/smp.c index 9f9c204bef69..cbdbb14be4b0 100644 --- a/trunk/arch/powerpc/kernel/smp.c +++ b/trunk/arch/powerpc/kernel/smp.c @@ -116,7 +116,7 @@ void smp_message_recv(int msg) generic_smp_call_function_interrupt(); break; case PPC_MSG_RESCHEDULE: - scheduler_ipi(); + /* we notice need_resched on exit */ break; case PPC_MSG_CALL_FUNC_SINGLE: generic_smp_call_function_single_interrupt(); @@ -146,7 +146,7 @@ static irqreturn_t call_function_action(int irq, void *data) static irqreturn_t reschedule_action(int irq, void *data) { - scheduler_ipi(); + /* we just need the return path side effect of checking need_resched */ return IRQ_HANDLED; } diff --git a/trunk/arch/powerpc/platforms/83xx/suspend.c b/trunk/arch/powerpc/platforms/83xx/suspend.c index 104faa8aa23c..188272934cfb 100644 --- a/trunk/arch/powerpc/platforms/83xx/suspend.c +++ b/trunk/arch/powerpc/platforms/83xx/suspend.c @@ -318,20 +318,17 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { .end = mpc83xx_suspend_end, }; -static struct of_device_id pmc_match[]; static int pmc_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct resource res; struct pmc_type *type; int ret = 0; - match = of_match_device(pmc_match, &ofdev->dev); - if (!match) + if (!ofdev->dev.of_match) return -EINVAL; - type = match->data; + type = ofdev->dev.of_match->data; if (!of_device_is_available(np)) return -ENODEV; diff --git a/trunk/arch/powerpc/platforms/cell/spu_base.c b/trunk/arch/powerpc/platforms/cell/spu_base.c index 3675da73623f..acfaccea5f4f 100644 --- a/trunk/arch/powerpc/platforms/cell/spu_base.c +++ b/trunk/arch/powerpc/platforms/cell/spu_base.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -522,8 +521,18 @@ void spu_init_channels(struct spu *spu) } EXPORT_SYMBOL_GPL(spu_init_channels); +static int spu_shutdown(struct sys_device *sysdev) +{ + struct spu *spu = container_of(sysdev, struct spu, 
sysdev); + + spu_free_irqs(spu); + spu_destroy_spu(spu); + return 0; +} + static struct sysdev_class spu_sysdev_class = { .name = "spu", + .shutdown = spu_shutdown, }; int spu_add_sysdev_attr(struct sysdev_attribute *attr) @@ -788,22 +797,6 @@ static inline void crash_register_spus(struct list_head *list) } #endif -static void spu_shutdown(void) -{ - struct spu *spu; - - mutex_lock(&spu_full_list_mutex); - list_for_each_entry(spu, &spu_full_list, full_list) { - spu_free_irqs(spu); - spu_destroy_spu(spu); - } - mutex_unlock(&spu_full_list_mutex); -} - -static struct syscore_ops spu_syscore_ops = { - .shutdown = spu_shutdown, -}; - static int __init init_spu_base(void) { int i, ret = 0; @@ -837,7 +830,6 @@ static int __init init_spu_base(void) crash_register_spus(&spu_full_list); mutex_unlock(&spu_full_list_mutex); spu_add_sysdev_attr(&attr_stat); - register_syscore_ops(&spu_syscore_ops); spu_init_affinity(); diff --git a/trunk/arch/powerpc/platforms/powermac/pic.c b/trunk/arch/powerpc/platforms/powermac/pic.c index 7c18a1607d1c..023f24086a0a 100644 --- a/trunk/arch/powerpc/platforms/powermac/pic.c +++ b/trunk/arch/powerpc/platforms/powermac/pic.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -677,7 +677,7 @@ static int pmacpic_find_viaint(void) return viaint; } -static int pmacpic_suspend(void) +static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) { int viaint = pmacpic_find_viaint(); @@ -698,7 +698,7 @@ static int pmacpic_suspend(void) return 0; } -static void pmacpic_resume(void) +static int pmacpic_resume(struct sys_device *sysdev) { int i; @@ -709,19 +709,39 @@ static void pmacpic_resume(void) for (i = 0; i < max_real_irqs; ++i) if (test_bit(i, sleep_save_mask)) pmac_unmask_irq(irq_get_irq_data(i)); + + return 0; } -static struct syscore_ops pmacpic_syscore_ops = { - .suspend = pmacpic_suspend, - .resume = pmacpic_resume, +#endif /* CONFIG_PM && CONFIG_PPC32 */ + +static struct sysdev_class pmacpic_sysclass = { + .name = "pmac_pic", }; -static int __init init_pmacpic_syscore(void) +static struct sys_device device_pmacpic = { + .id = 0, + .cls = &pmacpic_sysclass, +}; + +static struct sysdev_driver driver_pmacpic = { +#if defined(CONFIG_PM) && defined(CONFIG_PPC32) + .suspend = &pmacpic_suspend, + .resume = &pmacpic_resume, +#endif /* CONFIG_PM && CONFIG_PPC32 */ +}; + +static int __init init_pmacpic_sysfs(void) { - register_syscore_ops(&pmacpic_syscore_ops); +#ifdef CONFIG_PPC32 + if (max_irqs == 0) + return -ENODEV; +#endif + printk(KERN_DEBUG "Registering pmac pic with sysfs...\n"); + sysdev_class_register(&pmacpic_sysclass); + sysdev_register(&device_pmacpic); + sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic); return 0; } +machine_subsys_initcall(powermac, init_pmacpic_sysfs); -machine_subsys_initcall(powermac, init_pmacpic_syscore); - -#endif /* CONFIG_PM && CONFIG_PPC32 */ diff --git a/trunk/arch/powerpc/sysdev/fsl_msi.c b/trunk/arch/powerpc/sysdev/fsl_msi.c index 01cd2f089512..d5679dc1e20f 100644 --- a/trunk/arch/powerpc/sysdev/fsl_msi.c +++ b/trunk/arch/powerpc/sysdev/fsl_msi.c @@ -304,10 +304,8 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, return 0; } -static const struct of_device_id fsl_of_msi_ids[]; static int __devinit fsl_of_msi_probe(struct platform_device *dev) { - const struct of_device_id *match; struct fsl_msi *msi; struct resource res; int err, i, j, irq_index, count; @@ -318,10 +316,9 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) u32 offset; 
static const u32 all_avail[] = { 0, NR_MSI_IRQS }; - match = of_match_device(fsl_of_msi_ids, &dev->dev); - if (!match) + if (!dev->dev.of_match) return -EINVAL; - features = match->data; + features = dev->dev.of_match->data; printk(KERN_DEBUG "Setting up Freescale MSI support\n"); diff --git a/trunk/arch/powerpc/sysdev/ipic.c b/trunk/arch/powerpc/sysdev/ipic.c index 596554a8725e..fa438be962b7 100644 --- a/trunk/arch/powerpc/sysdev/ipic.c +++ b/trunk/arch/powerpc/sysdev/ipic.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -902,7 +902,7 @@ static struct { u32 sercr; } ipic_saved_state; -static int ipic_suspend(void) +static int ipic_suspend(struct sys_device *sdev, pm_message_t state) { struct ipic *ipic = primary_ipic; @@ -933,7 +933,7 @@ static int ipic_suspend(void) return 0; } -static void ipic_resume(void) +static int ipic_resume(struct sys_device *sdev) { struct ipic *ipic = primary_ipic; @@ -949,26 +949,44 @@ static void ipic_resume(void) ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); + + return 0; } #else #define ipic_suspend NULL #define ipic_resume NULL #endif -static struct syscore_ops ipic_syscore_ops = { +static struct sysdev_class ipic_sysclass = { + .name = "ipic", .suspend = ipic_suspend, .resume = ipic_resume, }; -static int __init init_ipic_syscore(void) +static struct sys_device device_ipic = { + .id = 0, + .cls = &ipic_sysclass, +}; + +static int __init init_ipic_sysfs(void) { + int rc; + if (!primary_ipic || !primary_ipic->regs) return -ENODEV; + printk(KERN_DEBUG "Registering ipic with sysfs...\n"); - printk(KERN_DEBUG "Registering ipic system core operations\n"); - register_syscore_ops(&ipic_syscore_ops); - + rc = sysdev_class_register(&ipic_sysclass); + if (rc) { + printk(KERN_ERR "Failed registering ipic sys class\n"); + return -ENODEV; + } + rc = sysdev_register(&device_ipic); + if (rc) { + printk(KERN_ERR "Failed registering ipic sys device\n"); + return -ENODEV; + } return 0; } -subsys_initcall(init_ipic_syscore); +subsys_initcall(init_ipic_sysfs); diff --git a/trunk/arch/powerpc/sysdev/mpic.c b/trunk/arch/powerpc/sysdev/mpic.c index 7e5dc8f4984a..f91c065bed5a 100644 --- a/trunk/arch/powerpc/sysdev/mpic.c +++ b/trunk/arch/powerpc/sysdev/mpic.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -1703,8 +1702,9 @@ void mpic_reset_core(int cpu) #endif /* CONFIG_SMP */ #ifdef CONFIG_PM -static void mpic_suspend_one(struct mpic *mpic) +static int mpic_suspend(struct sys_device *dev, pm_message_t state) { + struct mpic *mpic = container_of(dev, struct mpic, sysdev); int i; for (i = 0; i < mpic->num_sources; i++) { @@ -1713,22 +1713,13 @@ static void mpic_suspend_one(struct mpic *mpic) mpic->save_data[i].dest = mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); } -} - -static int mpic_suspend(void) -{ - struct mpic *mpic = mpics; - - while (mpic) { - mpic_suspend_one(mpic); - mpic = mpic->next; - } return 0; } -static void mpic_resume_one(struct mpic *mpic) +static int mpic_resume(struct sys_device *dev) { + struct mpic *mpic = container_of(dev, struct mpic, sysdev); int i; for (i = 0; i < mpic->num_sources; i++) { @@ -1755,28 +1746,33 @@ static void mpic_resume_one(struct mpic *mpic) } #endif } /* end for loop */ -} - -static void mpic_resume(void) -{ - struct mpic *mpic = mpics; - while (mpic) { - mpic_resume_one(mpic); - mpic = mpic->next; - } + return 0; } +#endif 
-static struct syscore_ops mpic_syscore_ops = { +static struct sysdev_class mpic_sysclass = { +#ifdef CONFIG_PM .resume = mpic_resume, .suspend = mpic_suspend, +#endif + .name = "mpic", }; static int mpic_init_sys(void) { - register_syscore_ops(&mpic_syscore_ops); - return 0; + struct mpic *mpic = mpics; + int error, id = 0; + + error = sysdev_class_register(&mpic_sysclass); + + while (mpic && !error) { + mpic->sysdev.cls = &mpic_sysclass; + mpic->sysdev.id = id++; + error = sysdev_register(&mpic->sysdev); + mpic = mpic->next; + } + return error; } device_initcall(mpic_init_sys); -#endif diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 4a7f14079e03..2508a6f31588 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -88,7 +88,6 @@ config S390 select HAVE_KERNEL_XZ select HAVE_GET_USER_PAGES_FAST select HAVE_ARCH_MUTEX_CPU_RELAX - select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK diff --git a/trunk/arch/s390/crypto/prng.c b/trunk/arch/s390/crypto/prng.c index 8b16c479585b..44bca3f994b0 100644 --- a/trunk/arch/s390/crypto/prng.c +++ b/trunk/arch/s390/crypto/prng.c @@ -76,7 +76,7 @@ static void prng_seed(int nbytes) /* Add the entropy */ while (nbytes >= 8) { - *((__u64 *)parm_block) ^= *((__u64 *)(buf+i)); + *((__u64 *)parm_block) ^= *((__u64 *)buf+i); prng_add_entropy(); i += 8; nbytes -= 8; diff --git a/trunk/arch/s390/include/asm/cacheflush.h b/trunk/arch/s390/include/asm/cacheflush.h index 3e20383d0921..43a5c78046db 100644 --- a/trunk/arch/s390/include/asm/cacheflush.h +++ b/trunk/arch/s390/include/asm/cacheflush.h @@ -11,6 +11,5 @@ void kernel_map_pages(struct page *page, int numpages, int enable); int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); -int set_memory_x(unsigned long addr, int numpages); #endif /* _S390_CACHEFLUSH_H */ diff --git a/trunk/arch/s390/include/asm/diag.h b/trunk/arch/s390/include/asm/diag.h index 7e91c58072e2..72b2e2f2d32d 100644 --- a/trunk/arch/s390/include/asm/diag.h +++ b/trunk/arch/s390/include/asm/diag.h @@ -9,22 +9,9 @@ #define _ASM_S390_DIAG_H /* - * Diagnose 10: Release page range + * Diagnose 10: Release pages */ -static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) -{ - unsigned long start_addr, end_addr; - - start_addr = start_pfn << PAGE_SHIFT; - end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; - - asm volatile( - "0: diag %0,%1,0x10\n" - "1:\n" - EX_TABLE(0b, 1b) - EX_TABLE(1b, 1b) - : : "a" (start_addr), "a" (end_addr)); -} +extern void diag10(unsigned long addr); /* * Diagnose 14: Input spool file manipulation diff --git a/trunk/arch/s390/include/asm/ftrace.h b/trunk/arch/s390/include/asm/ftrace.h index b7931faaef6d..3c29be4836ed 100644 --- a/trunk/arch/s390/include/asm/ftrace.h +++ b/trunk/arch/s390/include/asm/ftrace.h @@ -11,13 +11,15 @@ struct dyn_arch_ftrace { }; #ifdef CONFIG_64BIT #define MCOUNT_INSN_SIZE 12 +#define MCOUNT_OFFSET 8 #else #define MCOUNT_INSN_SIZE 20 +#define MCOUNT_OFFSET 4 #endif static inline unsigned long ftrace_call_adjust(unsigned long addr) { - return addr; + return addr - MCOUNT_OFFSET; } #endif /* __ASSEMBLY__ */ diff --git a/trunk/arch/s390/include/asm/jump_label.h b/trunk/arch/s390/include/asm/jump_label.h deleted file mode 100644 index 95a6cf2b5b67..000000000000 --- a/trunk/arch/s390/include/asm/jump_label.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef 
_ASM_S390_JUMP_LABEL_H -#define _ASM_S390_JUMP_LABEL_H - -#include - -#define JUMP_LABEL_NOP_SIZE 6 - -#ifdef CONFIG_64BIT -#define ASM_PTR ".quad" -#define ASM_ALIGN ".balign 8" -#else -#define ASM_PTR ".long" -#define ASM_ALIGN ".balign 4" -#endif - -static __always_inline bool arch_static_branch(struct jump_label_key *key) -{ - asm goto("0: brcl 0,0\n" - ".pushsection __jump_table, \"aw\"\n" - ASM_ALIGN "\n" - ASM_PTR " 0b, %l[label], %0\n" - ".popsection\n" - : : "X" (key) : : label); - return false; -label: - return true; -} - -typedef unsigned long jump_label_t; - -struct jump_entry { - jump_label_t code; - jump_label_t target; - jump_label_t key; -}; - -#endif diff --git a/trunk/arch/s390/include/asm/mmu_context.h b/trunk/arch/s390/include/asm/mmu_context.h index 8c277caa8d3a..a6f0e7cc9cde 100644 --- a/trunk/arch/s390/include/asm/mmu_context.h +++ b/trunk/arch/s390/include/asm/mmu_context.h @@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_64BIT mm->context.asce_bits |= _ASCE_TYPE_REGION3; #endif - if (current->mm && current->mm->context.alloc_pgste) { + if (current->mm->context.alloc_pgste) { /* * alloc_pgste indicates, that any NEW context will be created * with extended page tables. The old context is unchanged. The diff --git a/trunk/arch/s390/kernel/Makefile b/trunk/arch/s390/kernel/Makefile index 5ff15dacb571..64230bc392fa 100644 --- a/trunk/arch/s390/kernel/Makefile +++ b/trunk/arch/s390/kernel/Makefile @@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ - vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o + vdso.o vtime.o sysinfo.o nmi.o sclp.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/trunk/arch/s390/kernel/diag.c b/trunk/arch/s390/kernel/diag.c index 8237fc07ac79..c032d11da8a1 100644 --- a/trunk/arch/s390/kernel/diag.c +++ b/trunk/arch/s390/kernel/diag.c @@ -8,6 +8,27 @@ #include #include +/* + * Diagnose 10: Release pages + */ +void diag10(unsigned long addr) +{ + if (addr >= 0x7ff00000) + return; + asm volatile( +#ifdef CONFIG_64BIT + " sam31\n" + " diag %0,%0,0x10\n" + "0: sam64\n" +#else + " diag %0,%0,0x10\n" + "0:\n" +#endif + EX_TABLE(0b, 0b) + : : "a" (addr)); +} +EXPORT_SYMBOL(diag10); + /* * Diagnose 14: Input spool file manipulation */ diff --git a/trunk/arch/s390/kernel/dis.c b/trunk/arch/s390/kernel/dis.c index 3d4a78fc1adc..c83726c9fe03 100644 --- a/trunk/arch/s390/kernel/dis.c +++ b/trunk/arch/s390/kernel/dis.c @@ -672,7 +672,6 @@ static struct insn opcode_b2[] = { { "rp", 0x77, INSTR_S_RD }, { "stcke", 0x78, INSTR_S_RD }, { "sacf", 0x79, INSTR_S_RD }, - { "spp", 0x80, INSTR_S_RD }, { "stsi", 0x7d, INSTR_S_RD }, { "srnm", 0x99, INSTR_S_RD }, { "stfpc", 0x9c, INSTR_S_RD }, diff --git a/trunk/arch/s390/kernel/entry.S b/trunk/arch/s390/kernel/entry.S index 1b67fc6ebdc2..648f64239a9d 100644 --- a/trunk/arch/s390/kernel/entry.S +++ b/trunk/arch/s390/kernel/entry.S @@ -836,7 +836,7 @@ restart_base: stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on basr %r14,0 l %r14,restart_addr-.(%r14) - basr %r14,%r14 # branch to start_secondary + br %r14 # branch to start_secondary restart_addr: .long start_secondary .align 8 diff --git a/trunk/arch/s390/kernel/entry64.S b/trunk/arch/s390/kernel/entry64.S index 9fd864563499..9d3603d6c511 100644 --- 
a/trunk/arch/s390/kernel/entry64.S +++ b/trunk/arch/s390/kernel/entry64.S @@ -841,7 +841,7 @@ restart_base: mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on - brasl %r14,start_secondary + jg start_secondary .align 8 restart_vtime: .long 0x7fffffff,0xffffffff diff --git a/trunk/arch/s390/kernel/jump_label.c b/trunk/arch/s390/kernel/jump_label.c deleted file mode 100644 index 44cc06bedf77..000000000000 --- a/trunk/arch/s390/kernel/jump_label.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Jump label s390 support - * - * Copyright IBM Corp. 2011 - * Author(s): Jan Glauber - */ -#include -#include -#include -#include -#include - -#ifdef HAVE_JUMP_LABEL - -struct insn { - u16 opcode; - s32 offset; -} __packed; - -struct insn_args { - unsigned long *target; - struct insn *insn; - ssize_t size; -}; - -static int __arch_jump_label_transform(void *data) -{ - struct insn_args *args = data; - int rc; - - rc = probe_kernel_write(args->target, args->insn, args->size); - WARN_ON_ONCE(rc < 0); - return 0; -} - -void arch_jump_label_transform(struct jump_entry *entry, - enum jump_label_type type) -{ - struct insn_args args; - struct insn insn; - - if (type == JUMP_LABEL_ENABLE) { - /* brcl 15,offset */ - insn.opcode = 0xc0f4; - insn.offset = (entry->target - entry->code) >> 1; - } else { - /* brcl 0,0 */ - insn.opcode = 0xc004; - insn.offset = 0; - } - - args.target = (void *) entry->code; - args.insn = &insn; - args.size = JUMP_LABEL_NOP_SIZE; - - stop_machine(__arch_jump_label_transform, &args, NULL); -} - -#endif diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c index 63c7d9ff220d..63a97db83f96 100644 --- a/trunk/arch/s390/kernel/smp.c +++ b/trunk/arch/s390/kernel/smp.c @@ -165,12 +165,12 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; /* * handle bit signal external calls + * + * For the ec_schedule signal we have to do nothing. All the work + * is done automatically when we return from the interrupt. */ bits = xchg(&S390_lowcore.ext_call_fast, 0); - if (test_bit(ec_schedule, &bits)) - scheduler_ipi(); - if (test_bit(ec_call_function, &bits)) generic_smp_call_function_interrupt(); diff --git a/trunk/arch/s390/mm/cmm.c b/trunk/arch/s390/mm/cmm.c index 1f1dba9dcf58..c66ffd8dbbb7 100644 --- a/trunk/arch/s390/mm/cmm.c +++ b/trunk/arch/s390/mm/cmm.c @@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter, } else free_page((unsigned long) npa); } - diag10_range(addr >> PAGE_SHIFT, 1); + diag10(addr); pa->pages[pa->index++] = addr; (*counter)++; spin_unlock(&cmm_lock); diff --git a/trunk/arch/s390/mm/fault.c b/trunk/arch/s390/mm/fault.c index ab988135e5c6..4cf85fef407c 100644 --- a/trunk/arch/s390/mm/fault.c +++ b/trunk/arch/s390/mm/fault.c @@ -543,6 +543,7 @@ static void pfault_interrupt(unsigned int ext_int_code, struct task_struct *tsk; __u16 subcode; + kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; /* * Get the external interruption subcode & pfault * initial/completion signal bit. VM stores this @@ -552,7 +553,6 @@ static void pfault_interrupt(unsigned int ext_int_code, subcode = ext_int_code >> 16; if ((subcode & 0xff00) != __SUBCODE_MASK) return; - kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; /* * Get the token (= address of the task structure of the affected task). 
diff --git a/trunk/arch/s390/mm/pageattr.c b/trunk/arch/s390/mm/pageattr.c index f05edcc3beff..0607e4b14b27 100644 --- a/trunk/arch/s390/mm/pageattr.c +++ b/trunk/arch/s390/mm/pageattr.c @@ -54,8 +54,3 @@ int set_memory_nx(unsigned long addr, int numpages) return 0; } EXPORT_SYMBOL_GPL(set_memory_nx); - -int set_memory_x(unsigned long addr, int numpages) -{ - return 0; -} diff --git a/trunk/arch/s390/oprofile/hwsampler.c b/trunk/arch/s390/oprofile/hwsampler.c index 33cbd373cce4..4952872d6f0a 100644 --- a/trunk/arch/s390/oprofile/hwsampler.c +++ b/trunk/arch/s390/oprofile/hwsampler.c @@ -1021,14 +1021,20 @@ int hwsampler_deallocate() return rc; } -unsigned long hwsampler_query_min_interval(void) +long hwsampler_query_min_interval(void) { - return min_sampler_rate; + if (min_sampler_rate) + return min_sampler_rate; + else + return -EINVAL; } -unsigned long hwsampler_query_max_interval(void) +long hwsampler_query_max_interval(void) { - return max_sampler_rate; + if (max_sampler_rate) + return max_sampler_rate; + else + return -EINVAL; } unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) diff --git a/trunk/arch/s390/oprofile/hwsampler.h b/trunk/arch/s390/oprofile/hwsampler.h index 1912f3bb190c..8c72b59316b5 100644 --- a/trunk/arch/s390/oprofile/hwsampler.h +++ b/trunk/arch/s390/oprofile/hwsampler.h @@ -102,8 +102,8 @@ int hwsampler_setup(void); int hwsampler_shutdown(void); int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); int hwsampler_deallocate(void); -unsigned long hwsampler_query_min_interval(void); -unsigned long hwsampler_query_max_interval(void); +long hwsampler_query_min_interval(void); +long hwsampler_query_max_interval(void); int hwsampler_start_all(unsigned long interval); int hwsampler_stop_all(void); int hwsampler_deactivate(unsigned int cpu); diff --git a/trunk/arch/s390/oprofile/init.c b/trunk/arch/s390/oprofile/init.c index 5995e9bc72d9..c63d7e58352b 100644 --- a/trunk/arch/s390/oprofile/init.c +++ b/trunk/arch/s390/oprofile/init.c @@ -145,11 +145,15 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) * create hwsampler files only if hwsampler_setup() succeeds. 
*/ oprofile_min_interval = hwsampler_query_min_interval(); - if (oprofile_min_interval == 0) + if (oprofile_min_interval < 0) { + oprofile_min_interval = 0; return -ENODEV; + } oprofile_max_interval = hwsampler_query_max_interval(); - if (oprofile_max_interval == 0) + if (oprofile_max_interval < 0) { + oprofile_max_interval = 0; return -ENODEV; + } if (oprofile_timer_init(ops)) return -ENODEV; diff --git a/trunk/arch/sh/Kconfig b/trunk/arch/sh/Kconfig index bc439de48cd1..4b89da248d17 100644 --- a/trunk/arch/sh/Kconfig +++ b/trunk/arch/sh/Kconfig @@ -24,6 +24,7 @@ config SUPERH select RTC_LIB select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW + select ARCH_NO_SYSDEV_OPS help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast diff --git a/trunk/arch/sh/configs/apsh4ad0a_defconfig b/trunk/arch/sh/configs/apsh4ad0a_defconfig index 77ec0e7b8ddf..e71a531f1e31 100644 --- a/trunk/arch/sh/configs/apsh4ad0a_defconfig +++ b/trunk/arch/sh/configs/apsh4ad0a_defconfig @@ -48,6 +48,7 @@ CONFIG_PREEMPT=y CONFIG_BINFMT_MISC=y CONFIG_PM=y CONFIG_PM_DEBUG=y +CONFIG_PM_VERBOSE=y CONFIG_PM_RUNTIME=y CONFIG_CPU_IDLE=y CONFIG_NET=y diff --git a/trunk/arch/sh/configs/sdk7786_defconfig b/trunk/arch/sh/configs/sdk7786_defconfig index c41650572d79..dc4a2eb6a616 100644 --- a/trunk/arch/sh/configs/sdk7786_defconfig +++ b/trunk/arch/sh/configs/sdk7786_defconfig @@ -83,6 +83,7 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_BINFMT_MISC=y CONFIG_PM=y CONFIG_PM_DEBUG=y +CONFIG_PM_VERBOSE=y CONFIG_PM_RUNTIME=y CONFIG_CPU_IDLE=y CONFIG_NET=y diff --git a/trunk/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/trunk/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 22db127afa7b..6dcb8166a64d 100644 --- a/trunk/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/trunk/arch/sh/kernel/cpu/shmobile/pm_runtime.c @@ -139,7 +139,7 @@ void platform_pm_runtime_suspend_idle(void) queue_work(pm_wq, &hwblk_work); } -static int default_platform_runtime_suspend(struct device *dev) +int platform_pm_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pdev_archdata *ad = &pdev->archdata; @@ -147,7 +147,7 @@ static int default_platform_runtime_suspend(struct device *dev) int hwblk = ad->hwblk_id; int ret = 0; - dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); + dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk); /* ignore off-chip platform devices */ if (!hwblk) @@ -183,20 +183,20 @@ static int default_platform_runtime_suspend(struct device *dev) mutex_unlock(&ad->mutex); out: - dev_dbg(dev, "%s() [%d] returns %d\n", - __func__, hwblk, ret); + dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n", + hwblk, ret); return ret; } -static int default_platform_runtime_resume(struct device *dev) +int platform_pm_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pdev_archdata *ad = &pdev->archdata; int hwblk = ad->hwblk_id; int ret = 0; - dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); + dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk); /* ignore off-chip platform devices */ if (!hwblk) @@ -228,19 +228,19 @@ static int default_platform_runtime_resume(struct device *dev) */ mutex_unlock(&ad->mutex); out: - dev_dbg(dev, "%s() [%d] returns %d\n", - __func__, hwblk, ret); + dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n", + hwblk, ret); return ret; } -static int default_platform_runtime_idle(struct device *dev) +int platform_pm_runtime_idle(struct device 
*dev) { struct platform_device *pdev = to_platform_device(dev); int hwblk = pdev->archdata.hwblk_id; int ret = 0; - dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); + dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk); /* ignore off-chip platform devices */ if (!hwblk) @@ -252,19 +252,10 @@ static int default_platform_runtime_idle(struct device *dev) /* suspend synchronously to disable clocks immediately */ ret = pm_runtime_suspend(dev); out: - dev_dbg(dev, "%s() [%d] done!\n", __func__, hwblk); + dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk); return ret; } -static struct dev_power_domain default_power_domain = { - .ops = { - .runtime_suspend = default_platform_runtime_suspend, - .runtime_resume = default_platform_runtime_resume, - .runtime_idle = default_platform_runtime_idle, - USE_PLATFORM_PM_SLEEP_OPS - }, -}; - static int platform_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { @@ -285,7 +276,6 @@ static int platform_bus_notify(struct notifier_block *nb, hwblk_disable(hwblk_info, hwblk); /* make sure driver re-inits itself once */ __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); - dev->pwr_domain = &default_power_domain; break; /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ case BUS_NOTIFY_BOUND_DRIVER: @@ -299,7 +289,6 @@ static int platform_bus_notify(struct notifier_block *nb, __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); break; case BUS_NOTIFY_DEL_DEVICE: - dev->pwr_domain = NULL; break; } return 0; diff --git a/trunk/arch/sh/kernel/ptrace_32.c b/trunk/arch/sh/kernel/ptrace_32.c index 3d7b209b2178..2130ca674e9b 100644 --- a/trunk/arch/sh/kernel/ptrace_32.c +++ b/trunk/arch/sh/kernel/ptrace_32.c @@ -117,11 +117,7 @@ void user_enable_single_step(struct task_struct *child) set_tsk_thread_flag(child, TIF_SINGLESTEP); - if (ptrace_get_breakpoints(child) < 0) - return; - set_single_step(child, pc); - ptrace_put_breakpoints(child); } void user_disable_single_step(struct task_struct *child) diff --git a/trunk/arch/sh/kernel/smp.c b/trunk/arch/sh/kernel/smp.c index 6207561ea34a..509b36b45115 100644 --- a/trunk/arch/sh/kernel/smp.c +++ b/trunk/arch/sh/kernel/smp.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -324,7 +323,6 @@ void smp_message_recv(unsigned int msg) generic_smp_call_function_interrupt(); break; case SMP_MSG_RESCHEDULE: - scheduler_ipi(); break; case SMP_MSG_FUNCTION_SINGLE: generic_smp_call_function_single_interrupt(); diff --git a/trunk/arch/sparc/include/asm/jump_label.h b/trunk/arch/sparc/include/asm/jump_label.h index fc73a82366f8..427d4684e0d2 100644 --- a/trunk/arch/sparc/include/asm/jump_label.h +++ b/trunk/arch/sparc/include/asm/jump_label.h @@ -7,20 +7,17 @@ #define JUMP_LABEL_NOP_SIZE 4 -static __always_inline bool arch_static_branch(struct jump_label_key *key) -{ - asm goto("1:\n\t" - "nop\n\t" - "nop\n\t" - ".pushsection __jump_table, \"aw\"\n\t" - ".align 4\n\t" - ".word 1b, %l[l_yes], %c0\n\t" - ".popsection \n\t" - : : "i" (key) : : l_yes); - return false; -l_yes: - return true; -} +#define JUMP_LABEL(key, label) \ + do { \ + asm goto("1:\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + ".pushsection __jump_table, \"a\"\n\t"\ + ".align 4\n\t" \ + ".word 1b, %l[" #label "], %c0\n\t" \ + ".popsection \n\t" \ + : : "i" (key) : : label);\ + } while (0) #endif /* __KERNEL__ */ diff --git a/trunk/arch/sparc/include/asm/topology_64.h b/trunk/arch/sparc/include/asm/topology_64.h index 8b9c556d630b..1c79f32734a0 100644 --- 
a/trunk/arch/sparc/include/asm/topology_64.h +++ b/trunk/arch/sparc/include/asm/topology_64.h @@ -65,10 +65,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus) #define smt_capable() (sparc64_multi_core) #endif /* CONFIG_SMP */ -extern cpumask_t cpu_core_map[NR_CPUS]; -static inline const struct cpumask *cpu_coregroup_mask(int cpu) -{ - return &cpu_core_map[cpu]; -} +#define cpu_coregroup_mask(cpu) (&cpu_core_map[cpu]) #endif /* _ASM_SPARC64_TOPOLOGY_H */ diff --git a/trunk/arch/sparc/kernel/apc.c b/trunk/arch/sparc/kernel/apc.c index 1e34f29e58bb..f679c57644d5 100644 --- a/trunk/arch/sparc/kernel/apc.c +++ b/trunk/arch/sparc/kernel/apc.c @@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op) return 0; } -static struct of_device_id apc_match[] = { +static struct of_device_id __initdata apc_match[] = { { .name = APC_OBPNAME, }, diff --git a/trunk/arch/sparc/kernel/pci_sabre.c b/trunk/arch/sparc/kernel/pci_sabre.c index d1840dbdaa2f..948068a083fc 100644 --- a/trunk/arch/sparc/kernel/pci_sabre.c +++ b/trunk/arch/sparc/kernel/pci_sabre.c @@ -452,10 +452,8 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, sabre_scan_bus(pbm, &op->dev); } -static const struct of_device_id sabre_match[]; static int __devinit sabre_probe(struct platform_device *op) { - const struct of_device_id *match; const struct linux_prom64_registers *pr_regs; struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; @@ -465,8 +463,7 @@ static int __devinit sabre_probe(struct platform_device *op) const u32 *vdma; u64 clear_irq; - match = of_match_device(sabre_match, &op->dev); - hummingbird_p = match && (match->data != NULL); + hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); if (!hummingbird_p) { struct device_node *cpu_dp; diff --git a/trunk/arch/sparc/kernel/pci_schizo.c b/trunk/arch/sparc/kernel/pci_schizo.c index 283fbc329a43..fecfcb2063c8 100644 --- a/trunk/arch/sparc/kernel/pci_schizo.c +++ b/trunk/arch/sparc/kernel/pci_schizo.c @@ -1458,15 +1458,11 @@ static int __devinit __schizo_init(struct platform_device *op, unsigned long chi return err; } -static const struct of_device_id schizo_match[]; static int __devinit schizo_probe(struct platform_device *op) { - const struct of_device_id *match; - - match = of_match_device(schizo_match, &op->dev); - if (!match) + if (!op->dev.of_match) return -EINVAL; - return __schizo_init(op, (unsigned long)match->data); + return __schizo_init(op, (unsigned long) op->dev.of_match->data); } /* The ordering of this table is very important. 
Some Tomatillo diff --git a/trunk/arch/sparc/kernel/pmc.c b/trunk/arch/sparc/kernel/pmc.c index 6a585d393580..93d7b4465f8d 100644 --- a/trunk/arch/sparc/kernel/pmc.c +++ b/trunk/arch/sparc/kernel/pmc.c @@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op) return 0; } -static struct of_device_id pmc_match[] = { +static struct of_device_id __initdata pmc_match[] = { { .name = PMC_OBPNAME, }, diff --git a/trunk/arch/sparc/kernel/smp_32.c b/trunk/arch/sparc/kernel/smp_32.c index 442286d83435..91c10fb70858 100644 --- a/trunk/arch/sparc/kernel/smp_32.c +++ b/trunk/arch/sparc/kernel/smp_32.c @@ -53,7 +53,6 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE; void __cpuinit smp_store_cpu_info(int id) { int cpu_node; - int mid; cpu_data(id).udelay_val = loops_per_jiffy; @@ -61,13 +60,10 @@ void __cpuinit smp_store_cpu_info(int id) cpu_data(id).clock_tick = prom_getintdefault(cpu_node, "clock-frequency", 0); cpu_data(id).prom_node = cpu_node; - mid = cpu_get_hwmid(cpu_node); + cpu_data(id).mid = cpu_get_hwmid(cpu_node); - if (mid < 0) { - printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node); - mid = 0; - } - cpu_data(id).mid = mid; + if (cpu_data(id).mid < 0) + panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); } void __init smp_cpus_done(unsigned int max_cpus) @@ -129,9 +125,7 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 }; void smp_send_reschedule(int cpu) { - /* - * XXX missing reschedule IPI, see scheduler_ipi() - */ + /* See sparc64 */ } void smp_send_stop(void) diff --git a/trunk/arch/sparc/kernel/smp_64.c b/trunk/arch/sparc/kernel/smp_64.c index 9478da7fdb3e..3e94a8c23238 100644 --- a/trunk/arch/sparc/kernel/smp_64.c +++ b/trunk/arch/sparc/kernel/smp_64.c @@ -1368,7 +1368,6 @@ void smp_send_reschedule(int cpu) void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); - scheduler_ipi(); } /* This is a nop because we capture all other cpus diff --git a/trunk/arch/sparc/kernel/time_32.c b/trunk/arch/sparc/kernel/time_32.c index 96046a4024c2..4e236391b635 100644 --- a/trunk/arch/sparc/kernel/time_32.c +++ b/trunk/arch/sparc/kernel/time_32.c @@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op) return 0; } -static struct of_device_id clock_match[] = { +static struct of_device_id __initdata clock_match[] = { { .name = "eeprom", }, diff --git a/trunk/arch/sparc/lib/checksum_32.S b/trunk/arch/sparc/lib/checksum_32.S index 0084c3361e15..3632cb34e914 100644 --- a/trunk/arch/sparc/lib/checksum_32.S +++ b/trunk/arch/sparc/lib/checksum_32.S @@ -289,16 +289,10 @@ cc_end_cruft: /* Also, handle the alignment code out of band. */ cc_dword_align: - cmp %g1, 16 - bge 1f - srl %g1, 1, %o3 -2: cmp %o3, 0 - be,a ccte + cmp %g1, 6 + bl,a ccte andcc %g1, 0xf, %o3 - andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits) - be,a 2b - srl %o3, 1, %o3 -1: andcc %o0, 0x1, %g0 + andcc %o0, 0x1, %g0 bne ccslow andcc %o0, 0x2, %g0 be 1f diff --git a/trunk/arch/tile/kernel/smp.c b/trunk/arch/tile/kernel/smp.c index c52224d5ed45..a4293102ef81 100644 --- a/trunk/arch/tile/kernel/smp.c +++ b/trunk/arch/tile/kernel/smp.c @@ -189,8 +189,12 @@ void flush_icache_range(unsigned long start, unsigned long end) /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ static irqreturn_t handle_reschedule_ipi(int irq, void *token) { + /* + * Nothing to do here; when we return from interrupt, the + * rescheduling will occur there. 
But do bump the interrupt + * profiler count in the meantime. + */ __get_cpu_var(irq_stat).irq_resched_count++; - scheduler_ipi(); return IRQ_HANDLED; } diff --git a/trunk/arch/um/Kconfig.um b/trunk/arch/um/Kconfig.um index b5e675e370c6..90a438acbfaf 100644 --- a/trunk/arch/um/Kconfig.um +++ b/trunk/arch/um/Kconfig.um @@ -47,7 +47,7 @@ config HOSTFS config HPPFS tristate "HoneyPot ProcFS (EXPERIMENTAL)" - depends on EXPERIMENTAL && PROC_FS + depends on EXPERIMENTAL help hppfs (HoneyPot ProcFS) is a filesystem which allows UML /proc entries to be overridden, removed, or fabricated from the host. diff --git a/trunk/arch/um/include/asm/thread_info.h b/trunk/arch/um/include/asm/thread_info.h index 5bd1bad33fab..e2cf786bda0a 100644 --- a/trunk/arch/um/include/asm/thread_info.h +++ b/trunk/arch/um/include/asm/thread_info.h @@ -49,10 +49,7 @@ static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; unsigned long mask = THREAD_SIZE - 1; - void *p; - - asm volatile ("" : "=r" (p) : "0" (&ti)); - ti = (struct thread_info *) (((unsigned long)p) & ~mask); + ti = (struct thread_info *) (((unsigned long) &ti) & ~mask); return ti; } diff --git a/trunk/arch/um/kernel/smp.c b/trunk/arch/um/kernel/smp.c index eefb107d2d73..106bf27e2a9a 100644 --- a/trunk/arch/um/kernel/smp.c +++ b/trunk/arch/um/kernel/smp.c @@ -173,7 +173,7 @@ void IPI_handler(int cpu) break; case 'R': - scheduler_ipi(); + set_tsk_need_resched(current); break; case 'S': diff --git a/trunk/arch/um/os-Linux/util.c b/trunk/arch/um/os-Linux/util.c index 42827cafa6af..6ea77979531c 100644 --- a/trunk/arch/um/os-Linux/util.c +++ b/trunk/arch/um/os-Linux/util.c @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -76,26 +75,6 @@ void setup_hostinfo(char *buf, int len) host.release, host.version, host.machine); } -/* - * We cannot use glibc's abort(). It makes use of tgkill() which - * has no effect within UML's kernel threads. - * After that glibc would execute an invalid instruction to kill - * the calling process and UML crashes with SIGSEGV. 
- */ -static inline void __attribute__ ((noreturn)) uml_abort(void) -{ - sigset_t sig; - - fflush(NULL); - - if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT)) - sigprocmask(SIG_UNBLOCK, &sig, 0); - - for (;;) - if (kill(getpid(), SIGABRT) < 0) - exit(127); -} - void os_dump_core(void) { int pid; @@ -137,5 +116,5 @@ void os_dump_core(void) while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) os_kill_ptraced_process(pid, 0); - uml_abort(); + abort(); } diff --git a/trunk/arch/um/sys-i386/Makefile b/trunk/arch/um/sys-i386/Makefile index b1da91c1b200..804b28dd0328 100644 --- a/trunk/arch/um/sys-i386/Makefile +++ b/trunk/arch/um/sys-i386/Makefile @@ -4,7 +4,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ - sys_call_table.o tls.o atomic64_cx8_32.o + sys_call_table.o tls.o obj-$(CONFIG_BINFMT_ELF) += elfcore.o diff --git a/trunk/arch/um/sys-i386/atomic64_cx8_32.S b/trunk/arch/um/sys-i386/atomic64_cx8_32.S deleted file mode 100644 index 1e901d3d4a95..000000000000 --- a/trunk/arch/um/sys-i386/atomic64_cx8_32.S +++ /dev/null @@ -1,225 +0,0 @@ -/* - * atomic64_t for 586+ - * - * Copied from arch/x86/lib/atomic64_cx8_32.S - * - * Copyright © 2010 Luca Barbieri - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ - -#include -#include -#include - -.macro SAVE reg - pushl_cfi %\reg - CFI_REL_OFFSET \reg, 0 -.endm - -.macro RESTORE reg - popl_cfi %\reg - CFI_RESTORE \reg -.endm - -.macro read64 reg - movl %ebx, %eax - movl %ecx, %edx -/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ - LOCK_PREFIX - cmpxchg8b (\reg) -.endm - -ENTRY(atomic64_read_cx8) - CFI_STARTPROC - - read64 %ecx - ret - CFI_ENDPROC -ENDPROC(atomic64_read_cx8) - -ENTRY(atomic64_set_cx8) - CFI_STARTPROC - -1: -/* we don't need LOCK_PREFIX since aligned 64-bit writes - * are atomic on 586 and newer */ - cmpxchg8b (%esi) - jne 1b - - ret - CFI_ENDPROC -ENDPROC(atomic64_set_cx8) - -ENTRY(atomic64_xchg_cx8) - CFI_STARTPROC - - movl %ebx, %eax - movl %ecx, %edx -1: - LOCK_PREFIX - cmpxchg8b (%esi) - jne 1b - - ret - CFI_ENDPROC -ENDPROC(atomic64_xchg_cx8) - -.macro addsub_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) - CFI_STARTPROC - SAVE ebp - SAVE ebx - SAVE esi - SAVE edi - - movl %eax, %esi - movl %edx, %edi - movl %ecx, %ebp - - read64 %ebp -1: - movl %eax, %ebx - movl %edx, %ecx - \ins\()l %esi, %ebx - \insc\()l %edi, %ecx - LOCK_PREFIX - cmpxchg8b (%ebp) - jne 1b - -10: - movl %ebx, %eax - movl %ecx, %edx - RESTORE edi - RESTORE esi - RESTORE ebx - RESTORE ebp - ret - CFI_ENDPROC -ENDPROC(atomic64_\func\()_return_cx8) -.endm - -addsub_return add add adc -addsub_return sub sub sbb - -.macro incdec_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) - CFI_STARTPROC - SAVE ebx - - read64 %esi -1: - movl %eax, %ebx - movl %edx, %ecx - \ins\()l $1, %ebx - \insc\()l $0, %ecx - LOCK_PREFIX - cmpxchg8b (%esi) - jne 1b - -10: - movl %ebx, %eax - movl %ecx, %edx - RESTORE ebx - ret - CFI_ENDPROC -ENDPROC(atomic64_\func\()_return_cx8) -.endm - -incdec_return inc add adc -incdec_return dec sub sbb - -ENTRY(atomic64_dec_if_positive_cx8) - CFI_STARTPROC - SAVE ebx - - read64 %esi -1: - movl %eax, %ebx - movl %edx, %ecx - subl $1, %ebx - sbb $0, %ecx - js 2f - LOCK_PREFIX - cmpxchg8b 
(%esi) - jne 1b - -2: - movl %ebx, %eax - movl %ecx, %edx - RESTORE ebx - ret - CFI_ENDPROC -ENDPROC(atomic64_dec_if_positive_cx8) - -ENTRY(atomic64_add_unless_cx8) - CFI_STARTPROC - SAVE ebp - SAVE ebx -/* these just push these two parameters on the stack */ - SAVE edi - SAVE esi - - movl %ecx, %ebp - movl %eax, %esi - movl %edx, %edi - - read64 %ebp -1: - cmpl %eax, 0(%esp) - je 4f -2: - movl %eax, %ebx - movl %edx, %ecx - addl %esi, %ebx - adcl %edi, %ecx - LOCK_PREFIX - cmpxchg8b (%ebp) - jne 1b - - movl $1, %eax -3: - addl $8, %esp - CFI_ADJUST_CFA_OFFSET -8 - RESTORE ebx - RESTORE ebp - ret -4: - cmpl %edx, 4(%esp) - jne 2b - xorl %eax, %eax - jmp 3b - CFI_ENDPROC -ENDPROC(atomic64_add_unless_cx8) - -ENTRY(atomic64_inc_not_zero_cx8) - CFI_STARTPROC - SAVE ebx - - read64 %esi -1: - testl %eax, %eax - je 4f -2: - movl %eax, %ebx - movl %edx, %ecx - addl $1, %ebx - adcl $0, %ecx - LOCK_PREFIX - cmpxchg8b (%esi) - jne 1b - - movl $1, %eax -3: - RESTORE ebx - ret -4: - testl %edx, %edx - jne 2b - jmp 3b - CFI_ENDPROC -ENDPROC(atomic64_inc_not_zero_cx8) diff --git a/trunk/arch/unicore32/kernel/irq.c b/trunk/arch/unicore32/kernel/irq.c index d4efa7d679ff..2aa30a364bbe 100644 --- a/trunk/arch/unicore32/kernel/irq.c +++ b/trunk/arch/unicore32/kernel/irq.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include @@ -237,7 +237,7 @@ static struct puv3_irq_state { unsigned int iccr; } puv3_irq_state; -static int puv3_irq_suspend(void) +static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state) { struct puv3_irq_state *st = &puv3_irq_state; @@ -265,7 +265,7 @@ static int puv3_irq_suspend(void) return 0; } -static void puv3_irq_resume(void) +static int puv3_irq_resume(struct sys_device *dev) { struct puv3_irq_state *st = &puv3_irq_state; @@ -278,20 +278,27 @@ static void puv3_irq_resume(void) writel(st->icmr, INTC_ICMR); } + return 0; } -static struct syscore_ops puv3_irq_syscore_ops = { +static struct sysdev_class puv3_irq_sysclass = { + .name = "pkunity-irq", .suspend = puv3_irq_suspend, .resume = puv3_irq_resume, }; -static int __init puv3_irq_init_syscore(void) +static struct sys_device puv3_irq_device = { + .id = 0, + .cls = &puv3_irq_sysclass, +}; + +static int __init puv3_irq_init_devicefs(void) { - register_syscore_ops(&puv3_irq_syscore_ops); - return 0; + sysdev_class_register(&puv3_irq_sysclass); + return sysdev_register(&puv3_irq_device); } -device_initcall(puv3_irq_init_syscore); +device_initcall(puv3_irq_init_devicefs); void __init init_IRQ(void) { diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 7a0ff52c73fa..cc6c53a95bfd 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -8,7 +8,6 @@ config 64BIT config X86_32 def_bool !64BIT - select CLKSRC_I8253 config X86_64 def_bool 64BIT @@ -72,6 +71,7 @@ config X86 select GENERIC_IRQ_SHOW select IRQ_FORCED_THREADING select USE_GENERIC_SMP_HELPERS if SMP + select ARCH_NO_SYSDEV_OPS config INSTRUCTION_DECODER def_bool (KPROBES || PERF_EVENTS) @@ -690,7 +690,6 @@ config AMD_IOMMU bool "AMD IOMMU support" select SWIOTLB select PCI_MSI - select PCI_IOV depends on X86_64 && PCI && ACPI ---help--- With this option you can enable support for AMD IOMMU hardware in @@ -1849,7 +1848,7 @@ config APM_ALLOW_INTS endif # APM -source "drivers/cpufreq/Kconfig" +source "arch/x86/kernel/cpu/cpufreq/Kconfig" source "drivers/cpuidle/Kconfig" diff --git a/trunk/arch/x86/boot/memory.c b/trunk/arch/x86/boot/memory.c index db75d07c3645..cae3feb1035e 100644 --- a/trunk/arch/x86/boot/memory.c +++ 
b/trunk/arch/x86/boot/memory.c @@ -91,7 +91,7 @@ static int detect_memory_e801(void) if (oreg.ax > 15*1024) { return -1; /* Bogus! */ } else if (oreg.ax == 15*1024) { - boot_params.alt_mem_k = (oreg.bx << 6) + oreg.ax; + boot_params.alt_mem_k = (oreg.dx << 6) + oreg.ax; } else { /* * This ignores memory above 16MB if we have a memory diff --git a/trunk/arch/x86/include/asm/alternative-asm.h b/trunk/arch/x86/include/asm/alternative-asm.h index 94d420b360d1..a63a68be1cce 100644 --- a/trunk/arch/x86/include/asm/alternative-asm.h +++ b/trunk/arch/x86/include/asm/alternative-asm.h @@ -15,13 +15,4 @@ .endm #endif -.macro altinstruction_entry orig alt feature orig_len alt_len - .align 8 - .quad \orig - .quad \alt - .word \feature - .byte \orig_len - .byte \alt_len -.endm - #endif /* __ASSEMBLY__ */ diff --git a/trunk/arch/x86/include/asm/alternative.h b/trunk/arch/x86/include/asm/alternative.h index 8cdd1e247975..13009d1af99a 100644 --- a/trunk/arch/x86/include/asm/alternative.h +++ b/trunk/arch/x86/include/asm/alternative.h @@ -4,6 +4,7 @@ #include #include #include +#include #include /* @@ -190,7 +191,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_smp(void *addr, const void *opcode, size_t len); extern void text_poke_smp_batch(struct text_poke_param *params, int n); -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #define IDEAL_NOP_SIZE_5 5 extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; extern void arch_init_ideal_nop5(void); diff --git a/trunk/arch/x86/include/asm/amd_iommu_proto.h b/trunk/arch/x86/include/asm/amd_iommu_proto.h index 55d95eb789b3..916bc8111a01 100644 --- a/trunk/arch/x86/include/asm/amd_iommu_proto.h +++ b/trunk/arch/x86/include/asm/amd_iommu_proto.h @@ -19,12 +19,13 @@ #ifndef _ASM_X86_AMD_IOMMU_PROTO_H #define _ASM_X86_AMD_IOMMU_PROTO_H -#include +struct amd_iommu; extern int amd_iommu_init_dma_ops(void); extern int amd_iommu_init_passthrough(void); -extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); +extern void amd_iommu_flush_all_domains(void); +extern void amd_iommu_flush_all_devices(void); extern void amd_iommu_apply_erratum_63(u16 devid); extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); @@ -43,12 +44,4 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev) (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); } -static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) -{ - if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) - return false; - - return !!(iommu->features & f); -} - #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/trunk/arch/x86/include/asm/amd_iommu_types.h b/trunk/arch/x86/include/asm/amd_iommu_types.h index 4c9982995414..e3509fc303bf 100644 --- a/trunk/arch/x86/include/asm/amd_iommu_types.h +++ b/trunk/arch/x86/include/asm/amd_iommu_types.h @@ -68,25 +68,12 @@ #define MMIO_CONTROL_OFFSET 0x0018 #define MMIO_EXCL_BASE_OFFSET 0x0020 #define MMIO_EXCL_LIMIT_OFFSET 0x0028 -#define MMIO_EXT_FEATURES 0x0030 #define MMIO_CMD_HEAD_OFFSET 0x2000 #define MMIO_CMD_TAIL_OFFSET 0x2008 #define MMIO_EVT_HEAD_OFFSET 0x2010 #define MMIO_EVT_TAIL_OFFSET 0x2018 #define MMIO_STATUS_OFFSET 0x2020 - -/* Extended Feature Bits */ -#define FEATURE_PREFETCH (1ULL<<0) -#define FEATURE_PPR (1ULL<<1) -#define FEATURE_X2APIC (1ULL<<2) -#define FEATURE_NX (1ULL<<3) -#define FEATURE_GT (1ULL<<4) -#define FEATURE_IA (1ULL<<6) 
-#define FEATURE_GA (1ULL<<7) -#define FEATURE_HE (1ULL<<8) -#define FEATURE_PC (1ULL<<9) - /* MMIO status bits */ #define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 @@ -126,9 +113,7 @@ /* command specific defines */ #define CMD_COMPL_WAIT 0x01 #define CMD_INV_DEV_ENTRY 0x02 -#define CMD_INV_IOMMU_PAGES 0x03 -#define CMD_INV_IOTLB_PAGES 0x04 -#define CMD_INV_ALL 0x08 +#define CMD_INV_IOMMU_PAGES 0x03 #define CMD_COMPL_WAIT_STORE_MASK 0x01 #define CMD_COMPL_WAIT_INT_MASK 0x02 @@ -230,8 +215,6 @@ #define IOMMU_PTE_IR (1ULL << 61) #define IOMMU_PTE_IW (1ULL << 62) -#define DTE_FLAG_IOTLB 0x01 - #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) @@ -244,7 +227,6 @@ /* IOMMU capabilities */ #define IOMMU_CAP_IOTLB 24 #define IOMMU_CAP_NPCACHE 26 -#define IOMMU_CAP_EFR 27 #define MAX_DOMAIN_ID 65536 @@ -267,8 +249,6 @@ extern bool amd_iommu_dump; /* global flag if IOMMUs cache non-present entries */ extern bool amd_iommu_np_cache; -/* Only true if all IOMMUs support device IOTLBs */ -extern bool amd_iommu_iotlb_sup; /* * Make iterating over all IOMMUs easier @@ -391,9 +371,6 @@ struct amd_iommu { /* flags read from acpi table */ u8 acpi_flags; - /* Extended features */ - u64 features; - /* * Capability pointer. There could be more than one IOMMU per PCI * device function if there are more than one AMD IOMMU capability @@ -432,6 +409,9 @@ struct amd_iommu { /* if one, we need to send a completion wait command */ bool need_sync; + /* becomes true if a command buffer reset is running */ + bool reset_in_progress; + /* default dma_ops domain for that IOMMU */ struct dma_ops_domain *default_dom; diff --git a/trunk/arch/x86/include/asm/apicdef.h b/trunk/arch/x86/include/asm/apicdef.h index 34595d5e1038..d87988bacf3e 100644 --- a/trunk/arch/x86/include/asm/apicdef.h +++ b/trunk/arch/x86/include/asm/apicdef.h @@ -78,7 +78,6 @@ #define APIC_DEST_LOGICAL 0x00800 #define APIC_DEST_PHYSICAL 0x00000 #define APIC_DM_FIXED 0x00000 -#define APIC_DM_FIXED_MASK 0x00700 #define APIC_DM_LOWEST 0x00100 #define APIC_DM_SMI 0x00200 #define APIC_DM_REMRD 0x00300 diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h index 7f2f7b123293..91f3e087cf21 100644 --- a/trunk/arch/x86/include/asm/cpufeature.h +++ b/trunk/arch/x86/include/asm/cpufeature.h @@ -195,7 +195,6 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ -#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #if defined(__KERNEL__) && !defined(__ASSEMBLY__) diff --git a/trunk/arch/x86/include/asm/ftrace.h b/trunk/arch/x86/include/asm/ftrace.h index 268c783ab1c0..db24c2278be0 100644 --- a/trunk/arch/x86/include/asm/ftrace.h +++ b/trunk/arch/x86/include/asm/ftrace.h @@ -38,10 +38,11 @@ extern void mcount(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* - * addr is the address of the mcount call instruction. - * recordmcount does the necessary offset calculation. + * call mcount is "e8 <4 byte offset>" + * The addr points to the 4 byte offset and the caller of this + * function wants the pointer to e8. Simply subtract one. 
*/ - return addr; + return addr - 1; } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/trunk/arch/x86/include/asm/i8253.h b/trunk/arch/x86/include/asm/i8253.h index 65aaa91d5850..fc1f579fb965 100644 --- a/trunk/arch/x86/include/asm/i8253.h +++ b/trunk/arch/x86/include/asm/i8253.h @@ -6,8 +6,6 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -#define PIT_LATCH LATCH - extern raw_spinlock_t i8253_lock; extern struct clock_event_device *global_clock_event; diff --git a/trunk/arch/x86/include/asm/io_apic.h b/trunk/arch/x86/include/asm/io_apic.h index a97a240f67f3..c4bd267dfc50 100644 --- a/trunk/arch/x86/include/asm/io_apic.h +++ b/trunk/arch/x86/include/asm/io_apic.h @@ -150,7 +150,7 @@ void setup_IO_APIC_irq_extra(u32 gsi); extern void ioapic_and_gsi_init(void); extern void ioapic_insert_resources(void); -int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); +int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr); extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); diff --git a/trunk/arch/x86/include/asm/jump_label.h b/trunk/arch/x86/include/asm/jump_label.h index a32b18ce6ead..574dbc22893a 100644 --- a/trunk/arch/x86/include/asm/jump_label.h +++ b/trunk/arch/x86/include/asm/jump_label.h @@ -5,25 +5,20 @@ #include #include -#include #define JUMP_LABEL_NOP_SIZE 5 -#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" - -static __always_inline bool arch_static_branch(struct jump_label_key *key) -{ - asm goto("1:" - JUMP_LABEL_INITIAL_NOP - ".pushsection __jump_table, \"aw\" \n\t" - _ASM_ALIGN "\n\t" - _ASM_PTR "1b, %l[l_yes], %c0 \n\t" - ".popsection \n\t" - : : "i" (key) : : l_yes); - return false; -l_yes: - return true; -} +# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" + +# define JUMP_LABEL(key, label) \ + do { \ + asm goto("1:" \ + JUMP_LABEL_INITIAL_NOP \ + ".pushsection __jump_table, \"aw\" \n\t"\ + _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ + ".popsection \n\t" \ + : : "i" (key) : : label); \ + } while (0) #endif /* __KERNEL__ */ diff --git a/trunk/arch/x86/include/asm/pgtable_types.h b/trunk/arch/x86/include/asm/pgtable_types.h index d56187c6b838..7db7723d1f32 100644 --- a/trunk/arch/x86/include/asm/pgtable_types.h +++ b/trunk/arch/x86/include/asm/pgtable_types.h @@ -299,7 +299,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, /* Install a pte for a particular vaddr in kernel space. */ void set_pte_vaddr(unsigned long vaddr, pte_t pte); -extern void native_pagetable_reserve(u64 start, u64 end); #ifdef CONFIG_X86_32 extern void native_pagetable_setup_start(pgd_t *base); extern void native_pagetable_setup_done(pgd_t *base); diff --git a/trunk/arch/x86/include/asm/setup.h b/trunk/arch/x86/include/asm/setup.h index 647d8a06ce4f..db8aa19a08a2 100644 --- a/trunk/arch/x86/include/asm/setup.h +++ b/trunk/arch/x86/include/asm/setup.h @@ -88,7 +88,7 @@ void *extend_brk(size_t size, size_t align); * executable.) 
*/ #define RESERVE_BRK(name,sz) \ - static void __section(.discard.text) __used notrace \ + static void __section(.discard.text) __used \ __brk_reservation_fn_##name##__(void) { \ asm volatile ( \ ".pushsection .brk_reservation,\"aw\",@nobits;" \ diff --git a/trunk/arch/x86/include/asm/stacktrace.h b/trunk/arch/x86/include/asm/stacktrace.h index 70bbe39043a9..d7e89c83645d 100644 --- a/trunk/arch/x86/include/asm/stacktrace.h +++ b/trunk/arch/x86/include/asm/stacktrace.h @@ -37,6 +37,9 @@ print_context_stack_bp(struct thread_info *tinfo, /* Generic stack tracer with callbacks */ struct stacktrace_ops { + void (*warning)(void *data, char *msg); + /* msg must contain %s for the symbol */ + void (*warning_symbol)(void *data, char *msg, unsigned long symbol); void (*address)(void *data, unsigned long address, int reliable); /* On negative return stop dumping */ int (*stack)(void *data, char *name); diff --git a/trunk/arch/x86/include/asm/uaccess.h b/trunk/arch/x86/include/asm/uaccess.h index 99f0ad753f32..abd3e0ea762a 100644 --- a/trunk/arch/x86/include/asm/uaccess.h +++ b/trunk/arch/x86/include/asm/uaccess.h @@ -42,7 +42,7 @@ * Returns 0 if the range is valid, nonzero otherwise. * * This is equivalent to the following test: - * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) + * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64) * * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ diff --git a/trunk/arch/x86/include/asm/uv/uv_bau.h b/trunk/arch/x86/include/asm/uv/uv_bau.h index 130f1eeee5fe..3e094af443c3 100644 --- a/trunk/arch/x86/include/asm/uv/uv_bau.h +++ b/trunk/arch/x86/include/asm/uv/uv_bau.h @@ -94,8 +94,6 @@ /* after this # consecutive successes, bump up the throttle if it was lowered */ #define COMPLETE_THRESHOLD 5 -#define UV_LB_SUBNODEID 0x10 - /* * number of entries in the destination side payload queue */ @@ -126,7 +124,7 @@ * The distribution specification (32 bytes) is interpreted as a 256-bit * distribution vector. Adjacent bits correspond to consecutive even numbered * nodeIDs. The result of adding the index of a given bit to the 15-bit - * 'base_dest_nasid' field of the header corresponds to the + * 'base_dest_nodeid' field of the header corresponds to the * destination nodeID associated with that specified bit. 
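Note on the uaccess.h hunk above: it only rewords the comment on the user-range check, but the point it makes is that adding a 32-bit addr and a 32-bit size can carry into bit 32, so the comparison conceptually needs 33 bits (65 on x86_64); the kernel's assembly version gets that extra bit from the carry flag, as the comment says. A minimal user-space sketch of the same overflow-safe check, doing the sum in 64 bits instead of relying on a carry; the function name and the exclusive "limit" convention here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/*
 * Return 0 if [addr, addr + size) lies entirely below 'limit',
 * non-zero otherwise.  Doing the sum in 64 bits emulates the
 * "33-bit" arithmetic the comment above talks about: a 32-bit addr
 * plus a 32-bit size can carry into bit 32, which must count as a
 * failed check rather than wrapping around.
 */
static int range_not_ok(uint32_t addr, uint32_t size, uint32_t limit)
{
        uint64_t end = (uint64_t)addr + size;

        return end > limit;
}

int main(void)
{
        uint32_t limit = 0xc0000000;    /* e.g. a 3G/1G split user limit */

        printf("%d\n", range_not_ok(0xbfffff00, 0x100, limit));  /* 0: fits  */
        printf("%d\n", range_not_ok(0xbfffff00, 0x101, limit));  /* 1: too big */
        printf("%d\n", range_not_ok(0xffffffff, 0x10, limit));   /* 1: wraps  */
        return 0;
}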
*/ struct bau_target_uvhubmask { @@ -178,7 +176,7 @@ struct bau_msg_payload { struct bau_msg_header { unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ /* bits 5:0 */ - unsigned int base_dest_nasid:15; /* nasid of the */ + unsigned int base_dest_nodeid:15; /* nasid of the */ /* bits 20:6 */ /* first bit in uvhub map */ unsigned int command:8; /* message type */ /* bits 28:21 */ @@ -380,10 +378,6 @@ struct ptc_stats { unsigned long d_rcanceled; /* number of messages canceled by resets */ }; -struct hub_and_pnode { - short uvhub; - short pnode; -}; /* * one per-cpu; to locate the software tables */ @@ -405,12 +399,10 @@ struct bau_control { int baudisabled; int set_bau_off; short cpu; - short osnode; short uvhub_cpu; short uvhub; short cpus_in_socket; short cpus_in_uvhub; - short partition_base_pnode; unsigned short message_number; unsigned short uvhub_quiesce; short socket_acknowledge_count[DEST_Q_SIZE]; @@ -430,16 +422,15 @@ struct bau_control { int congested_period; cycles_t period_time; long period_requests; - struct hub_and_pnode *target_hub_and_pnode; }; static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) { return constant_test_bit(uvhub, &dstp->bits[0]); } -static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp) +static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) { - __set_bit(pnode, &dstp->bits[0]); + __set_bit(uvhub, &dstp->bits[0]); } static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, int nbits) diff --git a/trunk/arch/x86/include/asm/uv/uv_hub.h b/trunk/arch/x86/include/asm/uv/uv_hub.h index 4298002d0c83..a501741c2335 100644 --- a/trunk/arch/x86/include/asm/uv/uv_hub.h +++ b/trunk/arch/x86/include/asm/uv/uv_hub.h @@ -398,8 +398,6 @@ struct uv_blade_info { unsigned short nr_online_cpus; unsigned short pnode; short memory_nid; - spinlock_t nmi_lock; - unsigned long nmi_count; }; extern struct uv_blade_info *uv_blade_info; extern short *uv_node_to_blade; diff --git a/trunk/arch/x86/include/asm/uv/uv_mmrs.h b/trunk/arch/x86/include/asm/uv/uv_mmrs.h index f5bb64a823d7..20cafeac7455 100644 --- a/trunk/arch/x86/include/asm/uv/uv_mmrs.h +++ b/trunk/arch/x86/include/asm/uv/uv_mmrs.h @@ -5,7 +5,7 @@ * * SGI UV MMR definitions * - * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 
*/ #ifndef _ASM_X86_UV_UV_MMRS_H @@ -1099,19 +1099,5 @@ union uvh_rtc1_int_config_u { } s; }; -/* ========================================================================= */ -/* UVH_SCRATCH5 */ -/* ========================================================================= */ -#define UVH_SCRATCH5 0x2d0200UL -#define UVH_SCRATCH5_32 0x00778 - -#define UVH_SCRATCH5_SCRATCH5_SHFT 0 -#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL -union uvh_scratch5_u { - unsigned long v; - struct uvh_scratch5_s { - unsigned long scratch5 : 64; /* RW, W1CS */ - } s; -}; #endif /* __ASM_UV_MMRS_X86_H__ */ diff --git a/trunk/arch/x86/include/asm/x86_init.h b/trunk/arch/x86/include/asm/x86_init.h index d3d859035af9..643ebf2e2ad8 100644 --- a/trunk/arch/x86/include/asm/x86_init.h +++ b/trunk/arch/x86/include/asm/x86_init.h @@ -67,17 +67,6 @@ struct x86_init_oem { void (*banner)(void); }; -/** - * struct x86_init_mapping - platform specific initial kernel pagetable setup - * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage - * - * For more details on the purpose of this hook, look in - * init_memory_mapping and the commit that added it. - */ -struct x86_init_mapping { - void (*pagetable_reserve)(u64 start, u64 end); -}; - /** * struct x86_init_paging - platform specific paging functions * @pagetable_setup_start: platform specific pre paging_init() call @@ -134,7 +123,6 @@ struct x86_init_ops { struct x86_init_mpparse mpparse; struct x86_init_irqs irqs; struct x86_init_oem oem; - struct x86_init_mapping mapping; struct x86_init_paging paging; struct x86_init_timers timers; struct x86_init_iommu iommu; diff --git a/trunk/arch/x86/include/asm/xen/page.h b/trunk/arch/x86/include/asm/xen/page.h index 64a619d47d34..c61934fbf22a 100644 --- a/trunk/arch/x86/include/asm/xen/page.h +++ b/trunk/arch/x86/include/asm/xen/page.h @@ -47,9 +47,8 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); extern unsigned long set_phys_range_identity(unsigned long pfn_s, unsigned long pfn_e); -extern int m2p_add_override(unsigned long mfn, struct page *page, - bool clear_pte); -extern int m2p_remove_override(struct page *page, bool clear_pte); +extern int m2p_add_override(unsigned long mfn, struct page *page); +extern int m2p_remove_override(struct page *page); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); diff --git a/trunk/arch/x86/include/asm/xen/pci.h b/trunk/arch/x86/include/asm/xen/pci.h index 4fbda9a3f339..aa8620989162 100644 --- a/trunk/arch/x86/include/asm/xen/pci.h +++ b/trunk/arch/x86/include/asm/xen/pci.h @@ -15,26 +15,10 @@ static inline int pci_xen_hvm_init(void) #endif #if defined(CONFIG_XEN_DOM0) void __init xen_setup_pirqs(void); -int xen_find_device_domain_owner(struct pci_dev *dev); -int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain); -int xen_unregister_device_domain_owner(struct pci_dev *dev); #else static inline void __init xen_setup_pirqs(void) { } -static inline int xen_find_device_domain_owner(struct pci_dev *dev) -{ - return -1; -} -static inline int xen_register_device_domain_owner(struct pci_dev *dev, - uint16_t domain) -{ - return -1; -} -static inline int xen_unregister_device_domain_owner(struct pci_dev *dev) -{ - return -1; -} #endif #if defined(CONFIG_PCI_MSI) diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index 97ebf82e0b7f..7338ef2218bc 100644 --- a/trunk/arch/x86/kernel/Makefile +++ 
b/trunk/arch/x86/kernel/Makefile @@ -117,7 +117,7 @@ obj-$(CONFIG_OF) += devicetree.o ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_AUDIT) += audit_64.o - obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o + obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o diff --git a/trunk/arch/x86/kernel/acpi/sleep.c b/trunk/arch/x86/kernel/acpi/sleep.c index 18a857ba7a25..ff93bc1b09c3 100644 --- a/trunk/arch/x86/kernel/acpi/sleep.c +++ b/trunk/arch/x86/kernel/acpi/sleep.c @@ -112,6 +112,11 @@ static int __init acpi_sleep_setup(char *str) #ifdef CONFIG_HIBERNATION if (strncmp(str, "s4_nohwsig", 10) == 0) acpi_no_s4_hw_signature(); + if (strncmp(str, "s4_nonvs", 8) == 0) { + pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, " + "please use acpi_sleep=nonvs instead"); + acpi_nvs_nosave(); + } #endif if (strncmp(str, "nonvs", 5) == 0) acpi_nvs_nosave(); diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c index 1eeeafcb4410..4a234677e213 100644 --- a/trunk/arch/x86/kernel/alternative.c +++ b/trunk/arch/x86/kernel/alternative.c @@ -210,15 +210,6 @@ void __init_or_module apply_alternatives(struct alt_instr *start, u8 insnbuf[MAX_PATCH_LEN]; DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); - /* - * The scan order should be from start to end. A later scanned - * alternative code can overwrite a previous scanned alternative code. - * Some kernel functions (e.g. memcpy, memset, etc) use this order to - * patch code. - * - * So be careful if you want to change the scan order to any other - * order. - */ for (a = start; a < end; a++) { u8 *instr = a->instr; BUG_ON(a->replacementlen > a->instrlen); @@ -688,7 +679,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); } -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #ifdef CONFIG_X86_64 unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; diff --git a/trunk/arch/x86/kernel/amd_iommu.c b/trunk/arch/x86/kernel/amd_iommu.c index 873e7e1ead7b..57ca77787220 100644 --- a/trunk/arch/x86/kernel/amd_iommu.c +++ b/trunk/arch/x86/kernel/amd_iommu.c @@ -18,7 +18,6 @@ */ #include -#include #include #include #include @@ -26,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -36,7 +34,7 @@ #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) -#define LOOP_TIMEOUT 100000 +#define EXIT_LOOP_COUNT 10000000 static DEFINE_RWLOCK(amd_iommu_devtable_lock); @@ -59,6 +57,7 @@ struct iommu_cmd { u32 data[4]; }; +static void reset_iommu_command_buffer(struct amd_iommu *iommu); static void update_domain(struct protection_domain *domain); /**************************************************************************** @@ -323,6 +322,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) break; case EVENT_TYPE_ILL_CMD: printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); + iommu->reset_in_progress = true; + reset_iommu_command_buffer(iommu); dump_command(address); break; case EVENT_TYPE_CMD_HARD_ERR: @@ -366,7 +367,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) spin_unlock_irqrestore(&iommu->lock, flags); } -irqreturn_t amd_iommu_int_thread(int irq, void *data) +irqreturn_t amd_iommu_int_handler(int irq, void *data) { struct amd_iommu *iommu; @@ -376,300 +377,192 @@ irqreturn_t 
amd_iommu_int_thread(int irq, void *data) return IRQ_HANDLED; } -irqreturn_t amd_iommu_int_handler(int irq, void *data) -{ - return IRQ_WAKE_THREAD; -} - /**************************************************************************** * * IOMMU command queuing functions * ****************************************************************************/ -static int wait_on_sem(volatile u64 *sem) -{ - int i = 0; - - while (*sem == 0 && i < LOOP_TIMEOUT) { - udelay(1); - i += 1; - } - - if (i == LOOP_TIMEOUT) { - pr_alert("AMD-Vi: Completion-Wait loop timed out\n"); - return -EIO; - } - - return 0; -} - -static void copy_cmd_to_buffer(struct amd_iommu *iommu, - struct iommu_cmd *cmd, - u32 tail) +/* + * Writes the command to the IOMMUs command buffer and informs the + * hardware about the new command. Must be called with iommu->lock held. + */ +static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) { + u32 tail, head; u8 *target; + WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); + tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); target = iommu->cmd_buf + tail; - tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - - /* Copy command to buffer */ - memcpy(target, cmd, sizeof(*cmd)); - - /* Tell the IOMMU about it */ + memcpy_toio(target, cmd, sizeof(*cmd)); + tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; + head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); + if (tail == head) + return -ENOMEM; writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); -} - -static void build_completion_wait(struct iommu_cmd *cmd, u64 address) -{ - WARN_ON(address & 0x7ULL); - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK; - cmd->data[1] = upper_32_bits(__pa(address)); - cmd->data[2] = 1; - CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); -} - -static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) -{ - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = devid; - CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY); -} - -static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, - size_t size, u16 domid, int pde) -{ - u64 pages; - int s; - - pages = iommu_num_pages(address, size, PAGE_SIZE); - s = 0; - - if (pages > 1) { - /* - * If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } - - address &= PAGE_MASK; - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[1] |= domid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); - if (s) /* size bit - we flush more than one 4kb page */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; - if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; -} - -static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, - u64 address, size_t size) -{ - u64 pages; - int s; - - pages = iommu_num_pages(address, size, PAGE_SIZE); - s = 0; - - if (pages > 1) { - /* - * If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } - - address &= PAGE_MASK; - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = devid; - cmd->data[0] |= (qdep & 0xff) << 24; - cmd->data[1] = devid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); - if (s) - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; -} - -static void build_inv_all(struct 
iommu_cmd *cmd) -{ - memset(cmd, 0, sizeof(*cmd)); - CMD_SET_TYPE(cmd, CMD_INV_ALL); + return 0; } /* - * Writes the command to the IOMMUs command buffer and informs the - * hardware about the new command. + * General queuing function for commands. Takes iommu->lock and calls + * __iommu_queue_command(). */ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) { - u32 left, tail, head, next_tail; unsigned long flags; + int ret; - WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); - -again: spin_lock_irqsave(&iommu->lock, flags); + ret = __iommu_queue_command(iommu, cmd); + if (!ret) + iommu->need_sync = true; + spin_unlock_irqrestore(&iommu->lock, flags); - head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - left = (head - next_tail) % iommu->cmd_buf_size; - - if (left <= 2) { - struct iommu_cmd sync_cmd; - volatile u64 sem = 0; - int ret; - - build_completion_wait(&sync_cmd, (u64)&sem); - copy_cmd_to_buffer(iommu, &sync_cmd, tail); + return ret; +} - spin_unlock_irqrestore(&iommu->lock, flags); +/* + * This function waits until an IOMMU has completed a completion + * wait command + */ +static void __iommu_wait_for_completion(struct amd_iommu *iommu) +{ + int ready = 0; + unsigned status = 0; + unsigned long i = 0; - if ((ret = wait_on_sem(&sem)) != 0) - return ret; + INC_STATS_COUNTER(compl_wait); - goto again; + while (!ready && (i < EXIT_LOOP_COUNT)) { + ++i; + /* wait for the bit to become one */ + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; } - copy_cmd_to_buffer(iommu, cmd, tail); - - /* We need to sync now to make sure all commands are processed */ - iommu->need_sync = true; - - spin_unlock_irqrestore(&iommu->lock, flags); + /* set bit back to zero */ + status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; + writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); - return 0; + if (unlikely(i == EXIT_LOOP_COUNT)) + iommu->reset_in_progress = true; } /* * This function queues a completion wait command into the command * buffer of an IOMMU */ -static int iommu_completion_wait(struct amd_iommu *iommu) +static int __iommu_completion_wait(struct amd_iommu *iommu) { struct iommu_cmd cmd; - volatile u64 sem = 0; - int ret; - if (!iommu->need_sync) - return 0; + memset(&cmd, 0, sizeof(cmd)); + cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; + CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); - build_completion_wait(&cmd, (u64)&sem); - - ret = iommu_queue_command(iommu, &cmd); - if (ret) - return ret; - - return wait_on_sem(&sem); + return __iommu_queue_command(iommu, &cmd); } -static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) +/* + * This function is called whenever we need to ensure that the IOMMU has + * completed execution of all commands we sent. It sends a + * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs + * us about that by writing a value to a physical address we pass with + * the command. 
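Both sides of this hunk wait for the IOMMU by polling with a bounded loop rather than blocking: the newer code spins on a completion-wait semaphore for at most LOOP_TIMEOUT iterations of udelay(1), the older code spins on MMIO_STATUS_COM_WAIT_INT_MASK for at most EXIT_LOOP_COUNT reads. A stand-alone sketch of that pattern, with invented names and a plain volatile flag standing in for device memory:

#include <stdio.h>

#define LOOP_TIMEOUT 100000

/* Flag the "device" (here: nobody) would set asynchronously. */
static volatile unsigned long sem;

/*
 * Spin until *p becomes non-zero or the iteration budget is spent.
 * Returns 0 on success, -1 on timeout.  In kernel context each
 * iteration also sleeps ~1us (udelay(1)), so the budget is a time bound.
 */
static int wait_on_flag(volatile unsigned long *p)
{
        int i;

        for (i = 0; i < LOOP_TIMEOUT; i++) {
                if (*p != 0)
                        return 0;
                /* udelay(1) would go here in the kernel */
        }
        return -1;
}

int main(void)
{
        if (wait_on_flag(&sem) != 0)
                printf("completion-wait loop timed out\n");
        return 0;
}

Bounding the loop turns a wedged command buffer into a reported timeout (the pr_alert() / reset_in_progress handling visible above) instead of a silent hang.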
+ */ +static int iommu_completion_wait(struct amd_iommu *iommu) { - struct iommu_cmd cmd; + int ret = 0; + unsigned long flags; - build_inv_dte(&cmd, devid); + spin_lock_irqsave(&iommu->lock, flags); - return iommu_queue_command(iommu, &cmd); -} + if (!iommu->need_sync) + goto out; -static void iommu_flush_dte_all(struct amd_iommu *iommu) -{ - u32 devid; + ret = __iommu_completion_wait(iommu); - for (devid = 0; devid <= 0xffff; ++devid) - iommu_flush_dte(iommu, devid); + iommu->need_sync = false; - iommu_completion_wait(iommu); -} + if (ret) + goto out; -/* - * This function uses heavy locking and may disable irqs for some time. But - * this is no issue because it is only called during resume. - */ -static void iommu_flush_tlb_all(struct amd_iommu *iommu) -{ - u32 dom_id; + __iommu_wait_for_completion(iommu); - for (dom_id = 0; dom_id <= 0xffff; ++dom_id) { - struct iommu_cmd cmd; - build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, - dom_id, 1); - iommu_queue_command(iommu, &cmd); - } +out: + spin_unlock_irqrestore(&iommu->lock, flags); - iommu_completion_wait(iommu); + if (iommu->reset_in_progress) + reset_iommu_command_buffer(iommu); + + return 0; } -static void iommu_flush_all(struct amd_iommu *iommu) +static void iommu_flush_complete(struct protection_domain *domain) { - struct iommu_cmd cmd; - - build_inv_all(&cmd); + int i; - iommu_queue_command(iommu, &cmd); - iommu_completion_wait(iommu); -} + for (i = 0; i < amd_iommus_present; ++i) { + if (!domain->dev_iommu[i]) + continue; -void iommu_flush_all_caches(struct amd_iommu *iommu) -{ - if (iommu_feature(iommu, FEATURE_IA)) { - iommu_flush_all(iommu); - } else { - iommu_flush_dte_all(iommu); - iommu_flush_tlb_all(iommu); + /* + * Devices of this domain are behind this IOMMU + * We need to wait for completion of all commands. 
+ */ + iommu_completion_wait(amd_iommus[i]); } } /* - * Command send function for flushing on-device TLB + * Command send function for invalidating a device table entry */ -static int device_flush_iotlb(struct device *dev, u64 address, size_t size) +static int iommu_flush_device(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); struct amd_iommu *iommu; struct iommu_cmd cmd; u16 devid; - int qdep; - qdep = pci_ats_queue_depth(pdev); devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; - build_inv_iotlb_pages(&cmd, devid, qdep, address, size); + /* Build command */ + memset(&cmd, 0, sizeof(cmd)); + CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); + cmd.data[0] = devid; return iommu_queue_command(iommu, &cmd); } +static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, + u16 domid, int pde, int s) +{ + memset(cmd, 0, sizeof(*cmd)); + address &= PAGE_MASK; + CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); + cmd->data[1] |= domid; + cmd->data[2] = lower_32_bits(address); + cmd->data[3] = upper_32_bits(address); + if (s) /* size bit - we flush more than one 4kb page */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; + if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; +} + /* - * Command send function for invalidating a device table entry + * Generic command send function for invalidaing TLB entries */ -static int device_flush_dte(struct device *dev) +static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, + u64 address, u16 domid, int pde, int s) { - struct amd_iommu *iommu; - struct pci_dev *pdev; - u16 devid; + struct iommu_cmd cmd; int ret; - pdev = to_pci_dev(dev); - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; + __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s); - ret = iommu_flush_dte(iommu, devid); - if (ret) - return ret; - - if (pci_ats_enabled(pdev)) - ret = device_flush_iotlb(dev, 0, ~0UL); + ret = iommu_queue_command(iommu, &cmd); return ret; } @@ -679,14 +572,23 @@ static int device_flush_dte(struct device *dev) * It invalidates a single PTE if the range to flush is within a single * page. Otherwise it flushes the whole TLB of the IOMMU. 
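The comment closing this chunk states the flush policy shared by __domain_flush_pages() and the older __iommu_flush_pages(): if [address, address + size) touches a single 4 KiB page, invalidate just that PTE; if it spans more, flush the whole domain TLB (the CMD_INV_IOMMU_ALL_PAGES_ADDRESS case with the size bit set). The page count behind that decision is plain index arithmetic; a sketch with illustrative names rather than the kernel's iommu_num_pages() helper:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Number of PAGE_SIZE pages touched by [addr, addr + len), len > 0. */
static unsigned long num_pages(uint64_t addr, uint64_t len)
{
        uint64_t first = addr >> PAGE_SHIFT;
        uint64_t last  = (addr + len - 1) >> PAGE_SHIFT;

        return (unsigned long)(last - first + 1);
}

int main(void)
{
        /* One page touched: a targeted single-PTE invalidation suffices. */
        printf("%lu\n", num_pages(0x1000, 0x800));
        /* Crosses a page boundary: the code flushes the whole domain TLB. */
        printf("%lu\n", num_pages(0x1ff0, 0x20));
        return 0;
}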
*/ -static void __domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size, int pde) +static void __iommu_flush_pages(struct protection_domain *domain, + u64 address, size_t size, int pde) { - struct iommu_dev_data *dev_data; - struct iommu_cmd cmd; - int ret = 0, i; + int s = 0, i; + unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE); + + address &= PAGE_MASK; + + if (pages > 1) { + /* + * If we have to flush more than one page, flush all + * TLB entries for this domain + */ + address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; + s = 1; + } - build_inv_iommu_pages(&cmd, address, size, domain->id, pde); for (i = 0; i < amd_iommus_present; ++i) { if (!domain->dev_iommu[i]) @@ -696,70 +598,101 @@ static void __domain_flush_pages(struct protection_domain *domain, * Devices of this domain are behind this IOMMU * We need a TLB flush */ - ret |= iommu_queue_command(amd_iommus[i], &cmd); - } - - list_for_each_entry(dev_data, &domain->dev_list, list) { - struct pci_dev *pdev = to_pci_dev(dev_data->dev); - - if (!pci_ats_enabled(pdev)) - continue; - - ret |= device_flush_iotlb(dev_data->dev, address, size); + iommu_queue_inv_iommu_pages(amd_iommus[i], address, + domain->id, pde, s); } - WARN_ON(ret); + return; } -static void domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size) +static void iommu_flush_pages(struct protection_domain *domain, + u64 address, size_t size) { - __domain_flush_pages(domain, address, size, 0); + __iommu_flush_pages(domain, address, size, 0); } /* Flush the whole IO/TLB for a given protection domain */ -static void domain_flush_tlb(struct protection_domain *domain) +static void iommu_flush_tlb(struct protection_domain *domain) { - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); + __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); } /* Flush the whole IO/TLB for a given protection domain - including PDE */ -static void domain_flush_tlb_pde(struct protection_domain *domain) +static void iommu_flush_tlb_pde(struct protection_domain *domain) { - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); + __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); } -static void domain_flush_complete(struct protection_domain *domain) + +/* + * This function flushes the DTEs for all devices in domain + */ +static void iommu_flush_domain_devices(struct protection_domain *domain) { - int i; + struct iommu_dev_data *dev_data; + unsigned long flags; - for (i = 0; i < amd_iommus_present; ++i) { - if (!domain->dev_iommu[i]) - continue; + spin_lock_irqsave(&domain->lock, flags); - /* - * Devices of this domain are behind this IOMMU - * We need to wait for completion of all commands. - */ - iommu_completion_wait(amd_iommus[i]); + list_for_each_entry(dev_data, &domain->dev_list, list) + iommu_flush_device(dev_data->dev); + + spin_unlock_irqrestore(&domain->lock, flags); +} + +static void iommu_flush_all_domain_devices(void) +{ + struct protection_domain *domain; + unsigned long flags; + + spin_lock_irqsave(&amd_iommu_pd_lock, flags); + + list_for_each_entry(domain, &amd_iommu_pd_list, list) { + iommu_flush_domain_devices(domain); + iommu_flush_complete(domain); } + + spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); } +void amd_iommu_flush_all_devices(void) +{ + iommu_flush_all_domain_devices(); +} /* - * This function flushes the DTEs for all devices in domain + * This function uses heavy locking and may disable irqs for some time. 
But + * this is no issue because it is only called during resume. */ -static void domain_flush_devices(struct protection_domain *domain) +void amd_iommu_flush_all_domains(void) { - struct iommu_dev_data *dev_data; + struct protection_domain *domain; unsigned long flags; - spin_lock_irqsave(&domain->lock, flags); + spin_lock_irqsave(&amd_iommu_pd_lock, flags); - list_for_each_entry(dev_data, &domain->dev_list, list) - device_flush_dte(dev_data->dev); + list_for_each_entry(domain, &amd_iommu_pd_list, list) { + spin_lock(&domain->lock); + iommu_flush_tlb_pde(domain); + iommu_flush_complete(domain); + spin_unlock(&domain->lock); + } - spin_unlock_irqrestore(&domain->lock, flags); + spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); +} + +static void reset_iommu_command_buffer(struct amd_iommu *iommu) +{ + pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); + + if (iommu->reset_in_progress) + panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); + + amd_iommu_reset_cmd_buffer(iommu); + amd_iommu_flush_all_devices(); + amd_iommu_flush_all_domains(); + + iommu->reset_in_progress = false; } /**************************************************************************** @@ -1477,22 +1410,17 @@ static bool dma_ops_domain(struct protection_domain *domain) return domain->flags & PD_DMA_OPS_MASK; } -static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) +static void set_dte_entry(u16 devid, struct protection_domain *domain) { u64 pte_root = virt_to_phys(domain->pt_root); - u32 flags = 0; pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; - if (ats) - flags |= DTE_FLAG_IOTLB; - - amd_iommu_dev_table[devid].data[3] |= flags; - amd_iommu_dev_table[devid].data[2] = domain->id; - amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); - amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); + amd_iommu_dev_table[devid].data[2] = domain->id; + amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); + amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); } static void clear_dte_entry(u16 devid) @@ -1509,42 +1437,34 @@ static void do_attach(struct device *dev, struct protection_domain *domain) { struct iommu_dev_data *dev_data; struct amd_iommu *iommu; - struct pci_dev *pdev; - bool ats = false; u16 devid; devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; dev_data = get_dev_data(dev); - pdev = to_pci_dev(dev); - - if (amd_iommu_iotlb_sup) - ats = pci_ats_enabled(pdev); /* Update data structures */ dev_data->domain = domain; list_add(&dev_data->list, &domain->dev_list); - set_dte_entry(devid, domain, ats); + set_dte_entry(devid, domain); /* Do reference counting */ domain->dev_iommu[iommu->index] += 1; domain->dev_cnt += 1; /* Flush the DTE entry */ - device_flush_dte(dev); + iommu_flush_device(dev); } static void do_detach(struct device *dev) { struct iommu_dev_data *dev_data; struct amd_iommu *iommu; - struct pci_dev *pdev; u16 devid; devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; dev_data = get_dev_data(dev); - pdev = to_pci_dev(dev); /* decrease reference counters */ dev_data->domain->dev_iommu[iommu->index] -= 1; @@ -1556,7 +1476,7 @@ static void do_detach(struct device *dev) clear_dte_entry(devid); /* Flush the DTE entry */ - device_flush_dte(dev); + iommu_flush_device(dev); } /* @@ -1619,13 +1539,9 @@ static int __attach_device(struct device *dev, static int attach_device(struct device *dev, struct 
protection_domain *domain) { - struct pci_dev *pdev = to_pci_dev(dev); unsigned long flags; int ret; - if (amd_iommu_iotlb_sup) - pci_enable_ats(pdev, PAGE_SHIFT); - write_lock_irqsave(&amd_iommu_devtable_lock, flags); ret = __attach_device(dev, domain); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); @@ -1635,7 +1551,7 @@ static int attach_device(struct device *dev, * left the caches in the IOMMU dirty. So we have to flush * here to evict all dirty stuff. */ - domain_flush_tlb_pde(domain); + iommu_flush_tlb_pde(domain); return ret; } @@ -1682,16 +1598,12 @@ static void __detach_device(struct device *dev) */ static void detach_device(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); unsigned long flags; /* lock device table */ write_lock_irqsave(&amd_iommu_devtable_lock, flags); __detach_device(dev); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev)) - pci_disable_ats(pdev); } /* @@ -1780,7 +1692,7 @@ static int device_change_notifier(struct notifier_block *nb, goto out; } - device_flush_dte(dev); + iommu_flush_device(dev); iommu_completion_wait(iommu); out: @@ -1841,9 +1753,8 @@ static void update_device_table(struct protection_domain *domain) struct iommu_dev_data *dev_data; list_for_each_entry(dev_data, &domain->dev_list, list) { - struct pci_dev *pdev = to_pci_dev(dev_data->dev); u16 devid = get_device_id(dev_data->dev); - set_dte_entry(devid, domain, pci_ats_enabled(pdev)); + set_dte_entry(devid, domain); } } @@ -1853,9 +1764,8 @@ static void update_domain(struct protection_domain *domain) return; update_device_table(domain); - - domain_flush_devices(domain); - domain_flush_tlb_pde(domain); + iommu_flush_domain_devices(domain); + iommu_flush_tlb_pde(domain); domain->updated = false; } @@ -2014,10 +1924,10 @@ static dma_addr_t __map_single(struct device *dev, ADD_STATS_COUNTER(alloced_io_mem, size); if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { - domain_flush_tlb(&dma_dom->domain); + iommu_flush_tlb(&dma_dom->domain); dma_dom->need_flush = false; } else if (unlikely(amd_iommu_np_cache)) - domain_flush_pages(&dma_dom->domain, address, size); + iommu_flush_pages(&dma_dom->domain, address, size); out: return address; @@ -2066,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, dma_ops_free_addresses(dma_dom, dma_addr, pages); if (amd_iommu_unmap_flush || dma_dom->need_flush) { - domain_flush_pages(&dma_dom->domain, flush_addr, size); + iommu_flush_pages(&dma_dom->domain, flush_addr, size); dma_dom->need_flush = false; } } @@ -2102,7 +2012,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, if (addr == DMA_ERROR_CODE) goto out; - domain_flush_complete(domain); + iommu_flush_complete(domain); out: spin_unlock_irqrestore(&domain->lock, flags); @@ -2129,7 +2039,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, __unmap_single(domain->priv, dma_addr, size, dir); - domain_flush_complete(domain); + iommu_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); } @@ -2194,7 +2104,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, goto unmap; } - domain_flush_complete(domain); + iommu_flush_complete(domain); out: spin_unlock_irqrestore(&domain->lock, flags); @@ -2240,7 +2150,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, s->dma_address = s->dma_length = 0; } - domain_flush_complete(domain); + iommu_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); } 
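set_dte_entry() in the hunk above builds one 64-bit value out of the page-table root, the paging mode and the IR/IW/P/TV bits, then stores it as two 32-bit device-table words via lower_32_bits()/upper_32_bits(). A self-contained sketch of that packing; the IOMMU_PTE_IR/IW values match the defines visible earlier in this patch, while DEV_ENTRY_MODE_SHIFT and the P/TV bit positions are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

#define DEV_ENTRY_MODE_SHIFT 9          /* assumed position of the mode field */
#define IOMMU_PTE_P   (1ULL << 0)       /* assumed: entry present */
#define IOMMU_PTE_TV  (1ULL << 1)       /* assumed: translation valid */
#define IOMMU_PTE_IR  (1ULL << 61)      /* as defined in this patch */
#define IOMMU_PTE_IW  (1ULL << 62)      /* as defined in this patch */

int main(void)
{
        uint64_t pt_root = 0x12345000ULL;   /* physical, page aligned */
        unsigned int mode = 3;              /* 3-level page table */
        uint64_t pte_root = pt_root;
        uint32_t data0, data1;

        /* Fold mode and permission bits into the root pointer word. */
        pte_root |= ((uint64_t)mode & 7) << DEV_ENTRY_MODE_SHIFT;
        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

        /* Split into the two 32-bit device-table words. */
        data0 = (uint32_t)pte_root;             /* lower_32_bits() */
        data1 = (uint32_t)(pte_root >> 32);     /* upper_32_bits() */

        printf("data[0]=0x%08x data[1]=0x%08x\n",
               (unsigned)data0, (unsigned)data1);
        return 0;
}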
@@ -2290,7 +2200,7 @@ static void *alloc_coherent(struct device *dev, size_t size, goto out_free; } - domain_flush_complete(domain); + iommu_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); @@ -2322,7 +2232,7 @@ static void free_coherent(struct device *dev, size_t size, __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); - domain_flush_complete(domain); + iommu_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); @@ -2566,7 +2476,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, if (!iommu) return; - device_flush_dte(dev); + iommu_flush_device(dev); iommu_completion_wait(iommu); } @@ -2632,7 +2542,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, unmap_size = iommu_unmap_page(domain, iova, page_size); mutex_unlock(&domain->api_lock); - domain_flush_tlb_pde(domain); + iommu_flush_tlb_pde(domain); return get_order(unmap_size); } diff --git a/trunk/arch/x86/kernel/amd_iommu_init.c b/trunk/arch/x86/kernel/amd_iommu_init.c index 9179c21120a8..246d727b65b7 100644 --- a/trunk/arch/x86/kernel/amd_iommu_init.c +++ b/trunk/arch/x86/kernel/amd_iommu_init.c @@ -137,7 +137,6 @@ int amd_iommus_present; /* IOMMUs have a non-present cache? */ bool amd_iommu_np_cache __read_mostly; -bool amd_iommu_iotlb_sup __read_mostly = true; /* * The ACPI table parsing functions set this variable on an error @@ -181,12 +180,6 @@ static u32 dev_table_size; /* size of the device table */ static u32 alias_table_size; /* size of the alias table */ static u32 rlookup_table_size; /* size if the rlookup table */ -/* - * This function flushes all internal caches of - * the IOMMU used by this driver. - */ -extern void iommu_flush_all_caches(struct amd_iommu *iommu); - static inline void update_last_devid(u16 devid) { if (devid > amd_iommu_last_bdf) @@ -300,23 +293,9 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) /* Function to enable the hardware */ static void iommu_enable(struct amd_iommu *iommu) { - static const char * const feat_str[] = { - "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", - "IA", "GA", "HE", "PC", NULL - }; - int i; - - printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", + printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", dev_name(&iommu->dev->dev), iommu->cap_ptr); - if (iommu->cap & (1 << IOMMU_CAP_EFR)) { - printk(KERN_CONT " extended features: "); - for (i = 0; feat_str[i]; ++i) - if (iommu_feature(iommu, (1ULL << i))) - printk(KERN_CONT " %s", feat_str[i]); - } - printk(KERN_CONT "\n"); - iommu_feature_enable(iommu, CONTROL_IOMMU_EN); } @@ -672,7 +651,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) static void __init init_iommu_from_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; - u32 range, misc, low, high; + u32 range, misc; int i, j; pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, @@ -688,15 +667,6 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) MMIO_GET_LD(range)); iommu->evt_msi_num = MMIO_MSI_NUM(misc); - if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) - amd_iommu_iotlb_sup = false; - - /* read extended feature bits */ - low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); - high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); - - iommu->features = ((u64)high << 32) | low; - if (!is_rd890_iommu(iommu->dev)) return; @@ -1034,11 +1004,10 @@ static int iommu_setup_msi(struct amd_iommu *iommu) if (pci_enable_msi(iommu->dev)) return 1; - r = request_threaded_irq(iommu->dev->irq, - 
amd_iommu_int_handler, - amd_iommu_int_thread, - 0, "AMD-Vi", - iommu->dev); + r = request_irq(iommu->dev->irq, amd_iommu_int_handler, + IRQF_SAMPLE_RANDOM, + "AMD-Vi", + NULL); if (r) { pci_disable_msi(iommu->dev); @@ -1275,7 +1244,6 @@ static void enable_iommus(void) iommu_set_exclusion_range(iommu); iommu_init_msi(iommu); iommu_enable(iommu); - iommu_flush_all_caches(iommu); } } @@ -1306,8 +1274,8 @@ static void amd_iommu_resume(void) * we have to flush after the IOMMUs are enabled because a * disabled IOMMU will never execute the commands we send */ - for_each_iommu(iommu) - iommu_flush_all_caches(iommu); + amd_iommu_flush_all_devices(); + amd_iommu_flush_all_domains(); } static int amd_iommu_suspend(void) diff --git a/trunk/arch/x86/kernel/apb_timer.c b/trunk/arch/x86/kernel/apb_timer.c index 289e92862fd9..cd1ffed4ee22 100644 --- a/trunk/arch/x86/kernel/apb_timer.c +++ b/trunk/arch/x86/kernel/apb_timer.c @@ -177,6 +177,7 @@ static struct clocksource clocksource_apbt = { .rating = APBT_CLOCKSOURCE_RATING, .read = apbt_read_clocksource, .mask = APBT_MASK, + .shift = APBT_SHIFT, .flags = CLOCK_SOURCE_IS_CONTINUOUS, .resume = apbt_restart_clocksource, }; @@ -542,7 +543,14 @@ static int apbt_clocksource_register(void) if (t1 == apbt_read_clocksource(&clocksource_apbt)) panic("APBT counter not counting. APBT disabled\n"); - clocksource_register_khz(&clocksource_apbt, (u32)apbt_freq*1000); + /* + * initialize and register APBT clocksource + * convert that to ns/clock cycle + * mult = (ns/c) * 2^APBT_SHIFT + */ + clocksource_apbt.mult = div_sc(MSEC_PER_SEC, + (unsigned long) apbt_freq, APBT_SHIFT); + clocksource_register(&clocksource_apbt); return 0; } diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c index 45fd33d1fd3a..68df09bba92e 100644 --- a/trunk/arch/x86/kernel/apic/io_apic.c +++ b/trunk/arch/x86/kernel/apic/io_apic.c @@ -128,8 +128,8 @@ static int __init parse_noapic(char *str) } early_param("noapic", parse_noapic); -static int io_apic_setup_irq_pin(unsigned int irq, int node, - struct io_apic_irq_attr *attr); +static int io_apic_setup_irq_pin_once(unsigned int irq, int node, + struct io_apic_irq_attr *attr); /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ void mp_save_irq(struct mpc_intsrc *m) @@ -3570,7 +3570,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) } #endif /* CONFIG_HT_IRQ */ -static int +int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) { struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); @@ -3585,8 +3585,8 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) return ret; } -int io_apic_setup_irq_pin_once(unsigned int irq, int node, - struct io_apic_irq_attr *attr) +static int io_apic_setup_irq_pin_once(unsigned int irq, int node, + struct io_apic_irq_attr *attr) { unsigned int id = attr->ioapic, pin = attr->ioapic_pin; int ret; diff --git a/trunk/arch/x86/kernel/apic/x2apic_uv_x.c b/trunk/arch/x86/kernel/apic/x2apic_uv_x.c index 7acd2d2ac965..33b10a0fc095 100644 --- a/trunk/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/trunk/arch/x86/kernel/apic/x2apic_uv_x.c @@ -37,13 +37,6 @@ #include #include #include -#include - -/* BMC sets a bit this MMR non-zero before sending an NMI */ -#define UVH_NMI_MMR UVH_SCRATCH5 -#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8) -#define UV_NMI_PENDING_MASK (1UL << 63) -DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count); DEFINE_PER_CPU(int, x2apic_extra_bits); @@ -649,46 +642,18 @@ void __cpuinit 
uv_cpu_init(void) */ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) { - unsigned long real_uv_nmi; - int bid; - if (reason != DIE_NMIUNKNOWN) return NOTIFY_OK; if (in_crash_kexec) /* do nothing if entering the crash kernel */ return NOTIFY_OK; - /* - * Each blade has an MMR that indicates when an NMI has been sent - * to cpus on the blade. If an NMI is detected, atomically - * clear the MMR and update a per-blade NMI count used to - * cause each cpu on the blade to notice a new NMI. - */ - bid = uv_numa_blade_id(); - real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); - - if (unlikely(real_uv_nmi)) { - spin_lock(&uv_blade_info[bid].nmi_lock); - real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); - if (real_uv_nmi) { - uv_blade_info[bid].nmi_count++; - uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK); - } - spin_unlock(&uv_blade_info[bid].nmi_lock); - } - - if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) - return NOTIFY_DONE; - - __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; - - /* - * Use a lock so only one cpu prints at a time. - * This prevents intermixed output. + * Use a lock so only one cpu prints at a time + * to prevent intermixed output. */ spin_lock(&uv_nmi_lock); - pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id()); + pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); dump_stack(); spin_unlock(&uv_nmi_lock); @@ -696,8 +661,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) } static struct notifier_block uv_dump_stack_nmi_nb = { - .notifier_call = uv_handle_nmi, - .priority = NMI_LOCAL_LOW_PRIOR - 1, + .notifier_call = uv_handle_nmi }; void uv_register_nmi_notifier(void) @@ -756,9 +720,8 @@ void __init uv_system_init(void) printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); - uv_blade_info = kzalloc(bytes, GFP_KERNEL); + uv_blade_info = kmalloc(bytes, GFP_KERNEL); BUG_ON(!uv_blade_info); - for (blade = 0; blade < uv_num_possible_blades(); blade++) uv_blade_info[blade].memory_nid = -1; @@ -784,7 +747,6 @@ void __init uv_system_init(void) uv_blade_info[blade].pnode = pnode; uv_blade_info[blade].nr_possible_cpus = 0; uv_blade_info[blade].nr_online_cpus = 0; - spin_lock_init(&uv_blade_info[blade].nmi_lock); max_pnode = max(pnode, max_pnode); blade++; } diff --git a/trunk/arch/x86/kernel/apm_32.c b/trunk/arch/x86/kernel/apm_32.c index 3bfa02235965..adee12e0da1f 100644 --- a/trunk/arch/x86/kernel/apm_32.c +++ b/trunk/arch/x86/kernel/apm_32.c @@ -1238,6 +1238,7 @@ static int suspend(int vetoable) dpm_suspend_noirq(PMSG_SUSPEND); local_irq_disable(); + sysdev_suspend(PMSG_SUSPEND); syscore_suspend(); local_irq_enable(); @@ -1257,6 +1258,7 @@ static int suspend(int vetoable) err = (err == APM_SUCCESS) ? 
0 : -EIO; syscore_resume(); + sysdev_resume(); local_irq_enable(); dpm_resume_noirq(PMSG_RESUME); @@ -1280,6 +1282,7 @@ static void standby(void) dpm_suspend_noirq(PMSG_SUSPEND); local_irq_disable(); + sysdev_suspend(PMSG_SUSPEND); syscore_suspend(); local_irq_enable(); @@ -1289,6 +1292,7 @@ static void standby(void) local_irq_disable(); syscore_resume(); + sysdev_resume(); local_irq_enable(); dpm_resume_noirq(PMSG_RESUME); diff --git a/trunk/arch/x86/kernel/cpu/Makefile b/trunk/arch/x86/kernel/cpu/Makefile index 6042981d0309..3f0ebe429a01 100644 --- a/trunk/arch/x86/kernel/cpu/Makefile +++ b/trunk/arch/x86/kernel/cpu/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_X86_MCE) += mcheck/ obj-$(CONFIG_MTRR) += mtrr/ +obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/trunk/arch/x86/kernel/cpu/amd.c b/trunk/arch/x86/kernel/cpu/amd.c index 6f9d1f6063e9..3532d3bf8105 100644 --- a/trunk/arch/x86/kernel/cpu/amd.c +++ b/trunk/arch/x86/kernel/cpu/amd.c @@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) #endif /* As a rule processors have APIC timer running in deep C states */ - if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400)) + if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) set_cpu_cap(c, X86_FEATURE_ARAT); /* diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index 173f3a3fa1a6..e2ced0074a45 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -565,7 +565,8 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); - c->x86_capability[9] = ebx; + if (eax > 0) + c->x86_capability[9] = ebx; } /* AMD-defined flags: level 0x80000001 */ diff --git a/trunk/drivers/cpufreq/Kconfig.x86 b/trunk/arch/x86/kernel/cpu/cpufreq/Kconfig similarity index 97% rename from trunk/drivers/cpufreq/Kconfig.x86 rename to trunk/arch/x86/kernel/cpu/cpufreq/Kconfig index 343f84760487..870e6cc6ad28 100644 --- a/trunk/drivers/cpufreq/Kconfig.x86 +++ b/trunk/arch/x86/kernel/cpu/cpufreq/Kconfig @@ -1,7 +1,15 @@ # -# x86 CPU Frequency scaling drivers +# CPU Frequency scaling # +menu "CPU Frequency scaling" + +source "drivers/cpufreq/Kconfig" + +if CPU_FREQ + +comment "CPUFreq processor drivers" + config X86_PCC_CPUFREQ tristate "Processor Clocking Control interface driver" depends on ACPI && ACPI_PROCESSOR @@ -253,3 +261,6 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK option lets the probing code bypass some of those checks if the parameter "relaxed_check=1" is passed to the module. +endif # CPU_FREQ + +endmenu diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/Makefile b/trunk/arch/x86/kernel/cpu/cpufreq/Makefile new file mode 100644 index 000000000000..bd54bf67e6fb --- /dev/null +++ b/trunk/arch/x86/kernel/cpu/cpufreq/Makefile @@ -0,0 +1,21 @@ +# Link order matters. K8 is preferred to ACPI because of firmware bugs in early +# K8 systems. ACPI is preferred to all other hardware-specific drivers. +# speedstep-* is preferred over p4-clockmod. 
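One side of the cpu/common.c hunk above gates the read of capability word 9 (CPUID leaf 7, subleaf 0, EBX) on the leaf returning a non-zero EAX; this is the same word the cpufeature.h hunk earlier adds or drops X86_FEATURE_ERMS from. A user-space sketch of querying that leaf, assuming x86 with GCC or Clang inline assembly; the helper name is invented:

#include <stdio.h>
#include <stdint.h>

/* Run CPUID with the given leaf/subleaf (x86 only, GCC/Clang asm). */
static void cpuid_count(uint32_t leaf, uint32_t sub,
                        uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
        __asm__ volatile("cpuid"
                         : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                         : "0"(leaf), "2"(sub));
}

int main(void)
{
        uint32_t a, b, c, d;

        /* Leaf 0 reports how many standard leaves the CPU implements. */
        cpuid_count(0, 0, &a, &b, &c, &d);
        if (a < 7) {
                printf("no structured extended feature leaf\n");
                return 0;
        }

        /* Leaf 7, subleaf 0: EBX carries the word-9 feature bits. */
        cpuid_count(7, 0, &a, &b, &c, &d);
        printf("CPUID.7.0: EBX=0x%08x (EAX reports max subleaf %u)\n",
               (unsigned)b, (unsigned)a);
        return 0;
}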
+ +obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o +obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o +obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o +obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o +obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o +obj-$(CONFIG_X86_LONGHAUL) += longhaul.o +obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o +obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o +obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o +obj-$(CONFIG_X86_LONGRUN) += longrun.o +obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o +obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o +obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o +obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o +obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o +obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o +obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o diff --git a/trunk/drivers/cpufreq/acpi-cpufreq.c b/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c similarity index 94% rename from trunk/drivers/cpufreq/acpi-cpufreq.c rename to trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4e04e1274388..a2baafb2fe6d 100644 --- a/trunk/drivers/cpufreq/acpi-cpufreq.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -47,6 +47,9 @@ #include #include "mperf.h" +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "acpi-cpufreq", msg) + MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); @@ -230,7 +233,7 @@ static u32 get_cur_val(const struct cpumask *mask) cmd.mask = mask; drv_read(&cmd); - pr_debug("get_cur_val = %u\n", cmd.val); + dprintk("get_cur_val = %u\n", cmd.val); return cmd.val; } @@ -241,7 +244,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) unsigned int freq; unsigned int cached_freq; - pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); + dprintk("get_cur_freq_on_cpu (%d)\n", cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { @@ -258,7 +261,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) data->resume = 1; } - pr_debug("cur freq = %u\n", freq); + dprintk("cur freq = %u\n", freq); return freq; } @@ -290,7 +293,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, unsigned int i; int result = 0; - pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); + dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { @@ -310,11 +313,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, next_perf_state = data->freq_table[next_state].index; if (perf->state == next_perf_state) { if (unlikely(data->resume)) { - pr_debug("Called after resume, resetting to P%d\n", + dprintk("Called after resume, resetting to P%d\n", next_perf_state); data->resume = 0; } else { - pr_debug("Already at target state (P%d)\n", + dprintk("Already at target state (P%d)\n", next_perf_state); goto out; } @@ -354,7 +357,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, if (acpi_pstate_strict) { if (!check_freqs(cmd.mask, freqs.new, data)) { - pr_debug("acpi_cpufreq_target failed (%d)\n", + dprintk("acpi_cpufreq_target failed (%d)\n", policy->cpu); result = -EAGAIN; goto out; @@ -375,7 +378,7 @@ static int acpi_cpufreq_verify(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - pr_debug("acpi_cpufreq_verify\n"); + dprintk("acpi_cpufreq_verify\n"); return 
cpufreq_frequency_table_verify(policy, data->freq_table); } @@ -430,11 +433,11 @@ static void free_acpi_perf_data(void) static int __init acpi_cpufreq_early_init(void) { unsigned int i; - pr_debug("acpi_cpufreq_early_init\n"); + dprintk("acpi_cpufreq_early_init\n"); acpi_perf_data = alloc_percpu(struct acpi_processor_performance); if (!acpi_perf_data) { - pr_debug("Memory allocation error for acpi_perf_data.\n"); + dprintk("Memory allocation error for acpi_perf_data.\n"); return -ENOMEM; } for_each_possible_cpu(i) { @@ -516,7 +519,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) static int blacklisted; #endif - pr_debug("acpi_cpufreq_cpu_init\n"); + dprintk("acpi_cpufreq_cpu_init\n"); #ifdef CONFIG_SMP if (blacklisted) @@ -563,7 +566,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) /* capability check */ if (perf->state_count <= 1) { - pr_debug("No P-States\n"); + dprintk("No P-States\n"); result = -ENODEV; goto err_unreg; } @@ -575,11 +578,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: - pr_debug("SYSTEM IO addr space\n"); + dprintk("SYSTEM IO addr space\n"); data->cpu_feature = SYSTEM_IO_CAPABLE; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: - pr_debug("HARDWARE addr space\n"); + dprintk("HARDWARE addr space\n"); if (!check_est_cpu(cpu)) { result = -ENODEV; goto err_unreg; @@ -587,7 +590,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; break; default: - pr_debug("Unknown addr space %d\n", + dprintk("Unknown addr space %d\n", (u32) (perf->control_register.space_id)); result = -ENODEV; goto err_unreg; @@ -658,9 +661,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (cpu_has(c, X86_FEATURE_APERFMPERF)) acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; - pr_debug("CPU%u - ACPI performance management activated.\n", cpu); + dprintk("CPU%u - ACPI performance management activated.\n", cpu); for (i = 0; i < perf->state_count; i++) - pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", + dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", (i == perf->state ? 
'*' : ' '), i, (u32) perf->states[i].core_frequency, (u32) perf->states[i].power, @@ -691,7 +694,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - pr_debug("acpi_cpufreq_cpu_exit\n"); + dprintk("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); @@ -709,7 +712,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - pr_debug("acpi_cpufreq_resume\n"); + dprintk("acpi_cpufreq_resume\n"); data->resume = 1; @@ -740,7 +743,7 @@ static int __init acpi_cpufreq_init(void) if (acpi_disabled) return 0; - pr_debug("acpi_cpufreq_init\n"); + dprintk("acpi_cpufreq_init\n"); ret = acpi_cpufreq_early_init(); if (ret) @@ -755,7 +758,7 @@ static int __init acpi_cpufreq_init(void) static void __exit acpi_cpufreq_exit(void) { - pr_debug("acpi_cpufreq_exit\n"); + dprintk("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); diff --git a/trunk/drivers/cpufreq/cpufreq-nforce2.c b/trunk/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c similarity index 97% rename from trunk/drivers/cpufreq/cpufreq-nforce2.c rename to trunk/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index 7bac808804f3..141abebc4516 100644 --- a/trunk/drivers/cpufreq/cpufreq-nforce2.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -57,6 +57,8 @@ MODULE_PARM_DESC(min_fsb, "Minimum FSB to use, if not defined: current FSB - 50"); #define PFX "cpufreq-nforce2: " +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "cpufreq-nforce2", msg) /** * nforce2_calc_fsb - calculate FSB @@ -268,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - pr_debug("Old CPU frequency %d kHz, new %d kHz\n", + dprintk("Old CPU frequency %d kHz, new %d kHz\n", freqs.old, freqs.new); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -280,7 +282,7 @@ static int nforce2_target(struct cpufreq_policy *policy, printk(KERN_ERR PFX "Changing FSB to %d failed\n", target_fsb); else - pr_debug("Changed FSB successfully to %d\n", + dprintk("Changed FSB successfully to %d\n", target_fsb); /* Enable IRQs */ diff --git a/trunk/drivers/cpufreq/e_powersaver.c b/trunk/arch/x86/kernel/cpu/cpufreq/e_powersaver.c similarity index 100% rename from trunk/drivers/cpufreq/e_powersaver.c rename to trunk/arch/x86/kernel/cpu/cpufreq/e_powersaver.c diff --git a/trunk/drivers/cpufreq/elanfreq.c b/trunk/arch/x86/kernel/cpu/cpufreq/elanfreq.c similarity index 100% rename from trunk/drivers/cpufreq/elanfreq.c rename to trunk/arch/x86/kernel/cpu/cpufreq/elanfreq.c diff --git a/trunk/drivers/cpufreq/gx-suspmod.c b/trunk/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c similarity index 95% rename from trunk/drivers/cpufreq/gx-suspmod.c rename to trunk/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c index ffe1f2c92ed3..32974cf84232 100644 --- a/trunk/drivers/cpufreq/gx-suspmod.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c @@ -142,6 +142,9 @@ module_param(max_duration, int, 0444); #define POLICY_MIN_DIV 20 +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "gx-suspmod", msg) + /** * we can detect a core multipiler from dir0_lsb * from GX1 datasheet p.56, @@ -188,7 +191,7 @@ static __init struct pci_dev *gx_detect_chipset(void) /* check if CPU is a MediaGX or a Geode. 
*/ if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) && (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { - pr_debug("error: no MediaGX/Geode processor found!\n"); + dprintk("error: no MediaGX/Geode processor found!\n"); return NULL; } @@ -198,7 +201,7 @@ static __init struct pci_dev *gx_detect_chipset(void) return gx_pci; } - pr_debug("error: no supported chipset found!\n"); + dprintk("error: no supported chipset found!\n"); return NULL; } @@ -302,14 +305,14 @@ static void gx_set_cpuspeed(unsigned int khz) break; default: local_irq_restore(flags); - pr_debug("fatal: try to set unknown chipset.\n"); + dprintk("fatal: try to set unknown chipset.\n"); return; } } else { suscfg = gx_params->pci_suscfg & ~(SUSMOD); gx_params->off_duration = 0; gx_params->on_duration = 0; - pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n"); + dprintk("suspend modulation disabled: cpu runs 100%% speed.\n"); } gx_write_byte(PCI_MODOFF, gx_params->off_duration); @@ -324,9 +327,9 @@ static void gx_set_cpuspeed(unsigned int khz) cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", + dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", gx_params->on_duration * 32, gx_params->off_duration * 32); - pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); + dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); } /**************************************************************** @@ -425,8 +428,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) stock_freq = maxfreq; curfreq = gx_get_cpuspeed(0); - pr_debug("cpu max frequency is %d.\n", maxfreq); - pr_debug("cpu current frequency is %dkHz.\n", curfreq); + dprintk("cpu max frequency is %d.\n", maxfreq); + dprintk("cpu current frequency is %dkHz.\n", curfreq); /* setup basic struct for cpufreq API */ policy->cpu = 0; @@ -472,7 +475,7 @@ static int __init cpufreq_gx_init(void) if (max_duration > 0xff) max_duration = 0xff; - pr_debug("geode suspend modulation available.\n"); + dprintk("geode suspend modulation available.\n"); params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); if (params == NULL) diff --git a/trunk/drivers/cpufreq/longhaul.c b/trunk/arch/x86/kernel/cpu/cpufreq/longhaul.c similarity index 98% rename from trunk/drivers/cpufreq/longhaul.c rename to trunk/arch/x86/kernel/cpu/cpufreq/longhaul.c index f47d26e2a135..cf48cdd6907d 100644 --- a/trunk/drivers/cpufreq/longhaul.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/longhaul.c @@ -77,6 +77,9 @@ static int scale_voltage; static int disable_acpi_c3; static int revid_errata; +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "longhaul", msg) + /* Clock ratios multiplied by 10 */ static int mults[32]; @@ -84,6 +87,7 @@ static int eblcr[32]; static int longhaul_version; static struct cpufreq_frequency_table *longhaul_table; +#ifdef CONFIG_CPU_FREQ_DEBUG static char speedbuffer[8]; static char *print_speed(int speed) @@ -102,6 +106,7 @@ static char *print_speed(int speed) return speedbuffer; } +#endif static unsigned int calc_speed(int mult) @@ -270,7 +275,7 @@ static void longhaul_setstate(unsigned int table_index) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", + dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: preempt_disable(); @@ -455,12 +460,12 @@ static int __cpuinit longhaul_get_ranges(void) break; } - pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n", + dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n", minmult/10, minmult%10, maxmult/10, maxmult%10); highest_speed = calc_speed(maxmult); lowest_speed = calc_speed(minmult); - pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, + dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, print_speed(lowest_speed/1000), print_speed(highest_speed/1000)); diff --git a/trunk/drivers/cpufreq/longhaul.h b/trunk/arch/x86/kernel/cpu/cpufreq/longhaul.h similarity index 100% rename from trunk/drivers/cpufreq/longhaul.h rename to trunk/arch/x86/kernel/cpu/cpufreq/longhaul.h diff --git a/trunk/drivers/cpufreq/longrun.c b/trunk/arch/x86/kernel/cpu/cpufreq/longrun.c similarity index 94% rename from trunk/drivers/cpufreq/longrun.c rename to trunk/arch/x86/kernel/cpu/cpufreq/longrun.c index 34ea359b370e..d9f51367666b 100644 --- a/trunk/drivers/cpufreq/longrun.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/longrun.c @@ -15,6 +15,9 @@ #include #include +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "longrun", msg) + static struct cpufreq_driver longrun_driver; /** @@ -37,14 +40,14 @@ static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy) u32 msr_lo, msr_hi; rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); - pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi); + dprintk("longrun flags are %x - %x\n", msr_lo, msr_hi); if (msr_lo & 0x01) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); - pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi); + dprintk("longrun ctrl is %x - %x\n", msr_lo, msr_hi); msr_lo &= 0x0000007F; msr_hi &= 0x0000007F; @@ -147,7 +150,7 @@ static unsigned int longrun_get(unsigned int cpu) return 0; cpuid(0x80860007, &eax, &ebx, &ecx, &edx); - pr_debug("cpuid eax is %u\n", eax); + dprintk("cpuid eax is %u\n", eax); return eax * 1000; } @@ -193,7 +196,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); *high_freq = msr_lo * 1000; /* to kHz */ - pr_debug("longrun table interface told %u - %u kHz\n", + dprintk("longrun table interface told %u - %u kHz\n", *low_freq, *high_freq); if (*low_freq > *high_freq) @@ -204,7 +207,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, /* set the upper border to the value determined during TSC init */ *high_freq = (cpu_khz / 1000); *high_freq = *high_freq * 1000; - pr_debug("high frequency is %u kHz\n", *high_freq); + dprintk("high frequency is %u kHz\n", *high_freq); /* get current borders */ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); @@ -230,7 +233,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, /* restore values */ wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); } - pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax); + dprintk("percentage is %u %%, freq is %u MHz\n", ecx, eax); /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) * eqals @@ -246,7 +249,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, edx = ((eax - ebx) * 100) / (100 - ecx); *low_freq = edx * 1000; /* back to kHz */ - pr_debug("low frequency is %u kHz\n", *low_freq); + dprintk("low frequency is %u kHz\n", *low_freq); if (*low_freq > *high_freq) *low_freq = *high_freq; diff --git a/trunk/drivers/cpufreq/mperf.c b/trunk/arch/x86/kernel/cpu/cpufreq/mperf.c similarity index 100% rename from trunk/drivers/cpufreq/mperf.c rename to trunk/arch/x86/kernel/cpu/cpufreq/mperf.c diff --git a/trunk/drivers/cpufreq/mperf.h b/trunk/arch/x86/kernel/cpu/cpufreq/mperf.h similarity index 100% rename from trunk/drivers/cpufreq/mperf.h rename to trunk/arch/x86/kernel/cpu/cpufreq/mperf.h diff --git a/trunk/drivers/cpufreq/p4-clockmod.c b/trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c similarity index 96% rename from trunk/drivers/cpufreq/p4-clockmod.c rename to trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 6be3e0760c26..52c93648e492 100644 --- a/trunk/drivers/cpufreq/p4-clockmod.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -35,6 +35,8 @@ #include "speedstep-lib.h" #define PFX "p4-clockmod: " +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "p4-clockmod", msg) /* * Duty Cycle (3bits), note DC_DISABLE is not specified in @@ -64,7 +66,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); if (l & 0x01) - pr_debug("CPU#%d currently thermal throttled\n", cpu); + dprintk("CPU#%d currently thermal throttled\n", cpu); if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT)) @@ -72,10 +74,10 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); if (newstate == DC_DISABLE) { - pr_debug("CPU#%d disabling modulation\n", cpu); + dprintk("CPU#%d disabling modulation\n", cpu); wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); } else { - pr_debug("CPU#%d setting duty cycle to %d%%\n", + dprintk("CPU#%d setting duty cycle to %d%%\n", cpu, ((125 * newstate) / 10)); /* bits 63 - 5 : reserved * bit 4 : enable/disable @@ -215,7 +217,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) case 0x0f11: case 0x0f12: has_N44_O17_errata[policy->cpu] = 1; - pr_debug("has errata -- disabling low frequencies\n"); + dprintk("has errata -- disabling low frequencies\n"); } if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && diff --git a/trunk/drivers/cpufreq/pcc-cpufreq.c b/trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c similarity index 91% rename from trunk/drivers/cpufreq/pcc-cpufreq.c rename to trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 7b0603eb0129..755a31e0f5b0 100644 --- a/trunk/drivers/cpufreq/pcc-cpufreq.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c @@ -39,7 +39,7 @@ #include -#define PCC_VERSION "1.10.00" +#define PCC_VERSION "1.00.00" #define POLL_LOOPS 300 #define CMD_COMPLETE 0x1 @@ -48,6 +48,9 @@ #define BUF_SZ 4 +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "pcc-cpufreq", msg) + struct pcc_register_resource { u8 descriptor; u16 length; @@ -99,7 +102,7 @@ static struct acpi_generic_address doorbell; static u64 doorbell_preserve; static u64 doorbell_write; -static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49, +static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f, 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; struct pcc_cpu { @@ -149,7 +152,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) spin_lock(&pcc_lock); - pr_debug("get: get_freq for CPU %d\n", cpu); + dprintk("get: get_freq for CPU %d\n", cpu); pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); input_buffer = 0x1; @@ -167,7 +170,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) status = ioread16(&pcch_hdr->status); if (status != CMD_COMPLETE) { - pr_debug("get: FAILED: for CPU %d, status is %d\n", + dprintk("get: FAILED: for CPU %d, status is %d\n", cpu, status); goto cmd_incomplete; } @@ -175,14 +178,14 @@ static unsigned int pcc_get_freq(unsigned int cpu) curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) / 100) * 1000); - pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is " - "0x%p, contains a value of: 0x%x. Speed is: %d MHz\n", + dprintk("get: SUCCESS: (virtual) output_offset for cpu %d is " + "0x%x, contains a value of: 0x%x. 
Speed is: %d MHz\n", cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), output_buffer, curr_freq); freq_limit = (output_buffer >> 8) & 0xff; if (freq_limit != 0xff) { - pr_debug("get: frequency for cpu %d is being temporarily" + dprintk("get: frequency for cpu %d is being temporarily" " capped at %d\n", cpu, curr_freq); } @@ -209,8 +212,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, cpu = policy->cpu; pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); - pr_debug("target: CPU %d should go to target freq: %d " - "(virtual) input_offset is 0x%p\n", + dprintk("target: CPU %d should go to target freq: %d " + "(virtual) input_offset is 0x%x\n", cpu, target_freq, (pcch_virt_addr + pcc_cpu_data->input_offset)); @@ -231,14 +234,14 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, status = ioread16(&pcch_hdr->status); if (status != CMD_COMPLETE) { - pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", + dprintk("target: FAILED for cpu %d, with status: 0x%x\n", cpu, status); goto cmd_incomplete; } iowrite16(0, &pcch_hdr->status); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); + dprintk("target: was SUCCESSFUL for cpu %d\n", cpu); spin_unlock(&pcc_lock); return 0; @@ -290,7 +293,7 @@ static int pcc_get_offset(int cpu) memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); - pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data " + dprintk("pcc_get_offset: for CPU %d: pcc_cpu_data " "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); out_free: @@ -407,7 +410,7 @@ static int __init pcc_cpufreq_probe(void) if (ACPI_SUCCESS(status)) { ret = pcc_cpufreq_do_osc(&osc_handle); if (ret) - pr_debug("probe: _OSC evaluation did not succeed\n"); + dprintk("probe: _OSC evaluation did not succeed\n"); /* Firmware's use of _OSC is optional */ ret = 0; } @@ -430,7 +433,7 @@ static int __init pcc_cpufreq_probe(void) mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; - pr_debug("probe: mem_resource descriptor: 0x%x," + dprintk("probe: mem_resource descriptor: 0x%x," " length: %d, space_id: %d, resource_usage: %d," " type_specific: %d, granularity: 0x%llx," " minimum: 0x%llx, maximum: 0x%llx," @@ -450,13 +453,13 @@ static int __init pcc_cpufreq_probe(void) pcch_virt_addr = ioremap_nocache(mem_resource->minimum, mem_resource->address_length); if (pcch_virt_addr == NULL) { - pr_debug("probe: could not map shared mem region\n"); + dprintk("probe: could not map shared mem region\n"); goto out_free; } pcch_hdr = pcch_virt_addr; - pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); - pr_debug("probe: PCCH header is at physical address: 0x%llx," + dprintk("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); + dprintk("probe: PCCH header is at physical address: 0x%llx," " signature: 0x%x, length: %d bytes, major: %d, minor: %d," " supported features: 0x%x, command field: 0x%x," " status field: 0x%x, nominal latency: %d us\n", @@ -466,7 +469,7 @@ static int __init pcc_cpufreq_probe(void) ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), ioread32(&pcch_hdr->latency)); - pr_debug("probe: min time between commands: %d us," + dprintk("probe: min time between commands: %d us," " max time between commands: %d us," " nominal CPU frequency: %d MHz," " minimum CPU frequency: %d MHz," @@ -491,7 +494,7 @@ static int __init pcc_cpufreq_probe(void) 
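/*
 * Illustrative sketch, not taken from the patched sources: pcc_get_freq()
 * above derives the running frequency from the PCCH "nominal" field and
 * the command output buffer -- the low byte holds a percentage of the
 * nominal frequency (in MHz), and the next byte an optional cap, where
 * 0xff means "not capped".  The helpers below just repeat that arithmetic
 * outside the driver; the names are invented for the example.
 */
static unsigned int demo_pcc_cur_khz(unsigned int nominal_mhz,
				     unsigned int output_buffer)
{
	/* low byte = percent of nominal; convert the result from MHz to kHz */
	return ((nominal_mhz * (output_buffer & 0xff)) / 100) * 1000;
}

static int demo_pcc_is_capped(unsigned int output_buffer)
{
	/* mirrors the freq_limit != 0xff check in pcc_get_freq() */
	return ((output_buffer >> 8) & 0xff) != 0xff;
}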
doorbell.access_width = 64; doorbell.address = reg_resource->address; - pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " + dprintk("probe: doorbell: space_id is %d, bit_width is %d, " "bit_offset is %d, access_width is %d, address is 0x%llx\n", doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, doorbell.access_width, reg_resource->address); @@ -512,7 +515,7 @@ static int __init pcc_cpufreq_probe(void) doorbell_write = member->integer.value; - pr_debug("probe: doorbell_preserve: 0x%llx," + dprintk("probe: doorbell_preserve: 0x%llx," " doorbell_write: 0x%llx\n", doorbell_preserve, doorbell_write); @@ -547,7 +550,7 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) result = pcc_get_offset(cpu); if (result) { - pr_debug("init: PCCP evaluation failed\n"); + dprintk("init: PCCP evaluation failed\n"); goto out; } @@ -558,12 +561,12 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->cur = pcc_get_freq(cpu); if (!policy->cur) { - pr_debug("init: Unable to get current CPU frequency\n"); + dprintk("init: Unable to get current CPU frequency\n"); result = -EINVAL; goto out; } - pr_debug("init: policy->max is %d, policy->min is %d\n", + dprintk("init: policy->max is %d, policy->min is %d\n", policy->max, policy->min); out: return result; @@ -594,7 +597,7 @@ static int __init pcc_cpufreq_init(void) ret = pcc_cpufreq_probe(); if (ret) { - pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n"); + dprintk("pcc_cpufreq_init: PCCH evaluation failed\n"); return ret; } diff --git a/trunk/drivers/cpufreq/powernow-k6.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k6.c similarity index 100% rename from trunk/drivers/cpufreq/powernow-k6.c rename to trunk/arch/x86/kernel/cpu/cpufreq/powernow-k6.c diff --git a/trunk/drivers/cpufreq/powernow-k7.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c similarity index 95% rename from trunk/drivers/cpufreq/powernow-k7.c rename to trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index d71d9f372359..4a45fd6e41ba 100644 --- a/trunk/drivers/cpufreq/powernow-k7.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -68,6 +68,7 @@ union powernow_acpi_control_t { }; #endif +#ifdef CONFIG_CPU_FREQ_DEBUG /* divide by 1000 to get VCore voltage in V. */ static const int mobile_vid_table[32] = { 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, @@ -75,6 +76,7 @@ static const int mobile_vid_table[32] = { 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, 1075, 1050, 1025, 1000, 975, 950, 925, 0, }; +#endif /* divide by 10 to get FID. */ static const int fid_codes[32] = { @@ -101,6 +103,9 @@ static unsigned int fsb; static unsigned int latency; static char have_a0; +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "powernow-k7", msg) + static int check_fsb(unsigned int fsbspeed) { int delta; @@ -204,7 +209,7 @@ static int get_ranges(unsigned char *pst) vid = *pst++; powernow_table[j].index |= (vid << 8); /* upper 8 bits */ - pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " + dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000, vid, mobile_vid_table[vid]/1000, @@ -362,7 +367,7 @@ static int powernow_acpi_init(void) unsigned int speed, speed_mhz; pc.val = (unsigned long) state->control; - pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", + dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", i, (u32) state->core_frequency, (u32) state->power, @@ -396,7 +401,7 @@ static int powernow_acpi_init(void) invalidate_entry(i); } - pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " + dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed_mhz, vid, mobile_vid_table[vid]/1000, @@ -404,7 +409,7 @@ static int powernow_acpi_init(void) if (state->core_frequency != speed_mhz) { state->core_frequency = speed_mhz; - pr_debug(" Corrected ACPI frequency to %d\n", + dprintk(" Corrected ACPI frequency to %d\n", speed_mhz); } @@ -448,8 +453,8 @@ static int powernow_acpi_init(void) static void print_pst_entry(struct pst_s *pst, unsigned int j) { - pr_debug("PST:%d (@%p)\n", j, pst); - pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", + dprintk("PST:%d (@%p)\n", j, pst); + dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); } @@ -469,20 +474,20 @@ static int powernow_decode_bios(int maxfid, int startvid) p = phys_to_virt(i); if (memcmp(p, "AMDK7PNOW!", 10) == 0) { - pr_debug("Found PSB header at %p\n", p); + dprintk("Found PSB header at %p\n", p); psb = (struct psb_s *) p; - pr_debug("Table version: 0x%x\n", psb->tableversion); + dprintk("Table version: 0x%x\n", psb->tableversion); if (psb->tableversion != 0x12) { printk(KERN_INFO PFX "Sorry, only v1.2 tables" " supported right now\n"); return -ENODEV; } - pr_debug("Flags: 0x%x\n", psb->flags); + dprintk("Flags: 0x%x\n", psb->flags); if ((psb->flags & 1) == 0) - pr_debug("Mobile voltage regulator\n"); + dprintk("Mobile voltage regulator\n"); else - pr_debug("Desktop voltage regulator\n"); + dprintk("Desktop voltage regulator\n"); latency = psb->settlingtime; if (latency < 100) { @@ -492,9 +497,9 @@ static int powernow_decode_bios(int maxfid, int startvid) "Correcting.\n", latency); latency = 100; } - pr_debug("Settling Time: %d microseconds.\n", + dprintk("Settling Time: %d microseconds.\n", psb->settlingtime); - pr_debug("Has %d PST tables. (Only dumping ones " + dprintk("Has %d PST tables. (Only dumping ones " "relevant to this CPU).\n", psb->numpst); @@ -645,7 +650,7 @@ static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) printk(KERN_WARNING PFX "can not determine bus frequency\n"); return -EINVAL; } - pr_debug("FSB: %3dMHz\n", fsb/1000); + dprintk("FSB: %3dMHz\n", fsb/1000); if (dmi_check_system(powernow_dmi_table) || acpi_force) { printk(KERN_INFO PFX "PSB/PST known to be broken. 
" diff --git a/trunk/drivers/cpufreq/powernow-k7.h b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.h similarity index 100% rename from trunk/drivers/cpufreq/powernow-k7.h rename to trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.h diff --git a/trunk/drivers/cpufreq/powernow-k8.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c similarity index 93% rename from trunk/drivers/cpufreq/powernow-k8.c rename to trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 83479b6fb9a1..2368e38327b3 100644 --- a/trunk/drivers/cpufreq/powernow-k8.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -139,7 +139,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) } do { if (i++ > 10000) { - pr_debug("detected change pending stuck\n"); + dprintk("detected change pending stuck\n"); return 1; } rdmsr(MSR_FIDVID_STATUS, lo, hi); @@ -176,7 +176,7 @@ static void fidvid_msr_init(void) fid = lo & MSR_S_LO_CURRENT_FID; lo = fid | (vid << MSR_C_LO_VID_SHIFT); hi = MSR_C_HI_STP_GNT_BENIGN; - pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); + dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); wrmsr(MSR_FIDVID_CTL, lo, hi); } @@ -196,7 +196,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) lo |= (data->currvid << MSR_C_LO_VID_SHIFT); lo |= MSR_C_LO_INIT_FID_VID; - pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n", + dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", fid, lo, data->plllock * PLL_LOCK_CONVERSION); do { @@ -244,7 +244,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) lo |= (vid << MSR_C_LO_VID_SHIFT); lo |= MSR_C_LO_INIT_FID_VID; - pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n", + dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", vid, lo, STOP_GRANT_5NS); do { @@ -325,7 +325,7 @@ static int transition_fid_vid(struct powernow_k8_data *data, return 1; } - pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", + dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", smp_processor_id(), data->currfid, data->currvid); return 0; @@ -339,7 +339,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 savefid = data->currfid; u32 maxvid, lo, rvomult = 1; - pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " + dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " "reqvid 0x%x, rvo 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqvid, data->rvo); @@ -349,12 +349,12 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, rvosteps *= rvomult; rdmsr(MSR_FIDVID_STATUS, lo, maxvid); maxvid = 0x1f & (maxvid >> 16); - pr_debug("ph1 maxvid=0x%x\n", maxvid); + dprintk("ph1 maxvid=0x%x\n", maxvid); if (reqvid < maxvid) /* lower numbers are higher voltages */ reqvid = maxvid; while (data->currvid > reqvid) { - pr_debug("ph1: curr 0x%x, req vid 0x%x\n", + dprintk("ph1: curr 0x%x, req vid 0x%x\n", data->currvid, reqvid); if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) return 1; @@ -365,7 +365,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, if (data->currvid == maxvid) { rvosteps = 0; } else { - pr_debug("ph1: changing vid for rvo, req 0x%x\n", + dprintk("ph1: changing vid for rvo, req 0x%x\n", data->currvid - 1); if (decrease_vid_code_by_step(data, data->currvid-1, 1)) return 1; @@ -382,7 +382,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, return 1; } - pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n", + dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, 
data->currvid); return 0; @@ -400,7 +400,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) return 0; } - pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " + dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " "reqfid 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqfid); @@ -457,7 +457,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) return 1; } - pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n", + dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, data->currvid); return 0; @@ -470,7 +470,7 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 savefid = data->currfid; u32 savereqvid = reqvid; - pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", + dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", smp_processor_id(), data->currfid, data->currvid); @@ -498,17 +498,17 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, return 1; if (savereqvid != data->currvid) { - pr_debug("ph3 failed, currvid 0x%x\n", data->currvid); + dprintk("ph3 failed, currvid 0x%x\n", data->currvid); return 1; } if (savefid != data->currfid) { - pr_debug("ph3 failed, currfid changed 0x%x\n", + dprintk("ph3 failed, currfid changed 0x%x\n", data->currfid); return 1; } - pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n", + dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, data->currvid); return 0; @@ -707,7 +707,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, return -EIO; } - pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); + dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); data->powernow_table = powernow_table; if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) print_basics(data); @@ -717,7 +717,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, (pst[j].vid == data->currvid)) return 0; - pr_debug("currfid/vid do not match PST, ignoring\n"); + dprintk("currfid/vid do not match PST, ignoring\n"); return 0; } @@ -739,36 +739,36 @@ static int find_psb_table(struct powernow_k8_data *data) if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) continue; - pr_debug("found PSB header at 0x%p\n", psb); + dprintk("found PSB header at 0x%p\n", psb); - pr_debug("table vers: 0x%x\n", psb->tableversion); + dprintk("table vers: 0x%x\n", psb->tableversion); if (psb->tableversion != PSB_VERSION_1_4) { printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); return -ENODEV; } - pr_debug("flags: 0x%x\n", psb->flags1); + dprintk("flags: 0x%x\n", psb->flags1); if (psb->flags1) { printk(KERN_ERR FW_BUG PFX "unknown flags\n"); return -ENODEV; } data->vstable = psb->vstable; - pr_debug("voltage stabilization time: %d(*20us)\n", + dprintk("voltage stabilization time: %d(*20us)\n", data->vstable); - pr_debug("flags2: 0x%x\n", psb->flags2); + dprintk("flags2: 0x%x\n", psb->flags2); data->rvo = psb->flags2 & 3; data->irt = ((psb->flags2) >> 2) & 3; mvs = ((psb->flags2) >> 4) & 3; data->vidmvs = 1 << mvs; data->batps = ((psb->flags2) >> 6) & 3; - pr_debug("ramp voltage offset: %d\n", data->rvo); - pr_debug("isochronous relief time: %d\n", data->irt); - pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); + dprintk("ramp voltage offset: %d\n", data->rvo); + dprintk("isochronous relief time: %d\n", data->irt); + dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); - pr_debug("numpst: 0x%x\n", psb->num_tables); + dprintk("numpst: 
0x%x\n", psb->num_tables); cpst = psb->num_tables; if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0)) { @@ -783,13 +783,13 @@ static int find_psb_table(struct powernow_k8_data *data) } data->plllock = psb->plllocktime; - pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); - pr_debug("maxfid: 0x%x\n", psb->maxfid); - pr_debug("maxvid: 0x%x\n", psb->maxvid); + dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); + dprintk("maxfid: 0x%x\n", psb->maxfid); + dprintk("maxvid: 0x%x\n", psb->maxvid); maxvid = psb->maxvid; data->numps = psb->numps; - pr_debug("numpstates: 0x%x\n", data->numps); + dprintk("numpstates: 0x%x\n", data->numps); return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid); } @@ -834,13 +834,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) u64 control, status; if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { - pr_debug("register performance failed: bad ACPI data\n"); + dprintk("register performance failed: bad ACPI data\n"); return -EIO; } /* verify the data contained in the ACPI structures */ if (data->acpi_data.state_count <= 1) { - pr_debug("No ACPI P-States\n"); + dprintk("No ACPI P-States\n"); goto err_out; } @@ -849,7 +849,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { - pr_debug("Invalid control/status registers (%llx - %llx)\n", + dprintk("Invalid control/status registers (%x - %x)\n", control, status); goto err_out; } @@ -858,7 +858,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1)), GFP_KERNEL); if (!powernow_table) { - pr_debug("powernow_table memory alloc failure\n"); + dprintk("powernow_table memory alloc failure\n"); goto err_out; } @@ -928,7 +928,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, } rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); if (!(hi & HW_PSTATE_VALID_MASK)) { - pr_debug("invalid pstate %d, ignoring\n", index); + dprintk("invalid pstate %d, ignoring\n", index); invalidate_entry(powernow_table, i); continue; } @@ -968,7 +968,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, vid = (control >> VID_SHIFT) & VID_MASK; } - pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); + dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); index = fid | (vid<<8); powernow_table[i].index = index; @@ -978,7 +978,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, /* verify frequency is OK */ if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { - pr_debug("invalid freq %u kHz, ignoring\n", freq); + dprintk("invalid freq %u kHz, ignoring\n", freq); invalidate_entry(powernow_table, i); continue; } @@ -986,7 +986,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, /* verify voltage is OK - * BIOSs are using "off" to indicate invalid */ if (vid == VID_OFF) { - pr_debug("invalid vid %u, ignoring\n", vid); + dprintk("invalid vid %u, ignoring\n", vid); invalidate_entry(powernow_table, i); continue; } @@ -1047,7 +1047,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, int res, i; struct cpufreq_freqs freqs; - pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); + dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); /* fid/vid correctness check for k8 */ /* fid are the lower 8 bits of 
the index we stored into @@ -1057,18 +1057,18 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, fid = data->powernow_table[index].index & 0xFF; vid = (data->powernow_table[index].index & 0xFF00) >> 8; - pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); + dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); if (query_current_values_with_pending_wait(data)) return 1; if ((data->currvid == vid) && (data->currfid == fid)) { - pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n", + dprintk("target matches current values (fid 0x%x, vid 0x%x)\n", fid, vid); return 0; } - pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n", + dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n", smp_processor_id(), fid, vid); freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); @@ -1096,7 +1096,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, int res, i; struct cpufreq_freqs freqs; - pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); + dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); /* get MSR index for hardware pstate transition */ pstate = index & HW_PSTATE_MASK; @@ -1156,14 +1156,14 @@ static int powernowk8_target(struct cpufreq_policy *pol, goto err_out; } - pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", + dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", pol->cpu, targfreq, pol->min, pol->max, relation); if (query_current_values_with_pending_wait(data)) goto err_out; if (cpu_family != CPU_HW_PSTATE) { - pr_debug("targ: curr fid 0x%x, vid 0x%x\n", + dprintk("targ: curr fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); if ((checkvid != data->currvid) || @@ -1319,7 +1319,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) data->currpstate); else pol->cur = find_khz_freq_from_fid(data->currfid); - pr_debug("policy current frequency %d kHz\n", pol->cur); + dprintk("policy current frequency %d kHz\n", pol->cur); /* min/max the cpu is capable of */ if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { @@ -1337,10 +1337,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); if (cpu_family == CPU_HW_PSTATE) - pr_debug("cpu_init done, current pstate 0x%x\n", + dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate); else - pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", + dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); per_cpu(powernow_data, pol->cpu) = data; @@ -1586,7 +1586,7 @@ static int __cpuinit powernowk8_init(void) /* driver entry point for term */ static void __exit powernowk8_exit(void) { - pr_debug("exit\n"); + dprintk("exit\n"); if (boot_cpu_has(X86_FEATURE_CPB)) { msrs_free(msrs); diff --git a/trunk/drivers/cpufreq/powernow-k8.h b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.h similarity index 98% rename from trunk/drivers/cpufreq/powernow-k8.h rename to trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index 3744d26cdc2b..df3529b1c02d 100644 --- a/trunk/drivers/cpufreq/powernow-k8.h +++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -211,6 +211,8 @@ struct pst_s { u8 vid; }; +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg) + static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid, u32 regfid); static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); diff --git a/trunk/drivers/cpufreq/sc520_freq.c b/trunk/arch/x86/kernel/cpu/cpufreq/sc520_freq.c similarity index 95% rename from trunk/drivers/cpufreq/sc520_freq.c rename to trunk/arch/x86/kernel/cpu/cpufreq/sc520_freq.c index 1e205e6b1727..435a996a613a 100644 --- a/trunk/drivers/cpufreq/sc520_freq.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/sc520_freq.c @@ -29,6 +29,8 @@ static __u8 __iomem *cpuctl; +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "sc520_freq", msg) #define PFX "sc520_freq: " static struct cpufreq_frequency_table sc520_freq_table[] = { @@ -64,7 +66,7 @@ static void sc520_freq_set_cpu_state(unsigned int state) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - pr_debug("attempting to set frequency to %i kHz\n", + dprintk("attempting to set frequency to %i kHz\n", sc520_freq_table[state].frequency); local_irq_disable(); @@ -159,7 +161,7 @@ static int __init sc520_freq_init(void) /* Test if we have the right hardware */ if (c->x86_vendor != X86_VENDOR_AMD || c->x86 != 4 || c->x86_model != 9) { - pr_debug("no Elan SC520 processor found!\n"); + dprintk("no Elan SC520 processor found!\n"); return -ENODEV; } cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); diff --git a/trunk/drivers/cpufreq/speedstep-centrino.c b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c similarity index 96% rename from trunk/drivers/cpufreq/speedstep-centrino.c rename to trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 6ea3455def21..9b1ff37de46a 100644 --- a/trunk/drivers/cpufreq/speedstep-centrino.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -29,6 +29,9 @@ #define PFX "speedstep-centrino: " #define MAINTAINER "cpufreq@vger.kernel.org" +#define dprintk(msg...) 
\ + cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) + #define INTEL_MSR_RANGE (0xffff) struct cpu_id @@ -241,7 +244,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) if (model->cpu_id == NULL) { /* No match at all */ - pr_debug("no support for CPU model \"%s\": " + dprintk("no support for CPU model \"%s\": " "send /proc/cpuinfo to " MAINTAINER "\n", cpu->x86_model_id); return -ENOENT; @@ -249,15 +252,15 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) if (model->op_points == NULL) { /* Matched a non-match */ - pr_debug("no table support for CPU model \"%s\"\n", + dprintk("no table support for CPU model \"%s\"\n", cpu->x86_model_id); - pr_debug("try using the acpi-cpufreq driver\n"); + dprintk("try using the acpi-cpufreq driver\n"); return -ENOENT; } per_cpu(centrino_model, policy->cpu) = model; - pr_debug("found \"%s\": max frequency: %dkHz\n", + dprintk("found \"%s\": max frequency: %dkHz\n", model->model_name, model->max_freq); return 0; @@ -366,7 +369,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; if (!per_cpu(centrino_cpu, policy->cpu)) { - pr_debug("found unsupported CPU with " + dprintk("found unsupported CPU with " "Enhanced SpeedStep: send /proc/cpuinfo to " MAINTAINER "\n"); return -ENODEV; @@ -382,7 +385,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; - pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l); + dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); wrmsr(MSR_IA32_MISC_ENABLE, l, h); /* check to see if it stuck */ @@ -399,7 +402,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) /* 10uS transition latency */ policy->cur = freq; - pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur); + dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); ret = cpufreq_frequency_table_cpuinfo(policy, per_cpu(centrino_model, policy->cpu)->op_points); @@ -495,7 +498,7 @@ static int centrino_target (struct cpufreq_policy *policy, good_cpu = j; if (good_cpu >= nr_cpu_ids) { - pr_debug("couldn't limit to CPUs in this domain\n"); + dprintk("couldn't limit to CPUs in this domain\n"); retval = -EAGAIN; if (first_cpu) { /* We haven't started the transition yet. */ @@ -509,7 +512,7 @@ static int centrino_target (struct cpufreq_policy *policy, if (first_cpu) { rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); if (msr == (oldmsr & 0xffff)) { - pr_debug("no change needed - msr was and needs " + dprintk("no change needed - msr was and needs " "to be %x\n", oldmsr); retval = 0; goto out; @@ -518,7 +521,7 @@ static int centrino_target (struct cpufreq_policy *policy, freqs.old = extract_clock(oldmsr, cpu, 0); freqs.new = extract_clock(msr, cpu, 0); - pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", + dprintk("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); for_each_cpu(k, policy->cpus) { diff --git a/trunk/drivers/cpufreq/speedstep-ich.c b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c similarity index 92% rename from trunk/drivers/cpufreq/speedstep-ich.c rename to trunk/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index a748ce782fee..561758e95180 100644 --- a/trunk/drivers/cpufreq/speedstep-ich.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -53,6 +53,10 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { }; +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-ich", msg) + + /** * speedstep_find_register - read the PMBASE address * @@ -76,7 +80,7 @@ static int speedstep_find_register(void) return -ENODEV; } - pr_debug("pmbase is 0x%x\n", pmbase); + dprintk("pmbase is 0x%x\n", pmbase); return 0; } @@ -102,13 +106,13 @@ static void speedstep_set_state(unsigned int state) /* read state */ value = inb(pmbase + 0x50); - pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); /* write new state */ value &= 0xFE; value |= state; - pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); + dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); /* Disable bus master arbitration */ pm2_blk = inb(pmbase + 0x20); @@ -128,10 +132,10 @@ static void speedstep_set_state(unsigned int state) /* Enable IRQs */ local_irq_restore(flags); - pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); if (state == (value & 0x1)) - pr_debug("change to %u MHz succeeded\n", + dprintk("change to %u MHz succeeded\n", speedstep_get_frequency(speedstep_processor) / 1000); else printk(KERN_ERR "cpufreq: change failed - I/O error\n"); @@ -161,7 +165,7 @@ static int speedstep_activate(void) pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); if (!(value & 0x08)) { value |= 0x08; - pr_debug("activating SpeedStep (TM) registers\n"); + dprintk("activating SpeedStep (TM) registers\n"); pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); } @@ -214,7 +218,7 @@ static unsigned int speedstep_detect_chipset(void) return 2; /* 2-M */ if (hostbridge->revision < 5) { - pr_debug("hostbridge does not support speedstep\n"); + dprintk("hostbridge does not support speedstep\n"); speedstep_chipset_dev = NULL; pci_dev_put(hostbridge); return 0; @@ -242,7 +246,7 @@ static unsigned int speedstep_get(unsigned int cpu) if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) BUG(); - pr_debug("detected %u kHz as current frequency\n", speed); + dprintk("detected %u kHz as current frequency\n", speed); return speed; } @@ -272,7 +276,7 @@ static int speedstep_target(struct cpufreq_policy *policy, freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = policy->cpu; - pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); + dprintk("transiting from %u to %u kHz\n", freqs.old, freqs.new); /* no transition necessary */ if (freqs.old == freqs.new) @@ -347,7 +351,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) if (!speed) return -EIO; - pr_debug("currently at %s speed setting - %i MHz\n", + dprintk("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? 
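/*
 * Illustrative sketch, not taken from the patched sources:
 * speedstep_set_state() above changes only bit 0 of the byte read from
 * pmbase + 0x50 -- clear the bit, OR in the requested state index, write
 * it back, and later compare (value & 0x1) against the state to confirm
 * the switch took effect.  Plain-C version of that read-modify-write,
 * without the I/O port access; the function name is made up.
 */
static unsigned char demo_speedstep_encode(unsigned char value,
					   unsigned int state)
{
	value &= 0xFE;		/* drop the old speed-select bit */
	value |= state & 0x1;	/* requested state index (low/high) */
	return value;
}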
"low" : "high", (speed / 1000)); @@ -401,14 +405,14 @@ static int __init speedstep_init(void) /* detect processor */ speedstep_processor = speedstep_detect_processor(); if (!speedstep_processor) { - pr_debug("Intel(R) SpeedStep(TM) capable processor " + dprintk("Intel(R) SpeedStep(TM) capable processor " "not found\n"); return -ENODEV; } /* detect chipset */ if (!speedstep_detect_chipset()) { - pr_debug("Intel(R) SpeedStep(TM) for this chipset not " + dprintk("Intel(R) SpeedStep(TM) for this chipset not " "(yet) available.\n"); return -ENODEV; } diff --git a/trunk/drivers/cpufreq/speedstep-lib.c b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c similarity index 90% rename from trunk/drivers/cpufreq/speedstep-lib.c rename to trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index 8af2d2fd9d51..a94ec6be69fa 100644 --- a/trunk/drivers/cpufreq/speedstep-lib.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -18,6 +18,9 @@ #include #include "speedstep-lib.h" +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-lib", msg) + #define PFX "speedstep-lib: " #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK @@ -72,7 +75,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) /* read MSR 0x2a - we only need the low 32 bits */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + dprintk("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); msr_tmp = msr_lo; /* decode the FSB */ @@ -86,7 +89,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) /* decode the multiplier */ if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { - pr_debug("workaround for early PIIIs\n"); + dprintk("workaround for early PIIIs\n"); msr_lo &= 0x03c00000; } else msr_lo &= 0x0bc00000; @@ -97,7 +100,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) j++; } - pr_debug("speed is %u\n", + dprintk("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; @@ -109,7 +112,7 @@ static unsigned int pentiumM_get_frequency(void) u32 msr_lo, msr_tmp; rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); /* see table B-2 of 24547212.pdf */ if (msr_lo & 0x00040000) { @@ -119,7 +122,7 @@ static unsigned int pentiumM_get_frequency(void) } msr_tmp = (msr_lo >> 22) & 0x1f; - pr_debug("bits 22-26 are 0x%x, speed is %u\n", + dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000)); return msr_tmp * 100 * 1000; @@ -157,11 +160,11 @@ static unsigned int pentium_core_get_frequency(void) } rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", + dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); msr_tmp = (msr_lo >> 22) & 0x1f; - pr_debug("bits 22-26 are 0x%x, speed is %u\n", + dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb)); ret = (msr_tmp * fsb); @@ -187,7 +190,7 @@ static unsigned int pentium4_get_frequency(void) rdmsr(0x2c, msr_lo, msr_hi); - pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); + dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); /* decode the FSB: see IA-32 Intel (C) Architecture Software * Developer's Manual, Volume 3: System Prgramming Guide, @@ -214,7 
+217,7 @@ static unsigned int pentium4_get_frequency(void) /* Multiplier. */ mult = msr_lo >> 24; - pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", + dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult)); ret = (fsb * mult); @@ -254,7 +257,7 @@ unsigned int speedstep_detect_processor(void) struct cpuinfo_x86 *c = &cpu_data(0); u32 ebx, msr_lo, msr_hi; - pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model); + dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); if ((c->x86_vendor != X86_VENDOR_INTEL) || ((c->x86 != 6) && (c->x86 != 0xF))) @@ -269,7 +272,7 @@ unsigned int speedstep_detect_processor(void) ebx = cpuid_ebx(0x00000001); ebx &= 0x000000FF; - pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); + dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); switch (c->x86_mask) { case 4: @@ -324,7 +327,7 @@ unsigned int speedstep_detect_processor(void) /* cpuid_ebx(1) is 0x04 for desktop PIII, * 0x06 for mobile PIII-M */ ebx = cpuid_ebx(0x00000001); - pr_debug("ebx is %x\n", ebx); + dprintk("ebx is %x\n", ebx); ebx &= 0x000000FF; @@ -341,7 +344,7 @@ unsigned int speedstep_detect_processor(void) /* all mobile PIII Coppermines have FSB 100 MHz * ==> sort out a few desktop PIIIs. */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); - pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", + dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); msr_lo &= 0x00c0000; if (msr_lo != 0x0080000) @@ -354,12 +357,12 @@ unsigned int speedstep_detect_processor(void) * bit 56 or 57 is set */ rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); - pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", + dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi); if ((msr_hi & (1<<18)) && (relaxed_check ? 
1 : (msr_hi & (3<<24)))) { if (c->x86_mask == 0x01) { - pr_debug("early PIII version\n"); + dprintk("early PIII version\n"); return SPEEDSTEP_CPU_PIII_C_EARLY; } else return SPEEDSTEP_CPU_PIII_C; @@ -390,14 +393,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) return -EINVAL; - pr_debug("trying to determine both speeds\n"); + dprintk("trying to determine both speeds\n"); /* get current speed */ prev_speed = speedstep_get_frequency(processor); if (!prev_speed) return -EIO; - pr_debug("previous speed is %u\n", prev_speed); + dprintk("previous speed is %u\n", prev_speed); local_irq_save(flags); @@ -409,7 +412,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, goto out; } - pr_debug("low speed is %u\n", *low_speed); + dprintk("low speed is %u\n", *low_speed); /* start latency measurement */ if (transition_latency) @@ -428,7 +431,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, goto out; } - pr_debug("high speed is %u\n", *high_speed); + dprintk("high speed is %u\n", *high_speed); if (*low_speed == *high_speed) { ret = -ENODEV; @@ -442,7 +445,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, if (transition_latency) { *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + tv2.tv_usec - tv1.tv_usec; - pr_debug("transition latency is %u uSec\n", *transition_latency); + dprintk("transition latency is %u uSec\n", *transition_latency); /* convert uSec to nSec and add 20% for safety reasons */ *transition_latency *= 1200; diff --git a/trunk/drivers/cpufreq/speedstep-lib.h b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h similarity index 100% rename from trunk/drivers/cpufreq/speedstep-lib.h rename to trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h diff --git a/trunk/drivers/cpufreq/speedstep-smi.c b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c similarity index 90% rename from trunk/drivers/cpufreq/speedstep-smi.c rename to trunk/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c index c76ead3490bf..91bc25b67bc1 100644 --- a/trunk/drivers/cpufreq/speedstep-smi.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c @@ -55,6 +55,9 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { * of DMA activity going on? */ #define SMI_TRIES 5 +#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ + "speedstep-smi", msg) + /** * speedstep_smi_ownership */ @@ -67,7 +70,7 @@ static int speedstep_smi_ownership(void) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); magic = virt_to_phys(magic_data); - pr_debug("trying to obtain ownership with command %x at port %x\n", + dprintk("trying to obtain ownership with command %x at port %x\n", command, smi_port); __asm__ __volatile__( @@ -82,7 +85,7 @@ static int speedstep_smi_ownership(void) : "memory" ); - pr_debug("result is %x\n", result); + dprintk("result is %x\n", result); return result; } @@ -103,13 +106,13 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) u32 function = GET_SPEEDSTEP_FREQS; if (!(ist_info.event & 0xFFFF)) { - pr_debug("bug #1422 -- can't read freqs from BIOS\n"); + dprintk("bug #1422 -- can't read freqs from BIOS\n"); return -ENODEV; } command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - pr_debug("trying to determine frequencies with command %x at port %x\n", + dprintk("trying to determine frequencies with command %x at port %x\n", command, smi_port); __asm__ __volatile__( @@ -126,7 +129,7 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) "d" (smi_port), "S" (0), "D" (0) ); - pr_debug("result %x, low_freq %u, high_freq %u\n", + dprintk("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz); /* abort if results are obviously incorrect... */ @@ -151,7 +154,7 @@ static int speedstep_get_state(void) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - pr_debug("trying to determine current setting with command %x " + dprintk("trying to determine current setting with command %x " "at port %x\n", command, smi_port); __asm__ __volatile__( @@ -165,7 +168,7 @@ static int speedstep_get_state(void) "d" (smi_port), "S" (0), "D" (0) ); - pr_debug("state is %x, result is %x\n", state, result); + dprintk("state is %x, result is %x\n", state, result); return state & 1; } @@ -191,13 +194,13 @@ static void speedstep_set_state(unsigned int state) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - pr_debug("trying to set frequency to state %u " + dprintk("trying to set frequency to state %u " "with command %x at port %x\n", state, command, smi_port); do { if (retry) { - pr_debug("retry %u, previous result %u, waiting...\n", + dprintk("retry %u, previous result %u, waiting...\n", retry, result); mdelay(retry * 50); } @@ -218,7 +221,7 @@ static void speedstep_set_state(unsigned int state) local_irq_restore(flags); if (new_state == state) - pr_debug("change to %u MHz succeeded after %u tries " + dprintk("change to %u MHz succeeded after %u tries " "with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result); @@ -289,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) result = speedstep_smi_ownership(); if (result) { - pr_debug("fails in acquiring ownership of a SMI interface.\n"); + dprintk("fails in acquiring ownership of a SMI interface.\n"); return -EINVAL; } @@ -301,7 +304,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) if (result) { /* fall back to speedstep_lib.c dection mechanism: * try both states out */ - pr_debug("could not detect low and high frequencies " + dprintk("could not detect low and high frequencies " "by SMI call.\n"); result = speedstep_get_freqs(speedstep_processor, low, high, @@ -309,18 +312,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) &speedstep_set_state); if (result) { - pr_debug("could not detect two different 
speeds" + dprintk("could not detect two different speeds" " -- aborting.\n"); return result; } else - pr_debug("workaround worked.\n"); + dprintk("workaround worked.\n"); } /* get current speed setting */ state = speedstep_get_state(); speed = speedstep_freqs[state].frequency; - pr_debug("currently at %s speed setting - %i MHz\n", + dprintk("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", (speed / 1000)); @@ -357,7 +360,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) int result = speedstep_smi_ownership(); if (result) - pr_debug("fails in re-acquiring ownership of a SMI interface.\n"); + dprintk("fails in re-acquiring ownership of a SMI interface.\n"); return result; } @@ -400,12 +403,12 @@ static int __init speedstep_init(void) } if (!speedstep_processor) { - pr_debug("No supported Intel CPU detected.\n"); + dprintk("No supported Intel CPU detected.\n"); return -ENODEV; } - pr_debug("signature:0x%.8ulx, command:0x%.8ulx, " - "event:0x%.8ulx, perf_level:0x%.8ulx.\n", + dprintk("signature:0x%.8lx, command:0x%.8lx, " + "event:0x%.8lx, perf_level:0x%.8lx.\n", ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); diff --git a/trunk/arch/x86/kernel/cpu/intel.c b/trunk/arch/x86/kernel/cpu/intel.c index fc73a34ba8c9..df86bc8c859d 100644 --- a/trunk/arch/x86/kernel/cpu/intel.c +++ b/trunk/arch/x86/kernel/cpu/intel.c @@ -29,10 +29,10 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { - u64 misc_enable; - /* Unmask CPUID levels if masked: */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { + u64 misc_enable; + rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { @@ -118,6 +118,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) * (model 2) with the same problem. */ if (c->x86 == 15) { + u64 misc_enable; + rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { @@ -128,19 +130,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) } } #endif - - /* - * If fast string is not enabled in IA32_MISC_ENABLE for any reason, - * clear the fast string and enhanced fast string CPU capabilities. 
- */ - if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); - if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { - printk(KERN_INFO "Disabled fast string operations\n"); - setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); - setup_clear_cpu_cap(X86_FEATURE_ERMS); - } - } } #ifdef CONFIG_X86_32 diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c index bb0adad35143..167f97b5596e 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -509,7 +509,6 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, out_free: if (b) { kobject_put(&b->kobj); - list_del(&b->miscj); kfree(b); } return err; diff --git a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c index 0f034460260d..6f8c5e9da97f 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -446,20 +446,18 @@ void intel_init_thermal(struct cpuinfo_x86 *c) */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); - h = lvtthmr_init; /* * The initial value of thermal LVT entries on all APs always reads * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI * sequence to them and LVT registers are reset to 0s except for * the mask bits which are set to 1s when APs receive INIT IPI. - * If BIOS takes over the thermal interrupt and sets its interrupt - * delivery mode to SMI (not fixed), it restores the value that the - * BIOS has programmed on AP based on BSP's info we saved since BIOS - * is always setting the same value for all threads/cores. + * Always restore the value that BIOS has programmed on AP based on + * BSP's info we saved since BIOS is always setting the same value + * for all threads/cores */ - if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED) - apic_write(APIC_LVTTHMR, lvtthmr_init); + apic_write(APIC_LVTTHMR, lvtthmr_init); + h = lvtthmr_init; if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { printk(KERN_DEBUG diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c index 3a0338b4b179..632e5dc9c9c0 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.c +++ b/trunk/arch/x86/kernel/cpu/perf_event.c @@ -31,7 +31,6 @@ #include #include #include -#include #if 0 #undef wrmsrl @@ -364,18 +363,12 @@ x86_perf_event_update(struct perf_event *event) return new_raw_count; } +/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ static inline int x86_pmu_addr_offset(int index) { - int offset; - - /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */ - alternative_io(ASM_NOP2, - "shll $1, %%eax", - X86_FEATURE_PERFCTR_CORE, - "=a" (offset), - "a" (index)); - - return offset; + if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) + return index << 1; + return index; } static inline unsigned int x86_pmu_config_addr(int index) @@ -620,8 +613,8 @@ static int x86_setup_perfctr(struct perf_event *event) /* * Branch tracing: */ - if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && - !attr->freq && hwc->sample_period == 1) { + if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && + (hwc->sample_period == 1)) { /* BTS is not supported by this architecture. */ if (!x86_pmu.bts_active) return -EOPNOTSUPP; @@ -1295,16 +1288,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) cpuc = &__get_cpu_var(cpu_hw_events); - /* - * Some chipsets need to unmask the LVTPC in a particular spot - * inside the nmi handler. 
As a result, the unmasking was pushed - * into all the nmi handlers. - * - * This generic handler doesn't seem to have any issues where the - * unmasking occurs so it was left at the top. - */ - apic_write(APIC_LVTPC, APIC_DM_NMI); - for (idx = 0; idx < x86_pmu.num_counters; idx++) { if (!test_bit(idx, cpuc->active_mask)) { /* @@ -1391,6 +1374,8 @@ perf_event_nmi_handler(struct notifier_block *self, return NOTIFY_DONE; } + apic_write(APIC_LVTPC, APIC_DM_NMI); + handled = x86_pmu.handle_irq(args->regs); if (!handled) return NOTIFY_DONE; @@ -1773,6 +1758,17 @@ static struct pmu pmu = { * callchain support */ +static void +backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + /* Ignore warnings */ +} + +static void backtrace_warning(void *data, char *msg) +{ + /* Ignore warnings */ +} + static int backtrace_stack(void *data, char *name) { return 0; @@ -1786,6 +1782,8 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops backtrace_ops = { + .warning = backtrace_warning, + .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack_bp, diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd.c b/trunk/arch/x86/kernel/cpu/perf_event_amd.c index fe29c1d2219e..cf4e369cea67 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_amd.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_amd.c @@ -96,14 +96,12 @@ static __initconst const u64 amd_hw_cache_event_ids */ static const u64 amd_perfmon_event_map[] = { - [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, - [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, - [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ - [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, }; static u64 amd_pmu_event_map(int hw_event) diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 41178c826c48..43fa20b13817 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -25,7 +25,7 @@ struct intel_percore { /* * Intel PerfMon, used on Core and later. 
*/ -static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = +static const u64 intel_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, @@ -36,7 +36,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; -static struct event_constraint intel_core_event_constraints[] __read_mostly = +static struct event_constraint intel_core_event_constraints[] = { INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ @@ -47,7 +47,7 @@ static struct event_constraint intel_core_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_core2_event_constraints[] __read_mostly = +static struct event_constraint intel_core2_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -70,7 +70,7 @@ static struct event_constraint intel_core2_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = +static struct event_constraint intel_nehalem_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -86,19 +86,19 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; -static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = +static struct extra_reg intel_nehalem_extra_regs[] = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), EVENT_EXTRA_END }; -static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly = +static struct event_constraint intel_nehalem_percore_constraints[] = { INTEL_EVENT_CONSTRAINT(0xb7, 0), EVENT_CONSTRAINT_END }; -static struct event_constraint intel_westmere_event_constraints[] __read_mostly = +static struct event_constraint intel_westmere_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -110,7 +110,7 @@ static struct event_constraint intel_westmere_event_constraints[] __read_mostly EVENT_CONSTRAINT_END }; -static struct event_constraint intel_snb_event_constraints[] __read_mostly = +static struct event_constraint intel_snb_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -123,21 +123,21 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; -static struct extra_reg intel_westmere_extra_regs[] __read_mostly = +static struct extra_reg intel_westmere_extra_regs[] = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), EVENT_EXTRA_END }; -static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = +static struct event_constraint intel_westmere_percore_constraints[] = { INTEL_EVENT_CONSTRAINT(0xb7, 0), INTEL_EVENT_CONSTRAINT(0xbb, 0), EVENT_CONSTRAINT_END }; -static struct event_constraint intel_gen_event_constraints[] __read_mostly = +static struct event_constraint intel_gen_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -184,23 +184,26 @@ static __initconst const u64 snb_hw_cache_event_ids }, }, [ C(LL ) ] = { + /* + * TBD: 
Need Off-core Response Performance Monitoring support + */ [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ + /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, }, [ C(OP_WRITE) ] = { - /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ + /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, }, [ C(OP_PREFETCH) ] = { - /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ + /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, }, }, [ C(DTLB) ] = { @@ -282,26 +285,26 @@ static __initconst const u64 westmere_hw_cache_event_ids }, [ C(LL ) ] = { [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ + /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, }, /* * Use RFO, not WRITEBACK, because a write miss would typically occur * on RFO. */ [ C(OP_WRITE) ] = { - /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ - [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ + /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01bb, + /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */ [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_PREFETCH) ] = { - /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ + /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01b7, + /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01bb, }, }, [ C(DTLB) ] = { @@ -349,36 +352,16 @@ static __initconst const u64 westmere_hw_cache_event_ids }; /* - * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits; - * See IA32 SDM Vol 3B 30.6.1.3 + * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3 */ -#define NHM_DMND_DATA_RD (1 << 0) -#define NHM_DMND_RFO (1 << 1) -#define NHM_DMND_IFETCH (1 << 2) -#define NHM_DMND_WB (1 << 3) -#define NHM_PF_DATA_RD (1 << 4) -#define NHM_PF_DATA_RFO (1 << 5) -#define NHM_PF_IFETCH (1 << 6) -#define NHM_OFFCORE_OTHER (1 << 7) -#define NHM_UNCORE_HIT (1 << 8) -#define NHM_OTHER_CORE_HIT_SNP (1 << 9) -#define NHM_OTHER_CORE_HITM (1 << 10) - /* reserved */ -#define NHM_REMOTE_CACHE_FWD (1 << 12) -#define NHM_REMOTE_DRAM (1 << 13) -#define NHM_LOCAL_DRAM (1 << 14) -#define NHM_NON_DRAM (1 << 15) - -#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM) - -#define NHM_DMND_READ (NHM_DMND_DATA_RD) -#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB) -#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO) - -#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) -#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD) -#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS) +#define DMND_DATA_RD (1 << 0) +#define DMND_RFO (1 << 1) +#define DMND_WB (1 << 3) +#define PF_DATA_RD (1 << 4) +#define PF_DATA_RFO (1 << 5) +#define RESP_UNCORE_HIT (1 << 8) +#define RESP_MISS (0xf600) /* non uncore hit */ static __initconst const u64 
nehalem_hw_cache_extra_regs [PERF_COUNT_HW_CACHE_MAX] @@ -387,16 +370,16 @@ static __initconst const u64 nehalem_hw_cache_extra_regs { [ C(LL ) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS, - [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS, + [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT, + [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS, }, [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS, - [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS, + [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT, + [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS, }, [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, - [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, + [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT, + [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS, }, } }; @@ -950,16 +933,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) cpuc = &__get_cpu_var(cpu_hw_events); - /* - * Some chipsets need to unmask the LVTPC in a particular spot - * inside the nmi handler. As a result, the unmasking was pushed - * into all the nmi handlers. - * - * This handler doesn't seem to have any issues with the unmasking - * so it was left at the top. - */ - apic_write(APIC_LVTPC, APIC_DM_NMI); - intel_pmu_disable_all(); handled = intel_pmu_drain_bts_buffer(); status = intel_pmu_get_status(); @@ -1025,9 +998,6 @@ intel_bts_constraints(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; unsigned int hw_event, bts_event; - if (event->attr.freq) - return NULL; - hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); @@ -1335,7 +1305,7 @@ static void intel_clovertown_quirks(void) * AJ106 could possibly be worked around by not allowing LBR * usage from PEBS, including the fixup. * AJ68 could possibly be worked around by always programming - * a pebs_event_reset[0] value and coping with the lost events. + * a pebs_event_reset[0] value and coping with the lost events. * * But taken together it might just make sense to not enable PEBS on * these chips. @@ -1439,23 +1409,6 @@ static __init int intel_pmu_init(void) x86_pmu.percore_constraints = intel_nehalem_percore_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; - - /* UOPS_ISSUED.STALLED_CYCLES */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; - /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; - - if (ebx & 0x40) { - /* - * Erratum AAJ80 detected, we work it around by using - * the BR_MISP_EXEC.ANY event. 
This will over-count - * branch-misses, but it's still much better than the - * architectural event which is often completely bogus: - */ - intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; - - pr_cont("erratum AAJ80 worked around, "); - } pr_cont("Nehalem events, "); break; @@ -1485,12 +1438,6 @@ static __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; x86_pmu.extra_regs = intel_westmere_extra_regs; - - /* UOPS_ISSUED.STALLED_CYCLES */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; - /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; - pr_cont("Westmere events, "); break; @@ -1502,12 +1449,6 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_events; - - /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; - /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1; - pr_cont("SandyBridge events, "); break; diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p4.c b/trunk/arch/x86/kernel/cpu/perf_event_p4.c index ead584fb6a7d..d1f77e2934a1 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_p4.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_p4.c @@ -468,7 +468,7 @@ static struct p4_event_bind p4_event_bind_map[] = { .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, .escr_emask = - P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), + P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_X87_ASSIST] = { @@ -912,7 +912,8 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - perf_sample_data_init(&data, 0); + data.addr = 0; + data.raw = NULL; cpuc = &__get_cpu_var(cpu_hw_events); @@ -949,20 +950,11 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) x86_pmu_stop(event, 0); } - if (handled) + if (handled) { + /* p4 quirk: unmask it again */ + apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); inc_irq_stat(apic_perf_irqs); - - /* - * When dealing with the unmasking of the LVTPC on P4 perf hw, it has - * been observed that the OVF bit flag has to be cleared first _before_ - * the LVTPC can be unmasked. - * - * The reason is the NMI line will continue to be asserted while the OVF - * bit is set. This causes a second NMI to generate if the LVTPC is - * unmasked before the OVF bit is cleared, leading to unknown NMI - * messages. 
- */ - apic_write(APIC_LVTPC, APIC_DM_NMI); + } return handled; } @@ -1196,7 +1188,7 @@ static __init int p4_pmu_init(void) { unsigned int low, high; - /* If we get stripped -- indexing fails */ + /* If we get stripped -- indexig fails */ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); rdmsr(MSR_IA32_MISC_ENABLE, low, high); diff --git a/trunk/arch/x86/kernel/devicetree.c b/trunk/arch/x86/kernel/devicetree.c index e90f08458e6b..706a9fb46a58 100644 --- a/trunk/arch/x86/kernel/devicetree.c +++ b/trunk/arch/x86/kernel/devicetree.c @@ -391,7 +391,7 @@ static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize, set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); - return io_apic_setup_irq_pin_once(*out_hwirq, cpu_to_node(0), &attr); + return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr); } static void __init ioapic_add_ofnode(struct device_node *np) diff --git a/trunk/arch/x86/kernel/dumpstack.c b/trunk/arch/x86/kernel/dumpstack.c index f478ff6877ef..e2a3f0606da4 100644 --- a/trunk/arch/x86/kernel/dumpstack.c +++ b/trunk/arch/x86/kernel/dumpstack.c @@ -135,6 +135,20 @@ print_context_stack_bp(struct thread_info *tinfo, } EXPORT_SYMBOL_GPL(print_context_stack_bp); + +static void +print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + printk(data); + print_symbol(msg, symbol); + printk("\n"); +} + +static void print_trace_warning(void *data, char *msg) +{ + printk("%s%s\n", (char *)data, msg); +} + static int print_trace_stack(void *data, char *name) { printk("%s <%s> ", (char *)data, name); @@ -152,6 +166,8 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops print_trace_ops = { + .warning = print_trace_warning, + .warning_symbol = print_trace_warning_symbol, .stack = print_trace_stack, .address = print_trace_address, .walk_stack = print_context_stack, diff --git a/trunk/arch/x86/kernel/hpet.c b/trunk/arch/x86/kernel/hpet.c index 6781765b3a0d..bfe8f729e086 100644 --- a/trunk/arch/x86/kernel/hpet.c +++ b/trunk/arch/x86/kernel/hpet.c @@ -217,7 +217,7 @@ static void hpet_reserve_platform_timers(unsigned int id) { } /* * Common hpet info */ -static unsigned long hpet_freq; +static unsigned long hpet_period; static void hpet_legacy_set_mode(enum clock_event_mode mode, struct clock_event_device *evt); @@ -232,6 +232,7 @@ static struct clock_event_device hpet_clockevent = { .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = hpet_legacy_set_mode, .set_next_event = hpet_legacy_next_event, + .shift = 32, .irq = 0, .rating = 50, }; @@ -288,13 +289,29 @@ static void hpet_legacy_clockevent_register(void) /* Start HPET legacy interrupts */ hpet_enable_legacy_int(); + /* + * The mult factor is defined as (include/linux/clockchips.h) + * mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h) + * hpet_period is in units of femtoseconds (per cycle), so + * mult/2^shift = cyc/ns = 10^6/hpet_period + * mult = (10^6 * 2^shift)/hpet_period + * mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period + */ + hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC, + hpet_period, hpet_clockevent.shift); + /* Calculate the min / max delta */ + hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, + &hpet_clockevent); + /* Setup minimum reprogramming delta. 
*/ + hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, + &hpet_clockevent); + /* * Start hpet with the boot cpu mask and make it * global after the IO_APIC has been initialized. */ hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); - clockevents_config_and_register(&hpet_clockevent, hpet_freq, - HPET_MIN_PROG_DELTA, 0x7FFFFFFF); + clockevents_register_device(&hpet_clockevent); global_clock_event = &hpet_clockevent; printk(KERN_DEBUG "hpet clockevent registered\n"); } @@ -532,6 +549,7 @@ static int hpet_setup_irq(struct hpet_dev *dev) static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) { struct clock_event_device *evt = &hdev->evt; + uint64_t hpet_freq; WARN_ON(cpu != smp_processor_id()); if (!(hdev->flags & HPET_DEV_VALID)) @@ -553,10 +571,24 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) evt->set_mode = hpet_msi_set_mode; evt->set_next_event = hpet_msi_next_event; - evt->cpumask = cpumask_of(hdev->cpu); + evt->shift = 32; - clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA, - 0x7FFFFFFF); + /* + * The period is a femto seconds value. We need to calculate the + * scaled math multiplication factor for nanosecond to hpet tick + * conversion. + */ + hpet_freq = FSEC_PER_SEC; + do_div(hpet_freq, hpet_period); + evt->mult = div_sc((unsigned long) hpet_freq, + NSEC_PER_SEC, evt->shift); + /* Calculate the max delta */ + evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt); + /* 5 usec minimum reprogramming delta. */ + evt->min_delta_ns = 5000; + + evt->cpumask = cpumask_of(hdev->cpu); + clockevents_register_device(evt); } #ifdef CONFIG_HPET @@ -760,6 +792,7 @@ static struct clocksource clocksource_hpet = { static int hpet_clocksource_register(void) { u64 start, now; + u64 hpet_freq; cycle_t t1; /* Start the counter */ @@ -786,7 +819,24 @@ static int hpet_clocksource_register(void) return -ENODEV; } + /* + * The definition of mult is (include/linux/clocksource.h) + * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc + * so we first need to convert hpet_period to ns/cyc units: + * mult/2^shift = ns/cyc = hpet_period/10^6 + * mult = (hpet_period * 2^shift)/10^6 + * mult = (hpet_period << shift)/FSEC_PER_NSEC + */ + + /* Need to convert hpet_period (fsec/cyc) to cyc/sec: + * + * cyc/sec = FSEC_PER_SEC/hpet_period(fsec/cyc) + * cyc/sec = (FSEC_PER_NSEC * NSEC_PER_SEC)/hpet_period + */ + hpet_freq = FSEC_PER_SEC; + do_div(hpet_freq, hpet_period); clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq); + return 0; } @@ -795,9 +845,7 @@ static int hpet_clocksource_register(void) */ int __init hpet_enable(void) { - unsigned long hpet_period; unsigned int id; - u64 freq; int i; if (!is_hpet_capable()) @@ -835,14 +883,6 @@ int __init hpet_enable(void) if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) goto out_nohpet; - /* - * The period is a femto seconds value. Convert it to a - * frequency. 
- */ - freq = FSEC_PER_SEC; - do_div(freq, hpet_period); - hpet_freq = freq; - /* * Read the HPET ID register to retrieve the IRQ routing * information and the number of channels diff --git a/trunk/arch/x86/kernel/i8253.c b/trunk/arch/x86/kernel/i8253.c index fb66dc9e36cb..2dfd31597443 100644 --- a/trunk/arch/x86/kernel/i8253.c +++ b/trunk/arch/x86/kernel/i8253.c @@ -93,6 +93,7 @@ static struct clock_event_device pit_ce = { .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = init_pit_timer, .set_next_event = pit_next_event, + .shift = 32, .irq = 0, }; @@ -107,12 +108,90 @@ void __init setup_pit_timer(void) * IO_APIC has been initialized. */ pit_ce.cpumask = cpumask_of(smp_processor_id()); + pit_ce.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, pit_ce.shift); + pit_ce.max_delta_ns = clockevent_delta2ns(0x7FFF, &pit_ce); + pit_ce.min_delta_ns = clockevent_delta2ns(0xF, &pit_ce); - clockevents_config_and_register(&pit_ce, CLOCK_TICK_RATE, 0xF, 0x7FFF); + clockevents_register_device(&pit_ce); global_clock_event = &pit_ce; } #ifndef CONFIG_X86_64 +/* + * Since the PIT overflows every tick, its not very useful + * to just read by itself. So use jiffies to emulate a free + * running counter: + */ +static cycle_t pit_read(struct clocksource *cs) +{ + static int old_count; + static u32 old_jifs; + unsigned long flags; + int count; + u32 jifs; + + raw_spin_lock_irqsave(&i8253_lock, flags); + /* + * Although our caller may have the read side of xtime_lock, + * this is now a seqlock, and we are cheating in this routine + * by having side effects on state that we cannot undo if + * there is a collision on the seqlock and our caller has to + * retry. (Namely, old_jifs and old_count.) So we must treat + * jiffies as volatile despite the lock. We read jiffies + * before latching the timer count to guarantee that although + * the jiffies value might be older than the count (that is, + * the counter may underflow between the last point where + * jiffies was incremented and the point where we latch the + * count), it cannot be newer. + */ + jifs = jiffies; + outb_pit(0x00, PIT_MODE); /* latch the count ASAP */ + count = inb_pit(PIT_CH0); /* read the latched count */ + count |= inb_pit(PIT_CH0) << 8; + + /* VIA686a test code... reset the latch if count > max + 1 */ + if (count > LATCH) { + outb_pit(0x34, PIT_MODE); + outb_pit(LATCH & 0xff, PIT_CH0); + outb_pit(LATCH >> 8, PIT_CH0); + count = LATCH - 1; + } + + /* + * It's possible for count to appear to go the wrong way for a + * couple of reasons: + * + * 1. The timer counter underflows, but we haven't handled the + * resulting interrupt and incremented jiffies yet. + * 2. Hardware problem with the timer, not giving us continuous time, + * the counter does small "jumps" upwards on some Pentium systems, + * (see c't 95/10 page 335 for Neptun bug.) + * + * Previous attempts to handle these cases intelligently were + * buggy, so we just do the simple thing now. 
+ */ + if (count > old_count && jifs == old_jifs) + count = old_count; + + old_count = count; + old_jifs = jifs; + + raw_spin_unlock_irqrestore(&i8253_lock, flags); + + count = (LATCH - 1) - count; + + return (cycle_t)(jifs * LATCH) + count; +} + +static struct clocksource pit_cs = { + .name = "pit", + .rating = 110, + .read = pit_read, + .mask = CLOCKSOURCE_MASK(32), + .mult = 0, + .shift = 20, +}; + static int __init init_pit_clocksource(void) { /* @@ -126,7 +205,10 @@ static int __init init_pit_clocksource(void) pit_ce.mode != CLOCK_EVT_MODE_PERIODIC) return 0; - return clocksource_i8253_init(); + pit_cs.mult = clocksource_hz2mult(CLOCK_TICK_RATE, pit_cs.shift); + + return clocksource_register(&pit_cs); } arch_initcall(init_pit_clocksource); + #endif /* !CONFIG_X86_64 */ diff --git a/trunk/arch/x86/kernel/kprobes.c b/trunk/arch/x86/kernel/kprobes.c index f1a6244d7d93..c969fd9d1566 100644 --- a/trunk/arch/x86/kernel/kprobes.c +++ b/trunk/arch/x86/kernel/kprobes.c @@ -1183,13 +1183,12 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long flags; /* This is possible if op is under delayed unoptimizing */ if (kprobe_disabled(&op->kp)) return; - local_irq_save(flags); + preempt_disable(); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); } else { @@ -1208,7 +1207,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } - local_irq_restore(flags); + preempt_enable_no_resched(); } static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) diff --git a/trunk/arch/x86/kernel/kvmclock.c b/trunk/arch/x86/kernel/kvmclock.c index 6389a6bca11b..f98d3eafe07a 100644 --- a/trunk/arch/x86/kernel/kvmclock.c +++ b/trunk/arch/x86/kernel/kvmclock.c @@ -26,6 +26,8 @@ #include #include +#define KVM_SCALE 22 + static int kvmclock = 1; static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME; static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK; @@ -118,6 +120,8 @@ static struct clocksource kvm_clock = { .read = kvm_clock_get_cycles, .rating = 400, .mask = CLOCKSOURCE_MASK(64), + .mult = 1 << KVM_SCALE, + .shift = KVM_SCALE, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -199,7 +203,7 @@ void __init kvmclock_init(void) machine_ops.crash_shutdown = kvm_crash_shutdown; #endif kvm_get_preset_lpj(); - clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); + clocksource_register(&kvm_clock); pv_info.paravirt_enabled = 1; pv_info.name = "KVM"; diff --git a/trunk/arch/x86/kernel/module.c b/trunk/arch/x86/kernel/module.c index 52f256f2cc81..ab23f1ad4bf1 100644 --- a/trunk/arch/x86/kernel/module.c +++ b/trunk/arch/x86/kernel/module.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/x86/kernel/amd_gart_64.c b/trunk/arch/x86/kernel/pci-gart_64.c similarity index 100% rename from trunk/arch/x86/kernel/amd_gart_64.c rename to trunk/arch/x86/kernel/pci-gart_64.c diff --git a/trunk/arch/x86/kernel/pci-iommu_table.c b/trunk/arch/x86/kernel/pci-iommu_table.c index 35ccf75696eb..55d745ec1181 100644 --- a/trunk/arch/x86/kernel/pci-iommu_table.c +++ b/trunk/arch/x86/kernel/pci-iommu_table.c @@ -50,14 +50,20 @@ void __init check_iommu_entries(struct iommu_table_entry *start, struct iommu_table_entry *finish) { struct iommu_table_entry *p, *q, *x; + char sym_p[KSYM_SYMBOL_LEN]; + char sym_q[KSYM_SYMBOL_LEN]; /* Simple cyclic dependency checker. 
*/ for (p = start; p < finish; p++) { q = find_dependents_of(start, finish, p); x = find_dependents_of(start, finish, q); if (p == x) { - printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n", - p->detect, q->detect); + sprint_symbol(sym_p, (unsigned long)p->detect); + sprint_symbol(sym_q, (unsigned long)q->detect); + + printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \ + " on %s and vice-versa. BREAKING IT.\n", + sym_p, sym_q); /* Heavy handed way..*/ x->depend = 0; } @@ -66,8 +72,12 @@ void __init check_iommu_entries(struct iommu_table_entry *start, for (p = start; p < finish; p++) { q = find_dependents_of(p, finish, p); if (q && q > p) { - printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n", - p->detect, q->detect); + sprint_symbol(sym_p, (unsigned long)p->detect); + sprint_symbol(sym_q, (unsigned long)q->detect); + + printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\ + "should be called before %s!\n", + sym_p, sym_q); } } } diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c index f65e5b521dbd..45892dc4b72a 100644 --- a/trunk/arch/x86/kernel/ptrace.c +++ b/trunk/arch/x86/kernel/ptrace.c @@ -608,9 +608,6 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) unsigned len, type; struct perf_event *bp; - if (ptrace_get_breakpoints(tsk) < 0) - return -ESRCH; - data &= ~DR_CONTROL_RESERVED; old_dr7 = ptrace_get_dr7(thread->ptrace_bps); restore: @@ -658,9 +655,6 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) } goto restore; } - - ptrace_put_breakpoints(tsk); - return ((orig_ret < 0) ? orig_ret : rc); } @@ -674,17 +668,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) if (n < HBP_NUM) { struct perf_event *bp; - - if (ptrace_get_breakpoints(tsk) < 0) - return -ESRCH; - bp = thread->ptrace_bps[n]; if (!bp) - val = 0; - else - val = bp->hw.info.address; - - ptrace_put_breakpoints(tsk); + return 0; + val = bp->hw.info.address; } else if (n == 6) { val = thread->debugreg6; } else if (n == 7) { @@ -699,10 +686,6 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, struct perf_event *bp; struct thread_struct *t = &tsk->thread; struct perf_event_attr attr; - int err = 0; - - if (ptrace_get_breakpoints(tsk) < 0) - return -ESRCH; if (!t->ptrace_bps[nr]) { ptrace_breakpoint_init(&attr); @@ -726,23 +709,24 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, * writing for the user. And anyway this is the previous * behaviour. */ - if (IS_ERR(bp)) { - err = PTR_ERR(bp); - goto put; - } + if (IS_ERR(bp)) + return PTR_ERR(bp); t->ptrace_bps[nr] = bp; } else { + int err; + bp = t->ptrace_bps[nr]; attr = bp->attr; attr.bp_addr = addr; err = modify_user_hw_breakpoint(bp, &attr); + if (err) + return err; } -put: - ptrace_put_breakpoints(tsk); - return err; + + return 0; } /* diff --git a/trunk/arch/x86/kernel/reboot_32.S b/trunk/arch/x86/kernel/reboot_32.S index 1d5c46df0d78..29092b38d816 100644 --- a/trunk/arch/x86/kernel/reboot_32.S +++ b/trunk/arch/x86/kernel/reboot_32.S @@ -21,26 +21,26 @@ r_base = . 
/* Get our own relocated address */ call 1f 1: popl %ebx - subl $(1b - r_base), %ebx + subl $1b, %ebx /* Compute the equivalent real-mode segment */ movl %ebx, %ecx shrl $4, %ecx /* Patch post-real-mode segment jump */ - movw (dispatch_table - r_base)(%ebx,%eax,2),%ax - movw %ax, (101f - r_base)(%ebx) - movw %cx, (102f - r_base)(%ebx) + movw dispatch_table(%ebx,%eax,2),%ax + movw %ax, 101f(%ebx) + movw %cx, 102f(%ebx) /* Set up the IDT for real mode. */ - lidtl (machine_real_restart_idt - r_base)(%ebx) + lidtl machine_real_restart_idt(%ebx) /* * Set up a GDT from which we can load segment descriptors for real * mode. The GDT is not used in real mode; it is just needed here to * prepare the descriptors. */ - lgdtl (machine_real_restart_gdt - r_base)(%ebx) + lgdtl machine_real_restart_gdt(%ebx) /* * Load the data segment registers with 16-bit compatible values diff --git a/trunk/arch/x86/kernel/smp.c b/trunk/arch/x86/kernel/smp.c index 013e7eba83bb..513deac7228d 100644 --- a/trunk/arch/x86/kernel/smp.c +++ b/trunk/arch/x86/kernel/smp.c @@ -194,13 +194,14 @@ static void native_stop_other_cpus(int wait) } /* - * Reschedule call back. + * Reschedule call back. Nothing to do, + * all the work is done automatically when + * we return from the interrupt. */ void smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); inc_irq_stat(irq_resched_count); - scheduler_ipi(); /* * KVM uses this interrupt to force a cpu out of guest mode */ diff --git a/trunk/arch/x86/kernel/stacktrace.c b/trunk/arch/x86/kernel/stacktrace.c index 55d9bc03f696..6515733a289d 100644 --- a/trunk/arch/x86/kernel/stacktrace.c +++ b/trunk/arch/x86/kernel/stacktrace.c @@ -9,6 +9,15 @@ #include #include +static void save_stack_warning(void *data, char *msg) +{ +} + +static void +save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) +{ +} + static int save_stack_stack(void *data, char *name) { return 0; @@ -44,12 +53,16 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops save_stack_ops = { + .warning = save_stack_warning, + .warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, .address = save_stack_address, .walk_stack = print_context_stack, }; static const struct stacktrace_ops save_stack_ops_nosched = { + .warning = save_stack_warning, + .warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, .address = save_stack_address_nosched, .walk_stack = print_context_stack, diff --git a/trunk/arch/x86/kernel/x86_init.c b/trunk/arch/x86/kernel/x86_init.c index 75ef4b18e9b7..c11514e9128b 100644 --- a/trunk/arch/x86/kernel/x86_init.c +++ b/trunk/arch/x86/kernel/x86_init.c @@ -61,10 +61,6 @@ struct x86_init_ops x86_init __initdata = { .banner = default_banner, }, - .mapping = { - .pagetable_reserve = native_pagetable_reserve, - }, - .paging = { .pagetable_setup_start = native_pagetable_setup_start, .pagetable_setup_done = native_pagetable_setup_done, diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c index e191c096ab90..1cd608973ce5 100644 --- a/trunk/arch/x86/lguest/boot.c +++ b/trunk/arch/x86/lguest/boot.c @@ -7,7 +7,7 @@ * kernel and insert a module (lg.ko) which allows us to run other Linux * kernels the same way we'd run processes. We call the first kernel the Host, * and the others the Guests. 
The program which sets up and configures Guests - * (such as the example in Documentation/virtual/lguest/lguest.c) is called the + * (such as the example in Documentation/lguest/lguest.c) is called the * Launcher. * * Secondly, we only run specially modified Guests, not normal kernels: setting @@ -913,6 +913,8 @@ static struct clocksource lguest_clock = { .rating = 200, .read = lguest_clock_read, .mask = CLOCKSOURCE_MASK(64), + .mult = 1 << 22, + .shift = 22, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -995,7 +997,7 @@ static void lguest_time_init(void) /* Set up the timer interrupt (0) to go to our simple timer routine */ irq_set_handler(0, lguest_time_irq); - clocksource_register_hz(&lguest_clock, NSEC_PER_SEC); + clocksource_register(&lguest_clock); /* We can't set cpumask in the initializer: damn C limitations! Set it * here and register our timer device. */ diff --git a/trunk/arch/x86/lib/clear_page_64.S b/trunk/arch/x86/lib/clear_page_64.S index f2145cfa12a6..aa4326bfb24a 100644 --- a/trunk/arch/x86/lib/clear_page_64.S +++ b/trunk/arch/x86/lib/clear_page_64.S @@ -1,6 +1,5 @@ #include #include -#include /* * Zero a page. @@ -15,15 +14,6 @@ ENTRY(clear_page_c) CFI_ENDPROC ENDPROC(clear_page_c) -ENTRY(clear_page_c_e) - CFI_STARTPROC - movl $4096,%ecx - xorl %eax,%eax - rep stosb - ret - CFI_ENDPROC -ENDPROC(clear_page_c_e) - ENTRY(clear_page) CFI_STARTPROC xorl %eax,%eax @@ -48,26 +38,21 @@ ENTRY(clear_page) .Lclear_page_end: ENDPROC(clear_page) - /* - * Some CPUs support enhanced REP MOVSB/STOSB instructions. - * It is recommended to use this when possible. - * If enhanced REP MOVSB/STOSB is not available, try to use fast string. - * Otherwise, use original function. - * - */ + /* Some CPUs run faster using the string instructions. + It is also a lot simpler. Use this when possible */ #include .section .altinstr_replacement,"ax" 1: .byte 0xeb /* jmp */ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ -2: .byte 0xeb /* jmp */ - .byte (clear_page_c_e - clear_page) - (3f - 2b) /* offset */ -3: +2: .previous .section .altinstructions,"a" - altinstruction_entry clear_page,1b,X86_FEATURE_REP_GOOD,\ - .Lclear_page_end-clear_page, 2b-1b - altinstruction_entry clear_page,2b,X86_FEATURE_ERMS, \ - .Lclear_page_end-clear_page,3b-2b + .align 8 + .quad clear_page + .quad 1b + .word X86_FEATURE_REP_GOOD + .byte .Lclear_page_end - clear_page + .byte 2b - 1b .previous diff --git a/trunk/arch/x86/lib/copy_user_64.S b/trunk/arch/x86/lib/copy_user_64.S index 024840266ba0..99e482615195 100644 --- a/trunk/arch/x86/lib/copy_user_64.S +++ b/trunk/arch/x86/lib/copy_user_64.S @@ -15,30 +15,23 @@ #include #include #include -#include -/* - * By placing feature2 after feature1 in altinstructions section, we logically - * implement: - * If CPU has feature2, jmp to alt2 is used - * else if CPU has feature1, jmp to alt1 is used - * else jmp to orig is used. 
- */ - .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2 + .macro ALTERNATIVE_JUMP feature,orig,alt 0: .byte 0xe9 /* 32bit jump */ .long \orig-1f /* by default jump to orig */ 1: .section .altinstr_replacement,"ax" 2: .byte 0xe9 /* near jump with 32bit immediate */ - .long \alt1-1b /* offset */ /* or alternatively to alt1 */ -3: .byte 0xe9 /* near jump with 32bit immediate */ - .long \alt2-1b /* offset */ /* or alternatively to alt2 */ + .long \alt-1b /* offset */ /* or alternatively to alt */ .previous - .section .altinstructions,"a" - altinstruction_entry 0b,2b,\feature1,5,5 - altinstruction_entry 0b,3b,\feature2,5,5 + .align 8 + .quad 0b + .quad 2b + .word \feature /* when feature is set */ + .byte 5 + .byte 5 .previous .endm @@ -79,10 +72,8 @@ ENTRY(_copy_to_user) addq %rdx,%rcx jc bad_to_user cmpq TI_addr_limit(%rax),%rcx - ja bad_to_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ - copy_user_generic_unrolled,copy_user_generic_string, \ - copy_user_enhanced_fast_string + jae bad_to_user + ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string CFI_ENDPROC ENDPROC(_copy_to_user) @@ -94,10 +85,8 @@ ENTRY(_copy_from_user) addq %rdx,%rcx jc bad_from_user cmpq TI_addr_limit(%rax),%rcx - ja bad_from_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ - copy_user_generic_unrolled,copy_user_generic_string, \ - copy_user_enhanced_fast_string + jae bad_from_user + ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string CFI_ENDPROC ENDPROC(_copy_from_user) @@ -266,37 +255,3 @@ ENTRY(copy_user_generic_string) .previous CFI_ENDPROC ENDPROC(copy_user_generic_string) - -/* - * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. - * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled. - * - * Input: - * rdi destination - * rsi source - * rdx count - * - * Output: - * eax uncopied bytes or 0 if successful. - */ -ENTRY(copy_user_enhanced_fast_string) - CFI_STARTPROC - andl %edx,%edx - jz 2f - movl %edx,%ecx -1: rep - movsb -2: xorl %eax,%eax - ret - - .section .fixup,"ax" -12: movl %ecx,%edx /* ecx is zerorest also */ - jmp copy_user_handle_tail - .previous - - .section __ex_table,"a" - .align 8 - .quad 1b,12b - .previous - CFI_ENDPROC -ENDPROC(copy_user_enhanced_fast_string) diff --git a/trunk/arch/x86/lib/memcpy_64.S b/trunk/arch/x86/lib/memcpy_64.S index daab21dae2d1..75ef61e35e38 100644 --- a/trunk/arch/x86/lib/memcpy_64.S +++ b/trunk/arch/x86/lib/memcpy_64.S @@ -4,7 +4,6 @@ #include #include -#include /* * memcpy - Copy a memory block. @@ -38,23 +37,6 @@ .Lmemcpy_e: .previous -/* - * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than - * memcpy_c. Use memcpy_c_e when possible. - * - * This gets patched over the unrolled variant (below) via the - * alternative instructions framework: - */ - .section .altinstr_replacement, "ax", @progbits -.Lmemcpy_c_e: - movq %rdi, %rax - - movl %edx, %ecx - rep movsb - ret -.Lmemcpy_e_e: - .previous - ENTRY(__memcpy) ENTRY(memcpy) CFI_STARTPROC @@ -189,22 +171,21 @@ ENDPROC(memcpy) ENDPROC(__memcpy) /* - * Some CPUs are adding enhanced REP MOVSB/STOSB feature - * If the feature is supported, memcpy_c_e() is the first choice. - * If enhanced rep movsb copy is not available, use fast string copy - * memcpy_c() when possible. This is faster and code is simpler than - * original memcpy(). - * Otherwise, original memcpy() is used. 
- * In .altinstructions section, ERMS feature is placed after REG_GOOD - * feature to implement the right patch order. - * + * Some CPUs run faster using the string copy instructions. + * It is also a lot simpler. Use this when possible: + */ + + .section .altinstructions, "a" + .align 8 + .quad memcpy + .quad .Lmemcpy_c + .word X86_FEATURE_REP_GOOD + + /* * Replace only beginning, memcpy is used to apply alternatives, * so it is silly to overwrite itself with nops - reboot is the * only outcome... */ - .section .altinstructions, "a" - altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\ - .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c - altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \ - .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e + .byte .Lmemcpy_e - .Lmemcpy_c + .byte .Lmemcpy_e - .Lmemcpy_c .previous diff --git a/trunk/arch/x86/lib/memmove_64.S b/trunk/arch/x86/lib/memmove_64.S index d0ec9c2936d7..0ecb8433e5a8 100644 --- a/trunk/arch/x86/lib/memmove_64.S +++ b/trunk/arch/x86/lib/memmove_64.S @@ -8,7 +8,6 @@ #define _STRING_C #include #include -#include #undef memmove @@ -25,7 +24,6 @@ */ ENTRY(memmove) CFI_STARTPROC - /* Handle more 32bytes in loop */ mov %rdi, %rax cmp $0x20, %rdx @@ -33,13 +31,8 @@ ENTRY(memmove) /* Decide forward/backward copy mode */ cmp %rdi, %rsi - jge .Lmemmove_begin_forward - mov %rsi, %r8 - add %rdx, %r8 - cmp %rdi, %r8 - jg 2f + jb 2f -.Lmemmove_begin_forward: /* * movsq instruction have many startup latency * so we handle small size by general register. @@ -85,8 +78,6 @@ ENTRY(memmove) rep movsq movq %r11, (%r10) jmp 13f -.Lmemmove_end_forward: - /* * Handle data backward by movsq. */ @@ -203,22 +194,4 @@ ENTRY(memmove) 13: retq CFI_ENDPROC - - .section .altinstr_replacement,"ax" -.Lmemmove_begin_forward_efs: - /* Forward moving data. */ - movq %rdx, %rcx - rep movsb - retq -.Lmemmove_end_forward_efs: - .previous - - .section .altinstructions,"a" - .align 8 - .quad .Lmemmove_begin_forward - .quad .Lmemmove_begin_forward_efs - .word X86_FEATURE_ERMS - .byte .Lmemmove_end_forward-.Lmemmove_begin_forward - .byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs - .previous ENDPROC(memmove) diff --git a/trunk/arch/x86/lib/memset_64.S b/trunk/arch/x86/lib/memset_64.S index 79bd454b78a3..09d344269652 100644 --- a/trunk/arch/x86/lib/memset_64.S +++ b/trunk/arch/x86/lib/memset_64.S @@ -2,13 +2,9 @@ #include #include -#include -#include /* - * ISO C memset - set a memory block to a byte value. This function uses fast - * string to get better performance than the original function. The code is - * simpler and shorter than the orignal function as well. + * ISO C memset - set a memory block to a byte value. * * rdi destination * rsi value (char) @@ -35,28 +31,6 @@ .Lmemset_e: .previous -/* - * ISO C memset - set a memory block to a byte value. This function uses - * enhanced rep stosb to override the fast string function. - * The code is simpler and shorter than the fast string function as well. - * - * rdi destination - * rsi value (char) - * rdx count (bytes) - * - * rax original destination - */ - .section .altinstr_replacement, "ax", @progbits -.Lmemset_c_e: - movq %rdi,%r9 - movb %sil,%al - movl %edx,%ecx - rep stosb - movq %r9,%rax - ret -.Lmemset_e_e: - .previous - ENTRY(memset) ENTRY(__memset) CFI_STARTPROC @@ -138,20 +112,16 @@ ENTRY(__memset) ENDPROC(memset) ENDPROC(__memset) - /* Some CPUs support enhanced REP MOVSB/STOSB feature. - * It is recommended to use this when possible. 
- * - * If enhanced REP MOVSB/STOSB feature is not available, use fast string - * instructions. - * - * Otherwise, use original memset function. - * - * In .altinstructions section, ERMS feature is placed after REG_GOOD - * feature to implement the right patch order. - */ + /* Some CPUs run faster using the string instructions. + It is also a lot simpler. Use this when possible */ + +#include + .section .altinstructions,"a" - altinstruction_entry memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\ - .Lfinal-memset,.Lmemset_e-.Lmemset_c - altinstruction_entry memset,.Lmemset_c_e,X86_FEATURE_ERMS, \ - .Lfinal-memset,.Lmemset_e_e-.Lmemset_c_e + .align 8 + .quad memset + .quad .Lmemset_c + .word X86_FEATURE_REP_GOOD + .byte .Lfinal - memset + .byte .Lmemset_e - .Lmemset_c .previous diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index 37b8b0fe8320..286d289b039b 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -81,11 +81,6 @@ static void __init find_early_table_space(unsigned long end, int use_pse, end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); } -void __init native_pagetable_reserve(u64 start, u64 end) -{ - memblock_x86_reserve_range(start, end, "PGTABLE"); -} - struct map_range { unsigned long start; unsigned long end; @@ -277,24 +272,9 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, __flush_tlb_all(); - /* - * Reserve the kernel pagetable pages we used (pgt_buf_start - - * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) - * so that they can be reused for other purposes. - * - * On native it just means calling memblock_x86_reserve_range, on Xen it - * also means marking RW the pagetable pages that we allocated before - * but that haven't been used. - * - * In fact on xen we mark RO the whole range pgt_buf_start - - * pgt_buf_top, because we have to make sure that when - * init_memory_mapping reaches the pagetable pages area, it maps - * RO all the pagetable pages, including the ones that are beyond - * pgt_buf_end at that time. 
- */ if (!after_bootmem && pgt_buf_end > pgt_buf_start) - x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), - PFN_PHYS(pgt_buf_end)); + memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, + pgt_buf_end << PAGE_SHIFT, "PGTABLE"); if (!after_bootmem) early_memtest(start, end); diff --git a/trunk/arch/x86/mm/numa_64.c b/trunk/arch/x86/mm/numa_64.c index 85b52fc03084..e8c00cc72033 100644 --- a/trunk/arch/x86/mm/numa_64.c +++ b/trunk/arch/x86/mm/numa_64.c @@ -306,7 +306,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi) bi->end = min(bi->end, high); /* and there's no empty block */ - if (bi->start >= bi->end) { + if (bi->start == bi->end) { numa_remove_memblk_from(i--, mi); continue; } diff --git a/trunk/arch/x86/oprofile/backtrace.c b/trunk/arch/x86/oprofile/backtrace.c index a5b64ab4cd6e..2d49d4e19a36 100644 --- a/trunk/arch/x86/oprofile/backtrace.c +++ b/trunk/arch/x86/oprofile/backtrace.c @@ -16,6 +16,17 @@ #include #include +static void backtrace_warning_symbol(void *data, char *msg, + unsigned long symbol) +{ + /* Ignore warnings */ +} + +static void backtrace_warning(void *data, char *msg) +{ + /* Ignore warnings */ +} + static int backtrace_stack(void *data, char *name) { /* Yes, we want all stacks */ @@ -31,6 +42,8 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static struct stacktrace_ops backtrace_ops = { + .warning = backtrace_warning, + .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack, diff --git a/trunk/arch/x86/pci/xen.c b/trunk/arch/x86/pci/xen.c index 8214724ce54d..e37b407a0ee8 100644 --- a/trunk/arch/x86/pci/xen.c +++ b/trunk/arch/x86/pci/xen.c @@ -108,8 +108,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) } irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, (type == PCI_CAP_ID_MSIX) ? - "msi-x" : "msi", - DOMID_SELF); + "msi-x" : "msi"); if (irq < 0) goto error; dev_dbg(&dev->dev, @@ -149,8 +148,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, (type == PCI_CAP_ID_MSIX) ? "pcifront-msi-x" : - "pcifront-msi", - DOMID_SELF); + "pcifront-msi"); if (irq < 0) goto free; i++; @@ -192,16 +190,9 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) list_for_each_entry(msidesc, &dev->msi_list, list) { struct physdev_map_pirq map_irq; - domid_t domid; - - domid = ret = xen_find_device_domain_owner(dev); - /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED, - * hence check ret value for < 0. */ - if (ret < 0) - domid = DOMID_SELF; memset(&map_irq, 0, sizeof(map_irq)); - map_irq.domid = domid; + map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_MSI; map_irq.index = -1; map_irq.pirq = -1; @@ -224,16 +215,14 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (ret) { - dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n", - ret, domid); + dev_warn(&dev->dev, "xen map irq failed %d\n", ret); goto out; } ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq, map_irq.index, (type == PCI_CAP_ID_MSIX) ? 
- "msi-x" : "msi", - domid); + "msi-x" : "msi"); if (ret < 0) goto out; } @@ -472,78 +461,3 @@ void __init xen_setup_pirqs(void) } } #endif - -#ifdef CONFIG_XEN_DOM0 -struct xen_device_domain_owner { - domid_t domain; - struct pci_dev *dev; - struct list_head list; -}; - -static DEFINE_SPINLOCK(dev_domain_list_spinlock); -static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); - -static struct xen_device_domain_owner *find_device(struct pci_dev *dev) -{ - struct xen_device_domain_owner *owner; - - list_for_each_entry(owner, &dev_domain_list, list) { - if (owner->dev == dev) - return owner; - } - return NULL; -} - -int xen_find_device_domain_owner(struct pci_dev *dev) -{ - struct xen_device_domain_owner *owner; - int domain = -ENODEV; - - spin_lock(&dev_domain_list_spinlock); - owner = find_device(dev); - if (owner) - domain = owner->domain; - spin_unlock(&dev_domain_list_spinlock); - return domain; -} -EXPORT_SYMBOL_GPL(xen_find_device_domain_owner); - -int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) -{ - struct xen_device_domain_owner *owner; - - owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL); - if (!owner) - return -ENODEV; - - spin_lock(&dev_domain_list_spinlock); - if (find_device(dev)) { - spin_unlock(&dev_domain_list_spinlock); - kfree(owner); - return -EEXIST; - } - owner->domain = domain; - owner->dev = dev; - list_add_tail(&owner->list, &dev_domain_list); - spin_unlock(&dev_domain_list_spinlock); - return 0; -} -EXPORT_SYMBOL_GPL(xen_register_device_domain_owner); - -int xen_unregister_device_domain_owner(struct pci_dev *dev) -{ - struct xen_device_domain_owner *owner; - - spin_lock(&dev_domain_list_spinlock); - owner = find_device(dev); - if (!owner) { - spin_unlock(&dev_domain_list_spinlock); - return -ENODEV; - } - list_del(&owner->list); - spin_unlock(&dev_domain_list_spinlock); - kfree(owner); - return 0; -} -EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner); -#endif diff --git a/trunk/arch/x86/platform/ce4100/falconfalls.dts b/trunk/arch/x86/platform/ce4100/falconfalls.dts index e70be38ce039..2d6d226f2b10 100644 --- a/trunk/arch/x86/platform/ce4100/falconfalls.dts +++ b/trunk/arch/x86/platform/ce4100/falconfalls.dts @@ -347,7 +347,7 @@ "pciclass0c03"; reg = <0x16800 0x0 0x0 0x0 0x0>; - interrupts = <22 1>; + interrupts = <22 3>; }; usb@d,1 { @@ -357,7 +357,7 @@ "pciclass0c03"; reg = <0x16900 0x0 0x0 0x0 0x0>; - interrupts = <22 1>; + interrupts = <22 3>; }; sata@e,0 { @@ -367,7 +367,7 @@ "pciclass0106"; reg = <0x17000 0x0 0x0 0x0 0x0>; - interrupts = <23 1>; + interrupts = <23 3>; }; flash@f,0 { diff --git a/trunk/arch/x86/platform/uv/tlb_uv.c b/trunk/arch/x86/platform/uv/tlb_uv.c index c58e0ea39ef5..7cb6424317f6 100644 --- a/trunk/arch/x86/platform/uv/tlb_uv.c +++ b/trunk/arch/x86/platform/uv/tlb_uv.c @@ -699,17 +699,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va, unsigned int cpu) { + int tcpu; + int uvhub; int locals = 0; int remotes = 0; int hubs = 0; - int tcpu; - int tpnode; struct bau_desc *bau_desc; struct cpumask *flush_mask; struct ptc_stats *stat; struct bau_control *bcp; struct bau_control *tbcp; - struct hub_and_pnode *hpp; /* kernel was booted 'nobau' */ if (nobau) @@ -751,18 +750,11 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); + /* cpu statistics */ for_each_cpu(tcpu, 
flush_mask) { - /* - * The distribution vector is a bit map of pnodes, relative - * to the partition base pnode (and the partition base nasid - * in the header). - * Translate cpu to pnode and hub using an array stored - * in local memory. - */ - hpp = &bcp->socket_master->target_hub_and_pnode[tcpu]; - tpnode = hpp->pnode - bcp->partition_base_pnode; - bau_uvhub_set(tpnode, &bau_desc->distribution); - if (hpp->uvhub == bcp->uvhub) + uvhub = uv_cpu_to_blade_id(tcpu); + bau_uvhub_set(uvhub, &bau_desc->distribution); + if (uvhub == bcp->uvhub) locals++; else remotes++; @@ -863,7 +855,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) * an interrupt, but causes an error message to be returned to * the sender. */ -static void __init uv_enable_timeouts(void) +static void uv_enable_timeouts(void) { int uvhub; int nuvhubs; @@ -1334,10 +1326,10 @@ static int __init uv_ptc_init(void) } /* - * Initialize the sending side's sending buffers. + * initialize the sending side's sending buffers */ static void -uv_activation_descriptor_init(int node, int pnode, int base_pnode) +uv_activation_descriptor_init(int node, int pnode) { int i; int cpu; @@ -1360,11 +1352,11 @@ uv_activation_descriptor_init(int node, int pnode, int base_pnode) n = pa >> uv_nshift; m = pa & uv_mmask; - /* the 14-bit pnode */ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, (n << UV_DESC_BASE_PNODE_SHIFT | m)); + /* - * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each + * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each * cpu even though we only use the first one; one descriptor can * describe a broadcast to 256 uv hubs. */ @@ -1373,13 +1365,12 @@ uv_activation_descriptor_init(int node, int pnode, int base_pnode) memset(bd2, 0, sizeof(struct bau_desc)); bd2->header.sw_ack_flag = 1; /* - * The base_dest_nasid set in the message header is the nasid - * of the first uvhub in the partition. The bit map will - * indicate destination pnode numbers relative to that base. - * They may not be consecutive if nasid striding is being used. + * base_dest_nodeid is the nasid of the first uvhub + * in the partition. The bit map will indicate uvhub numbers, + * which are 0-N in a partition. Pnodes are unique system-wide. */ - bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); - bd2->header.dest_subnodeid = UV_LB_SUBNODEID; + bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); + bd2->header.dest_subnodeid = 0x10; /* the LB */ bd2->header.command = UV_NET_ENDPOINT_INTD; bd2->header.int_both = 1; /* @@ -1451,7 +1442,7 @@ uv_payload_queue_init(int node, int pnode) /* * Initialization of each UV hub's structures */ -static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) +static void __init uv_init_uvhub(int uvhub, int vector) { int node; int pnode; @@ -1459,11 +1450,11 @@ static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) node = uvhub_to_first_node(uvhub); pnode = uv_blade_to_pnode(uvhub); - uv_activation_descriptor_init(node, pnode, base_pnode); + uv_activation_descriptor_init(node, pnode); uv_payload_queue_init(node, pnode); /* - * The below initialization can't be in firmware because the - * messaging IRQ will be determined by the OS. 
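In the uv_flush_tlb_others() hunk above, every CPU in the flush mask is translated to a hub number, the matching bit is set in the descriptor's distribution map, and the CPU is counted as local or remote relative to the sending hub. The sketch below reproduces only that bookkeeping in freestanding C; dist_set() and the cpu-to-hub table are invented stand-ins for bau_uvhub_set() and uv_cpu_to_blade_id():

#include <stdio.h>
#include <string.h>

#define DIST_BITS 256

static void dist_set(unsigned char *map, int hub)
{
        map[hub / 8] |= 1 << (hub % 8);
}

int main(void)
{
        unsigned char distribution[DIST_BITS / 8];
        int cpu_to_hub[] = { 0, 0, 1, 3 };      /* fake cpu -> hub table */
        int my_hub = 0, locals = 0, remotes = 0;
        size_t i;

        memset(distribution, 0, sizeof(distribution));
        for (i = 0; i < sizeof(cpu_to_hub) / sizeof(cpu_to_hub[0]); i++) {
                int hub = cpu_to_hub[i];

                dist_set(distribution, hub);
                if (hub == my_hub)
                        locals++;
                else
                        remotes++;
        }
        printf("locals=%d remotes=%d\n", locals, remotes);
        return 0;
}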
+ * the below initialization can't be in firmware because the + * messaging IRQ will be determined by the OS */ apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, @@ -1500,11 +1491,10 @@ calculate_destination_timeout(void) /* * initialize the bau_control structure for each cpu */ -static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) +static int __init uv_init_per_cpu(int nuvhubs) { int i; int cpu; - int tcpu; int pnode; int uvhub; int have_hmaster; @@ -1538,15 +1528,6 @@ static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) bcp = &per_cpu(bau_control, cpu); memset(bcp, 0, sizeof(struct bau_control)); pnode = uv_cpu_hub_info(cpu)->pnode; - if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) { - printk(KERN_EMERG - "cpu %d pnode %d-%d beyond %d; BAU disabled\n", - cpu, pnode, base_part_pnode, - UV_DISTRIBUTION_SIZE); - return 1; - } - bcp->osnode = cpu_to_node(cpu); - bcp->partition_base_pnode = uv_partition_base_pnode; uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); bdp = &uvhub_descs[uvhub]; @@ -1555,7 +1536,7 @@ static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) bdp->pnode = pnode; /* kludge: 'assuming' one node per socket, and assuming that disabling a socket just leaves a gap in node numbers */ - socket = bcp->osnode & 1; + socket = (cpu_to_node(cpu) & 1); bdp->socket_mask |= (1 << socket); sdp = &bdp->socket[socket]; sdp->cpu_number[sdp->num_cpus] = cpu; @@ -1604,20 +1585,6 @@ static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) nextsocket: socket++; socket_mask = (socket_mask >> 1); - /* each socket gets a local array of pnodes/hubs */ - bcp = smaster; - bcp->target_hub_and_pnode = kmalloc_node( - sizeof(struct hub_and_pnode) * - num_possible_cpus(), GFP_KERNEL, bcp->osnode); - memset(bcp->target_hub_and_pnode, 0, - sizeof(struct hub_and_pnode) * - num_possible_cpus()); - for_each_present_cpu(tcpu) { - bcp->target_hub_and_pnode[tcpu].pnode = - uv_cpu_hub_info(tcpu)->pnode; - bcp->target_hub_and_pnode[tcpu].uvhub = - uv_cpu_hub_info(tcpu)->numa_blade_id; - } } } kfree(uvhub_descs); @@ -1670,22 +1637,21 @@ static int __init uv_bau_init(void) spin_lock_init(&disable_lock); congested_cycles = microsec_2_cycles(congested_response_us); + if (uv_init_per_cpu(nuvhubs)) { + nobau = 1; + return 0; + } + uv_partition_base_pnode = 0x7fffffff; - for (uvhub = 0; uvhub < nuvhubs; uvhub++) { + for (uvhub = 0; uvhub < nuvhubs; uvhub++) if (uv_blade_nr_possible_cpus(uvhub) && (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) uv_partition_base_pnode = uv_blade_to_pnode(uvhub); - } - - if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) { - nobau = 1; - return 0; - } vector = UV_BAU_MESSAGE; for_each_possible_blade(uvhub) if (uv_blade_nr_possible_cpus(uvhub)) - uv_init_uvhub(uvhub, vector, uv_partition_base_pnode); + uv_init_uvhub(uvhub, vector); uv_enable_timeouts(); alloc_intr_gate(vector, uv_bau_message_intr1); diff --git a/trunk/arch/x86/platform/uv/uv_time.c b/trunk/arch/x86/platform/uv/uv_time.c index 0eb90184515f..9daf5d1af9f1 100644 --- a/trunk/arch/x86/platform/uv/uv_time.c +++ b/trunk/arch/x86/platform/uv/uv_time.c @@ -40,6 +40,7 @@ static struct clocksource clocksource_uv = { .rating = 400, .read = uv_read_rtc, .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK, + .shift = 10, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -371,11 +372,14 @@ static __init int uv_rtc_setup_clock(void) if (!is_uv_system()) return -ENODEV; + 
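The uv_time.c hunk in this region stops using clocksource_register_hz() and instead hard-codes .shift = 10, computes .mult with clocksource_hz2mult() and registers with plain clocksource_register(). The arithmetic behind that conversion is sketched below in ordinary userspace C; the 100 MHz rate is an invented example, not the real sn_rtc_cycles_per_second value:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
        /* mult chosen so that ns = (cycles * mult) >> shift */
        uint64_t tmp = NSEC_PER_SEC << shift;

        tmp += hz / 2;                  /* round to nearest */
        return (uint32_t)(tmp / hz);
}

int main(void)
{
        uint32_t shift = 10;            /* the value the hunk hard-codes */
        uint32_t hz = 100000000;        /* pretend RTC rate: 100 MHz */
        uint32_t mult = hz2mult(hz, shift);
        uint64_t cycles = 12345;

        printf("mult=%u -> %llu ns for %llu cycles\n", mult,
               (unsigned long long)((cycles * mult) >> shift),
               (unsigned long long)cycles);
        return 0;
}

The shift is a range-versus-resolution trade: a larger shift gives a finer mult, but the 64-bit cycles * mult product overflows for smaller cycle counts.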
clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, + clocksource_uv.shift); + /* If single blade, prefer tsc */ if (uv_num_possible_blades() == 1) clocksource_uv.rating = 250; - rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second); + rc = clocksource_register(&clocksource_uv); if (rc) printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc); else diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index dd7b88f2ec7a..e3c6a06cf725 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -235,7 +235,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, *dx &= maskedx; } -static void __init xen_init_cpuid_mask(void) +static __init void xen_init_cpuid_mask(void) { unsigned int ax, bx, cx, dx; unsigned int xsave_mask; @@ -400,7 +400,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr) /* * load_gdt for early boot, when the gdt is only mapped once */ -static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) +static __init void xen_load_gdt_boot(const struct desc_ptr *dtr) { unsigned long va = dtr->address; unsigned int size = dtr->size + 1; @@ -662,7 +662,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, * Version of write_gdt_entry for use at early boot-time needed to * update an entry as simply as possible. */ -static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, +static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, const void *desc, int type) { switch (type) { @@ -933,18 +933,18 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf, return ret; } -static const struct pv_info xen_info __initconst = { +static const struct pv_info xen_info __initdata = { .paravirt_enabled = 1, .shared_kernel_pmd = 0, .name = "Xen", }; -static const struct pv_init_ops xen_init_ops __initconst = { +static const struct pv_init_ops xen_init_ops __initdata = { .patch = xen_patch, }; -static const struct pv_cpu_ops xen_cpu_ops __initconst = { +static const struct pv_cpu_ops xen_cpu_ops __initdata = { .cpuid = xen_cpuid, .set_debugreg = xen_set_debugreg, @@ -1004,7 +1004,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .end_context_switch = xen_end_context_switch, }; -static const struct pv_apic_ops xen_apic_ops __initconst = { +static const struct pv_apic_ops xen_apic_ops __initdata = { #ifdef CONFIG_X86_LOCAL_APIC .startup_ipi_hook = paravirt_nop, #endif @@ -1055,7 +1055,7 @@ int xen_panic_handler_init(void) return 0; } -static const struct machine_ops xen_machine_ops __initconst = { +static const struct machine_ops __initdata xen_machine_ops = { .restart = xen_restart, .halt = xen_machine_halt, .power_off = xen_machine_halt, @@ -1332,7 +1332,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = { +static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = { .notifier_call = xen_hvm_cpu_notify, }; @@ -1381,7 +1381,7 @@ bool xen_hvm_need_lapic(void) } EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); -const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { +const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = { .name = "Xen HVM", .detect = xen_hvm_platform, .init_platform = xen_hvm_guest_init, diff --git a/trunk/arch/x86/xen/irq.c b/trunk/arch/x86/xen/irq.c index 8bbb465b6f0a..6a6fe8939645 100644 --- a/trunk/arch/x86/xen/irq.c +++ b/trunk/arch/x86/xen/irq.c @@ -113,7 +113,7 @@ static void 
xen_halt(void) xen_safe_halt(); } -static const struct pv_irq_ops xen_irq_ops __initconst = { +static const struct pv_irq_ops xen_irq_ops __initdata = { .save_fl = PV_CALLEE_SAVE(xen_save_fl), .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index 02d752460371..aef7af92b28b 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -1054,7 +1054,7 @@ void xen_mm_pin_all(void) * that's before we have page structures to store the bits. So do all * the book-keeping now. */ -static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, +static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, enum pt_level level) { SetPagePinned(page); @@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info) active_mm = percpu_read(cpu_tlbstate.active_mm); - if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK) + if (active_mm == mm) leave_mm(smp_processor_id()); /* If this cpu still has a stale cr3 reference, then make sure @@ -1271,27 +1271,13 @@ void xen_exit_mmap(struct mm_struct *mm) spin_unlock(&mm->page_table_lock); } -static void __init xen_pagetable_setup_start(pgd_t *base) +static __init void xen_pagetable_setup_start(pgd_t *base) { } -static __init void xen_mapping_pagetable_reserve(u64 start, u64 end) -{ - /* reserve the range used */ - native_pagetable_reserve(start, end); - - /* set as RW the rest */ - printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end, - PFN_PHYS(pgt_buf_top)); - while (end < PFN_PHYS(pgt_buf_top)) { - make_lowmem_page_readwrite(__va(end)); - end += PAGE_SIZE; - } -} - static void xen_post_allocator_init(void); -static void __init xen_pagetable_setup_done(pgd_t *base) +static __init void xen_pagetable_setup_done(pgd_t *base) { xen_setup_shared_info(); xen_post_allocator_init(); @@ -1488,7 +1474,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) } #ifdef CONFIG_X86_32 -static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) +static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) { /* If there's an existing pte, then don't allow _PAGE_RW to be set */ if (pte_val_ma(*ptep) & _PAGE_PRESENT) @@ -1498,7 +1484,7 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) return pte; } #else /* CONFIG_X86_64 */ -static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) +static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) { unsigned long pfn = pte_pfn(pte); @@ -1509,7 +1495,7 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) * it is RO. */ if (((!is_early_ioremap_ptep(ptep) && - pfn >= pgt_buf_start && pfn < pgt_buf_top)) || + pfn >= pgt_buf_start && pfn < pgt_buf_end)) || (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1))) pte = pte_wrprotect(pte); @@ -1519,7 +1505,7 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) /* Init-time set_pte while constructing initial pagetables, which doesn't allow RO pagetable pages to be remapped RW */ -static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) +static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) { pte = mask_rw_pte(ptep, pte); @@ -1537,7 +1523,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) /* Early in boot, while setting up the initial pagetable, assume everything is pinned. 
*/ -static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) +static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) { #ifdef CONFIG_FLATMEM BUG_ON(mem_map); /* should only be used early */ @@ -1547,7 +1533,7 @@ static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) } /* Used for pmd and pud */ -static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) +static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) { #ifdef CONFIG_FLATMEM BUG_ON(mem_map); /* should only be used early */ @@ -1557,13 +1543,13 @@ static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) /* Early release_pte assumes that all pts are pinned, since there's only init_mm and anything attached to that is pinned. */ -static void __init xen_release_pte_init(unsigned long pfn) +static __init void xen_release_pte_init(unsigned long pfn) { pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); } -static void __init xen_release_pmd_init(unsigned long pfn) +static __init void xen_release_pmd_init(unsigned long pfn) { make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); } @@ -1689,7 +1675,7 @@ static void set_page_prot(void *addr, pgprot_t prot) BUG(); } -static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) +static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) { unsigned pmdidx, pteidx; unsigned ident_pte; @@ -1772,7 +1758,7 @@ static void convert_pfn_mfn(void *v) * of the physical mapping once some sort of allocator has been set * up. */ -pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, +__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) { pud_t *l3; @@ -1843,7 +1829,7 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); -static void __init xen_write_cr3_init(unsigned long cr3) +static __init void xen_write_cr3_init(unsigned long cr3) { unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); @@ -1880,7 +1866,7 @@ static void __init xen_write_cr3_init(unsigned long cr3) pv_mmu_ops.write_cr3 = &xen_write_cr3; } -pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, +__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) { pmd_t *kernel_pmd; @@ -1986,7 +1972,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #endif } -void __init xen_ident_map_ISA(void) +__init void xen_ident_map_ISA(void) { unsigned long pa; @@ -2009,7 +1995,7 @@ void __init xen_ident_map_ISA(void) xen_flush_tlb(); } -static void __init xen_post_allocator_init(void) +static __init void xen_post_allocator_init(void) { #ifdef CONFIG_XEN_DEBUG pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); @@ -2046,7 +2032,7 @@ static void xen_leave_lazy_mmu(void) preempt_enable(); } -static const struct pv_mmu_ops xen_mmu_ops __initconst = { +static const struct pv_mmu_ops xen_mmu_ops __initdata = { .read_cr2 = xen_read_cr2, .write_cr2 = xen_write_cr2, @@ -2119,7 +2105,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { void __init xen_init_mmu_ops(void) { - x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve; x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; pv_mmu_ops = xen_mmu_ops; diff --git a/trunk/arch/x86/xen/p2m.c 
b/trunk/arch/x86/xen/p2m.c index 58efeb9d5440..141eb0de8b06 100644 --- a/trunk/arch/x86/xen/p2m.c +++ b/trunk/arch/x86/xen/p2m.c @@ -522,20 +522,11 @@ static bool __init __early_alloc_p2m(unsigned long pfn) /* Boundary cross-over for the edges: */ if (idx) { unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); - unsigned long *mid_mfn_p; p2m_init(p2m); p2m_top[topidx][mididx] = p2m; - /* For save/restore we need to MFN of the P2M saved */ - - mid_mfn_p = p2m_top_mfn_p[topidx]; - WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), - "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", - topidx, mididx); - mid_mfn_p[mididx] = virt_to_mfn(p2m); - } return idx != 0; } @@ -558,29 +549,12 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); - unsigned long *mid_mfn_p; - unsigned long **mid; - - mid = p2m_top[topidx]; - mid_mfn_p = p2m_top_mfn_p[topidx]; - if (mid == p2m_mid_missing) { - mid = extend_brk(PAGE_SIZE, PAGE_SIZE); + if (p2m_top[topidx] == p2m_mid_missing) { + unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_init(mid); p2m_top[topidx] = mid; - - BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); - } - /* And the save/restore P2M tables.. */ - if (mid_mfn_p == p2m_mid_missing_mfn) { - mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_mfn_init(mid_mfn_p); - - p2m_top_mfn_p[topidx] = mid_mfn_p; - p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); - /* Note: we don't set mid_mfn_p[midix] here, - * look in __early_alloc_p2m */ } } @@ -676,7 +650,7 @@ static unsigned long mfn_hash(unsigned long mfn) } /* Add an MFN override for a particular page */ -int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) +int m2p_add_override(unsigned long mfn, struct page *page) { unsigned long flags; unsigned long pfn; @@ -688,6 +662,7 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); ptep = lookup_address(address, &level); + if (WARN(ptep == NULL || level != PG_LEVEL_4K, "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; @@ -699,17 +674,18 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) return -ENOMEM; - if (clear_pte && !PageHighMem(page)) + if (!PageHighMem(page)) /* Just zap old mapping for now */ pte_clear(&init_mm, address, ptep); + spin_lock_irqsave(&m2p_override_lock, flags); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); spin_unlock_irqrestore(&m2p_override_lock, flags); return 0; } -EXPORT_SYMBOL_GPL(m2p_add_override); -int m2p_remove_override(struct page *page, bool clear_pte) + +int m2p_remove_override(struct page *page) { unsigned long flags; unsigned long mfn; @@ -737,7 +713,7 @@ int m2p_remove_override(struct page *page, bool clear_pte) spin_unlock_irqrestore(&m2p_override_lock, flags); set_phys_to_machine(pfn, page->index); - if (clear_pte && !PageHighMem(page)) + if (!PageHighMem(page)) set_pte_at(&init_mm, address, ptep, pfn_pte(pfn, PAGE_KERNEL)); /* No tlb flush necessary because the caller already @@ -745,7 +721,6 @@ int m2p_remove_override(struct page *page, bool clear_pte) return 0; } -EXPORT_SYMBOL_GPL(m2p_remove_override); struct page *m2p_find_override(unsigned long mfn) { diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c index be1a464f6d66..90bac0aac3a5 100644 --- a/trunk/arch/x86/xen/setup.c +++ b/trunk/arch/x86/xen/setup.c @@ -50,7 
+50,7 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; */ #define EXTRA_MEM_RATIO (10) -static void __init xen_add_extra_mem(unsigned long pages) +static __init void xen_add_extra_mem(unsigned long pages) { unsigned long pfn; @@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list, if (last > end) continue; - if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) { + if (entry->type == E820_RAM) { if (start > start_pci) identity += set_phys_range_identity( PFN_UP(start_pci), PFN_DOWN(start)); @@ -227,11 +227,7 @@ char * __init xen_memory_setup(void) memcpy(map_raw, map, sizeof(map)); e820.nr_map = 0; -#ifdef CONFIG_X86_32 - xen_extra_mem_start = mem_end; -#else xen_extra_mem_start = max((1ULL << 32), mem_end); -#endif for (i = 0; i < memmap.nr_entries; i++) { unsigned long long end; @@ -340,7 +336,7 @@ static void __init fiddle_vdso(void) #endif } -static int __cpuinit register_callback(unsigned type, const void *func) +static __cpuinit int register_callback(unsigned type, const void *func) { struct callback_register callback = { .type = type, diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c index 41038c01de40..30612441ed99 100644 --- a/trunk/arch/x86/xen/smp.c +++ b/trunk/arch/x86/xen/smp.c @@ -46,17 +46,18 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); /* - * Reschedule call back. + * Reschedule call back. Nothing to do, + * all the work is done automatically when + * we return from the interrupt. */ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) { inc_irq_stat(irq_resched_count); - scheduler_ipi(); return IRQ_HANDLED; } -static void __cpuinit cpu_bringup(void) +static __cpuinit void cpu_bringup(void) { int cpu = smp_processor_id(); @@ -84,7 +85,7 @@ static void __cpuinit cpu_bringup(void) wmb(); /* make sure everything is out */ } -static void __cpuinit cpu_bringup_and_idle(void) +static __cpuinit void cpu_bringup_and_idle(void) { cpu_bringup(); cpu_idle(); @@ -241,7 +242,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) } } -static int __cpuinit +static __cpuinit int cpu_initialize_context(unsigned int cpu, struct task_struct *idle) { struct vcpu_guest_context *ctxt; @@ -485,7 +486,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static const struct smp_ops xen_smp_ops __initconst = { +static const struct smp_ops xen_smp_ops __initdata = { .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, .smp_prepare_cpus = xen_smp_prepare_cpus, .smp_cpus_done = xen_smp_cpus_done, diff --git a/trunk/arch/x86/xen/time.c b/trunk/arch/x86/xen/time.c index 5158c505bef9..2e2d370a47b1 100644 --- a/trunk/arch/x86/xen/time.c +++ b/trunk/arch/x86/xen/time.c @@ -26,6 +26,8 @@ #include "xen-ops.h" +#define XEN_SHIFT 22 + /* Xen may fire a timer up to this many ns early */ #define TIMER_SLOP 100000 #define NS_PER_TICK (1000000000LL / HZ) @@ -209,6 +211,8 @@ static struct clocksource xen_clocksource __read_mostly = { .rating = 400, .read = xen_clocksource_get_cycles, .mask = ~0, + .mult = 1< 0 and at this point we don't know how many cpus are diff --git a/trunk/arch/x86/xen/xen-ops.h b/trunk/arch/x86/xen/xen-ops.h index 97dfdc8757b3..3112f55638c4 100644 --- a/trunk/arch/x86/xen/xen-ops.h +++ b/trunk/arch/x86/xen/xen-ops.h @@ -74,7 +74,7 @@ static inline void xen_hvm_smp_init(void) {} #ifdef CONFIG_PARAVIRT_SPINLOCKS void __init 
xen_init_spinlocks(void); -void __cpuinit xen_init_lock_cpu(int cpu); +__cpuinit void xen_init_lock_cpu(int cpu); void xen_uninit_lock_cpu(int cpu); #else static inline void xen_init_spinlocks(void) diff --git a/trunk/block/blk-cgroup.c b/trunk/block/blk-cgroup.c index 471fdcc5df85..f0605ab2a761 100644 --- a/trunk/block/blk-cgroup.c +++ b/trunk/block/blk-cgroup.c @@ -114,13 +114,6 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) } EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); -struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk) -{ - return container_of(task_subsys_state(tsk, blkio_subsys_id), - struct blkio_cgroup, css); -} -EXPORT_SYMBOL_GPL(task_blkio_cgroup); - static inline void blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) { diff --git a/trunk/block/blk-cgroup.h b/trunk/block/blk-cgroup.h index c774930cc206..10919fae2d3a 100644 --- a/trunk/block/blk-cgroup.h +++ b/trunk/block/blk-cgroup.h @@ -291,7 +291,6 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {} #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) extern struct blkio_cgroup blkio_root_cgroup; extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); -extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk); extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, struct blkio_group *blkg, void *key, dev_t dev, enum blkio_policy_id plid); @@ -315,8 +314,6 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg, struct cgroup; static inline struct blkio_cgroup * cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } -static inline struct blkio_cgroup * -task_blkio_cgroup(struct task_struct *tsk) { return NULL; } static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, struct blkio_group *blkg, void *key, dev_t dev, diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c index 3fe00a14822a..a2e58eeb3549 100644 --- a/trunk/block/blk-core.c +++ b/trunk/block/blk-core.c @@ -316,10 +316,8 @@ EXPORT_SYMBOL(__blk_run_queue); */ void blk_run_queue_async(struct request_queue *q) { - if (likely(!blk_queue_stopped(q))) { - __cancel_delayed_work(&q->delay_work); + if (likely(!blk_queue_stopped(q))) queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); - } } EXPORT_SYMBOL(blk_run_queue_async); diff --git a/trunk/block/blk-throttle.c b/trunk/block/blk-throttle.c index 252a81a306f7..0475a22a420d 100644 --- a/trunk/block/blk-throttle.c +++ b/trunk/block/blk-throttle.c @@ -160,8 +160,9 @@ static void throtl_put_tg(struct throtl_grp *tg) } static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, - struct blkio_cgroup *blkcg) + struct cgroup *cgroup) { + struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); struct throtl_grp *tg = NULL; void *key = td; struct backing_dev_info *bdi = &td->queue->backing_dev_info; @@ -228,12 +229,12 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, static struct throtl_grp * throtl_get_tg(struct throtl_data *td) { + struct cgroup *cgroup; struct throtl_grp *tg = NULL; - struct blkio_cgroup *blkcg; rcu_read_lock(); - blkcg = task_blkio_cgroup(current); - tg = throtl_find_alloc_tg(td, blkcg); + cgroup = task_cgroup(current, blkio_subsys_id); + tg = throtl_find_alloc_tg(td, cgroup); if (!tg) tg = &td->root_tg; rcu_read_unlock(); diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index ab7a9e6a9b1c..5b52011e3a40 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c 
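The task_blkio_cgroup() helper removed in the blk-cgroup.c hunk above recovers a struct blkio_cgroup from an embedded css member via container_of(); cgroup_to_blkio_cgroup(), which the throttle and CFQ hunks switch to, does the same kind of lookup starting from a struct cgroup. A freestanding sketch of that pointer arithmetic, with stand-in structure names rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

/* same arithmetic as the kernel's container_of(), minus the type check */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct css { int refcount; };

struct blkcg_like {
        unsigned int weight;
        struct css css;                 /* embedded subsystem state */
};

int main(void)
{
        struct blkcg_like cg = { .weight = 500 };
        struct css *cssp = &cg.css;     /* what the cgroup core hands back */
        struct blkcg_like *back = container_of(cssp, struct blkcg_like, css);

        printf("weight via container_of: %u\n", back->weight);
        return 0;
}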
@@ -1014,9 +1014,10 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg, cfqg->needs_update = true; } -static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, - struct blkio_cgroup *blkcg, int create) +static struct cfq_group * +cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) { + struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); struct cfq_group *cfqg = NULL; void *key = cfqd; int i, j; @@ -1078,12 +1079,12 @@ static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, */ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) { - struct blkio_cgroup *blkcg; + struct cgroup *cgroup; struct cfq_group *cfqg = NULL; rcu_read_lock(); - blkcg = task_blkio_cgroup(current); - cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create); + cgroup = task_cgroup(current, blkio_subsys_id); + cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); if (!cfqg && create) cfqg = &cfqd->root_group; rcu_read_unlock(); diff --git a/trunk/drivers/Kconfig b/trunk/drivers/Kconfig index 557a469c7aa6..177c7d156933 100644 --- a/trunk/drivers/Kconfig +++ b/trunk/drivers/Kconfig @@ -119,7 +119,4 @@ source "drivers/platform/Kconfig" source "drivers/clk/Kconfig" source "drivers/hwspinlock/Kconfig" - -source "drivers/clocksource/Kconfig" - endmenu diff --git a/trunk/drivers/acpi/processor_perflib.c b/trunk/drivers/acpi/processor_perflib.c index 85b32376dad7..3a73a93596e8 100644 --- a/trunk/drivers/acpi/processor_perflib.c +++ b/trunk/drivers/acpi/processor_perflib.c @@ -49,6 +49,10 @@ ACPI_MODULE_NAME("processor_perflib"); static DEFINE_MUTEX(performance_mutex); +/* Use cpufreq debug layer for _PPC changes. */ +#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ + "cpufreq-core", msg) + /* * _PPC support is implemented as a CPUfreq policy notifier: * This means each time a CPUfreq driver registered also with @@ -141,7 +145,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) return -ENODEV; } - pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, + cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, (int)ppc, ppc ? "" : "not"); pr->performance_platform_limit = (int)ppc; diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c index 449c556274c0..b136c9c1e531 100644 --- a/trunk/drivers/acpi/scan.c +++ b/trunk/drivers/acpi/scan.c @@ -943,10 +943,6 @@ static int acpi_bus_get_flags(struct acpi_device *device) if (ACPI_SUCCESS(status)) device->flags.lockable = 1; - /* Power resources cannot be power manageable. 
*/ - if (device->device_type == ACPI_BUS_TYPE_POWER) - return 0; - /* Presence of _PS0|_PR0 indicates 'power manageable' */ status = acpi_get_handle(device->handle, "_PS0", &temp); if (ACPI_FAILURE(status)) diff --git a/trunk/drivers/ata/libahci.c b/trunk/drivers/ata/libahci.c index d38c40fe4ddb..ff9d832a163d 100644 --- a/trunk/drivers/ata/libahci.c +++ b/trunk/drivers/ata/libahci.c @@ -561,6 +561,27 @@ void ahci_start_engine(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); u32 tmp; + u8 status; + + status = readl(port_mmio + PORT_TFDATA) & 0xFF; + + /* + * At end of section 10.1 of AHCI spec (rev 1.3), it states + * Software shall not set PxCMD.ST to 1 until it is determined + * that a functoinal device is present on the port as determined by + * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h + * + * Even though most AHCI host controllers work without this check, + * specific controller will fail under this condition + */ + if (status & (ATA_BUSY | ATA_DRQ)) + return; + else { + ahci_scr_read(&ap->link, SCR_STATUS, &tmp); + + if ((tmp & 0xf) != 0x3) + return; + } /* start DMA */ tmp = readl(port_mmio + PORT_CMD); diff --git a/trunk/drivers/ata/libata-eh.c b/trunk/drivers/ata/libata-eh.c index dad9fd660f37..f26f2fe3480a 100644 --- a/trunk/drivers/ata/libata-eh.c +++ b/trunk/drivers/ata/libata-eh.c @@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; - bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; + bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM; unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; unsigned int err_mask; int rc; diff --git a/trunk/drivers/atm/fore200e.c b/trunk/drivers/atm/fore200e.c index bc9e702186dd..bdd2719f3f68 100644 --- a/trunk/drivers/atm/fore200e.c +++ b/trunk/drivers/atm/fore200e.c @@ -2643,19 +2643,16 @@ fore200e_init(struct fore200e* fore200e, struct device *parent) } #ifdef CONFIG_SBUS -static const struct of_device_id fore200e_sba_match[]; static int __devinit fore200e_sba_probe(struct platform_device *op) { - const struct of_device_id *match; const struct fore200e_bus *bus; struct fore200e *fore200e; static int index = 0; int err; - match = of_match_device(fore200e_sba_match, &op->dev); - if (!match) + if (!op->dev.of_match) return -EINVAL; - bus = match->data; + bus = op->dev.of_match->data; fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); if (!fore200e) diff --git a/trunk/drivers/base/Kconfig b/trunk/drivers/base/Kconfig index d57e8d0fb823..e9e5238f3106 100644 --- a/trunk/drivers/base/Kconfig +++ b/trunk/drivers/base/Kconfig @@ -168,4 +168,11 @@ config SYS_HYPERVISOR bool default n +config ARCH_NO_SYSDEV_OPS + bool + ---help--- + To be selected by architectures that don't use sysdev class or + sysdev driver power management (suspend/resume) and shutdown + operations. + endmenu diff --git a/trunk/drivers/base/base.h b/trunk/drivers/base/base.h index a34dca0ad041..19f49e41ce5d 100644 --- a/trunk/drivers/base/base.h +++ b/trunk/drivers/base/base.h @@ -111,6 +111,8 @@ static inline int driver_match_device(struct device_driver *drv, return drv->bus->match ? 
drv->bus->match(dev, drv) : 1; } +extern void sysdev_shutdown(void); + extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); diff --git a/trunk/drivers/base/dd.c b/trunk/drivers/base/dd.c index 29917c7506cb..da57ee9d63fe 100644 --- a/trunk/drivers/base/dd.c +++ b/trunk/drivers/base/dd.c @@ -316,7 +316,8 @@ static void __device_release_driver(struct device *dev) drv = dev->driver; if (drv) { - pm_runtime_get_sync(dev); + pm_runtime_get_noresume(dev); + pm_runtime_barrier(dev); driver_sysfs_remove(dev); @@ -325,8 +326,6 @@ static void __device_release_driver(struct device *dev) BUS_NOTIFY_UNBIND_DRIVER, dev); - pm_runtime_put_sync(dev); - if (dev->bus && dev->bus->remove) dev->bus->remove(dev); else if (drv->remove) @@ -339,6 +338,7 @@ static void __device_release_driver(struct device *dev) BUS_NOTIFY_UNBOUND_DRIVER, dev); + pm_runtime_put_sync(dev); } } diff --git a/trunk/drivers/base/firmware_class.c b/trunk/drivers/base/firmware_class.c index bbb03e6f7255..8c798ef7f13f 100644 --- a/trunk/drivers/base/firmware_class.c +++ b/trunk/drivers/base/firmware_class.c @@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p, if (!firmware_p) return -EINVAL; - if (WARN_ON(usermodehelper_is_disabled())) { - dev_err(device, "firmware: %s will not be loaded\n", name); - return -EBUSY; - } - *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", diff --git a/trunk/drivers/base/platform.c b/trunk/drivers/base/platform.c index 48425f183029..9e0e4fc24c46 100644 --- a/trunk/drivers/base/platform.c +++ b/trunk/drivers/base/platform.c @@ -667,7 +667,7 @@ static int platform_legacy_resume(struct device *dev) return ret; } -int platform_pm_prepare(struct device *dev) +static int platform_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -678,7 +678,7 @@ int platform_pm_prepare(struct device *dev) return ret; } -void platform_pm_complete(struct device *dev) +static void platform_pm_complete(struct device *dev) { struct device_driver *drv = dev->driver; @@ -686,11 +686,16 @@ void platform_pm_complete(struct device *dev) drv->pm->complete(dev); } -#endif /* CONFIG_PM_SLEEP */ +#else /* !CONFIG_PM_SLEEP */ + +#define platform_pm_prepare NULL +#define platform_pm_complete NULL + +#endif /* !CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND -int platform_pm_suspend(struct device *dev) +int __weak platform_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -708,7 +713,7 @@ int platform_pm_suspend(struct device *dev) return ret; } -int platform_pm_suspend_noirq(struct device *dev) +int __weak platform_pm_suspend_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -724,7 +729,7 @@ int platform_pm_suspend_noirq(struct device *dev) return ret; } -int platform_pm_resume(struct device *dev) +int __weak platform_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -742,7 +747,7 @@ int platform_pm_resume(struct device *dev) return ret; } -int platform_pm_resume_noirq(struct device *dev) +int __weak platform_pm_resume_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -758,11 +763,18 @@ int platform_pm_resume_noirq(struct device *dev) return ret; } -#endif /* CONFIG_SUSPEND */ +#else /* !CONFIG_SUSPEND */ + +#define platform_pm_suspend NULL +#define platform_pm_resume NULL +#define 
platform_pm_suspend_noirq NULL +#define platform_pm_resume_noirq NULL + +#endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS -int platform_pm_freeze(struct device *dev) +static int platform_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -780,7 +792,7 @@ int platform_pm_freeze(struct device *dev) return ret; } -int platform_pm_freeze_noirq(struct device *dev) +static int platform_pm_freeze_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -796,7 +808,7 @@ int platform_pm_freeze_noirq(struct device *dev) return ret; } -int platform_pm_thaw(struct device *dev) +static int platform_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -814,7 +826,7 @@ int platform_pm_thaw(struct device *dev) return ret; } -int platform_pm_thaw_noirq(struct device *dev) +static int platform_pm_thaw_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -830,7 +842,7 @@ int platform_pm_thaw_noirq(struct device *dev) return ret; } -int platform_pm_poweroff(struct device *dev) +static int platform_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -848,7 +860,7 @@ int platform_pm_poweroff(struct device *dev) return ret; } -int platform_pm_poweroff_noirq(struct device *dev) +static int platform_pm_poweroff_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -864,7 +876,7 @@ int platform_pm_poweroff_noirq(struct device *dev) return ret; } -int platform_pm_restore(struct device *dev) +static int platform_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -882,7 +894,7 @@ int platform_pm_restore(struct device *dev) return ret; } -int platform_pm_restore_noirq(struct device *dev) +static int platform_pm_restore_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -898,13 +910,62 @@ int platform_pm_restore_noirq(struct device *dev) return ret; } -#endif /* CONFIG_HIBERNATE_CALLBACKS */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ + +#define platform_pm_freeze NULL +#define platform_pm_thaw NULL +#define platform_pm_poweroff NULL +#define platform_pm_restore NULL +#define platform_pm_freeze_noirq NULL +#define platform_pm_thaw_noirq NULL +#define platform_pm_poweroff_noirq NULL +#define platform_pm_restore_noirq NULL + +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ + +#ifdef CONFIG_PM_RUNTIME + +int __weak platform_pm_runtime_suspend(struct device *dev) +{ + return pm_generic_runtime_suspend(dev); +}; + +int __weak platform_pm_runtime_resume(struct device *dev) +{ + return pm_generic_runtime_resume(dev); +}; + +int __weak platform_pm_runtime_idle(struct device *dev) +{ + return pm_generic_runtime_idle(dev); +}; + +#else /* !CONFIG_PM_RUNTIME */ + +#define platform_pm_runtime_suspend NULL +#define platform_pm_runtime_resume NULL +#define platform_pm_runtime_idle NULL + +#endif /* !CONFIG_PM_RUNTIME */ static const struct dev_pm_ops platform_dev_pm_ops = { - .runtime_suspend = pm_generic_runtime_suspend, - .runtime_resume = pm_generic_runtime_resume, - .runtime_idle = pm_generic_runtime_idle, - USE_PLATFORM_PM_SLEEP_OPS + .prepare = platform_pm_prepare, + .complete = platform_pm_complete, + .suspend = platform_pm_suspend, + .resume = platform_pm_resume, + .freeze = platform_pm_freeze, + .thaw = platform_pm_thaw, + .poweroff = platform_pm_poweroff, + .restore = platform_pm_restore, + .suspend_noirq = platform_pm_suspend_noirq, + .resume_noirq 
= platform_pm_resume_noirq, + .freeze_noirq = platform_pm_freeze_noirq, + .thaw_noirq = platform_pm_thaw_noirq, + .poweroff_noirq = platform_pm_poweroff_noirq, + .restore_noirq = platform_pm_restore_noirq, + .runtime_suspend = platform_pm_runtime_suspend, + .runtime_resume = platform_pm_runtime_resume, + .runtime_idle = platform_pm_runtime_idle, }; struct bus_type platform_bus_type = { @@ -916,6 +977,41 @@ struct bus_type platform_bus_type = { }; EXPORT_SYMBOL_GPL(platform_bus_type); +/** + * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops + * + * This function can be used by platform code to get the current + * set of dev_pm_ops functions used by the platform_bus_type. + */ +const struct dev_pm_ops * __init platform_bus_get_pm_ops(void) +{ + return platform_bus_type.pm; +} + +/** + * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type + * + * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type + * + * Platform code can override the dev_pm_ops methods of + * platform_bus_type by using this function. It is expected that + * platform code will first do a platform_bus_get_pm_ops(), then + * kmemdup it, then customize selected methods and pass a pointer to + * the new struct dev_pm_ops to this function. + * + * Since platform-specific code is customizing methods for *all* + * devices (not just platform-specific devices) it is expected that + * any custom overrides of these functions will keep existing behavior + * and simply extend it. For example, any customization of the + * runtime PM methods should continue to call the pm_generic_* + * functions as the default ones do in addition to the + * platform-specific behavior. + */ +void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm) +{ + platform_bus_type.pm = pm; +} + int __init platform_bus_init(void) { int error; diff --git a/trunk/drivers/base/power/Makefile b/trunk/drivers/base/power/Makefile index 3647e114d0e7..118c1b92a511 100644 --- a/trunk/drivers/base/power/Makefile +++ b/trunk/drivers/base/power/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o -obj-$(CONFIG_HAVE_CLK) += clock_ops.o -ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG +ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG diff --git a/trunk/drivers/base/power/clock_ops.c b/trunk/drivers/base/power/clock_ops.c deleted file mode 100644 index c0dd09df7be8..000000000000 --- a/trunk/drivers/base/power/clock_ops.c +++ /dev/null @@ -1,431 +0,0 @@ -/* - * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks - * - * Copyright (c) 2011 Rafael J. Wysocki , Renesas Electronics Corp. - * - * This file is released under the GPLv2. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_PM_RUNTIME - -struct pm_runtime_clk_data { - struct list_head clock_list; - struct mutex lock; -}; - -enum pce_status { - PCE_STATUS_NONE = 0, - PCE_STATUS_ACQUIRED, - PCE_STATUS_ENABLED, - PCE_STATUS_ERROR, -}; - -struct pm_clock_entry { - struct list_head node; - char *con_id; - struct clk *clk; - enum pce_status status; -}; - -static struct pm_runtime_clk_data *__to_prd(struct device *dev) -{ - return dev ? dev->power.subsys_data : NULL; -} - -/** - * pm_runtime_clk_add - Start using a device clock for runtime PM. - * @dev: Device whose clock is going to be used for runtime PM. 
- * @con_id: Connection ID of the clock. - * - * Add the clock represented by @con_id to the list of clocks used for - * the runtime PM of @dev. - */ -int pm_runtime_clk_add(struct device *dev, const char *con_id) -{ - struct pm_runtime_clk_data *prd = __to_prd(dev); - struct pm_clock_entry *ce; - - if (!prd) - return -EINVAL; - - ce = kzalloc(sizeof(*ce), GFP_KERNEL); - if (!ce) { - dev_err(dev, "Not enough memory for clock entry.\n"); - return -ENOMEM; - } - - if (con_id) { - ce->con_id = kstrdup(con_id, GFP_KERNEL); - if (!ce->con_id) { - dev_err(dev, - "Not enough memory for clock connection ID.\n"); - kfree(ce); - return -ENOMEM; - } - } - - mutex_lock(&prd->lock); - list_add_tail(&ce->node, &prd->clock_list); - mutex_unlock(&prd->lock); - return 0; -} - -/** - * __pm_runtime_clk_remove - Destroy runtime PM clock entry. - * @ce: Runtime PM clock entry to destroy. - * - * This routine must be called under the mutex protecting the runtime PM list - * of clocks corresponding the the @ce's device. - */ -static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) -{ - if (!ce) - return; - - list_del(&ce->node); - - if (ce->status < PCE_STATUS_ERROR) { - if (ce->status == PCE_STATUS_ENABLED) - clk_disable(ce->clk); - - if (ce->status >= PCE_STATUS_ACQUIRED) - clk_put(ce->clk); - } - - if (ce->con_id) - kfree(ce->con_id); - - kfree(ce); -} - -/** - * pm_runtime_clk_remove - Stop using a device clock for runtime PM. - * @dev: Device whose clock should not be used for runtime PM any more. - * @con_id: Connection ID of the clock. - * - * Remove the clock represented by @con_id from the list of clocks used for - * the runtime PM of @dev. - */ -void pm_runtime_clk_remove(struct device *dev, const char *con_id) -{ - struct pm_runtime_clk_data *prd = __to_prd(dev); - struct pm_clock_entry *ce; - - if (!prd) - return; - - mutex_lock(&prd->lock); - - list_for_each_entry(ce, &prd->clock_list, node) { - if (!con_id && !ce->con_id) { - __pm_runtime_clk_remove(ce); - break; - } else if (!con_id || !ce->con_id) { - continue; - } else if (!strcmp(con_id, ce->con_id)) { - __pm_runtime_clk_remove(ce); - break; - } - } - - mutex_unlock(&prd->lock); -} - -/** - * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. - * @dev: Device to initialize the list of runtime PM clocks for. - * - * Allocate a struct pm_runtime_clk_data object, initialize its lock member and - * make the @dev's power.subsys_data field point to it. - */ -int pm_runtime_clk_init(struct device *dev) -{ - struct pm_runtime_clk_data *prd; - - prd = kzalloc(sizeof(*prd), GFP_KERNEL); - if (!prd) { - dev_err(dev, "Not enough memory fo runtime PM data.\n"); - return -ENOMEM; - } - - INIT_LIST_HEAD(&prd->clock_list); - mutex_init(&prd->lock); - dev->power.subsys_data = prd; - return 0; -} - -/** - * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. - * @dev: Device to destroy the list of runtime PM clocks for. - * - * Clear the @dev's power.subsys_data field, remove the list of clock entries - * from the struct pm_runtime_clk_data object pointed to by it before and free - * that object. 
- */ -void pm_runtime_clk_destroy(struct device *dev) -{ - struct pm_runtime_clk_data *prd = __to_prd(dev); - struct pm_clock_entry *ce, *c; - - if (!prd) - return; - - dev->power.subsys_data = NULL; - - mutex_lock(&prd->lock); - - list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) - __pm_runtime_clk_remove(ce); - - mutex_unlock(&prd->lock); - - kfree(prd); -} - -/** - * pm_runtime_clk_acquire - Acquire a device clock. - * @dev: Device whose clock is to be acquired. - * @con_id: Connection ID of the clock. - */ -static void pm_runtime_clk_acquire(struct device *dev, - struct pm_clock_entry *ce) -{ - ce->clk = clk_get(dev, ce->con_id); - if (IS_ERR(ce->clk)) { - ce->status = PCE_STATUS_ERROR; - } else { - ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); - } -} - -/** - * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. - * @dev: Device to disable the clocks for. - */ -int pm_runtime_clk_suspend(struct device *dev) -{ - struct pm_runtime_clk_data *prd = __to_prd(dev); - struct pm_clock_entry *ce; - - dev_dbg(dev, "%s()\n", __func__); - - if (!prd) - return 0; - - mutex_lock(&prd->lock); - - list_for_each_entry_reverse(ce, &prd->clock_list, node) { - if (ce->status == PCE_STATUS_NONE) - pm_runtime_clk_acquire(dev, ce); - - if (ce->status < PCE_STATUS_ERROR) { - clk_disable(ce->clk); - ce->status = PCE_STATUS_ACQUIRED; - } - } - - mutex_unlock(&prd->lock); - - return 0; -} - -/** - * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. - * @dev: Device to enable the clocks for. - */ -int pm_runtime_clk_resume(struct device *dev) -{ - struct pm_runtime_clk_data *prd = __to_prd(dev); - struct pm_clock_entry *ce; - - dev_dbg(dev, "%s()\n", __func__); - - if (!prd) - return 0; - - mutex_lock(&prd->lock); - - list_for_each_entry(ce, &prd->clock_list, node) { - if (ce->status == PCE_STATUS_NONE) - pm_runtime_clk_acquire(dev, ce); - - if (ce->status < PCE_STATUS_ERROR) { - clk_enable(ce->clk); - ce->status = PCE_STATUS_ENABLED; - } - } - - mutex_unlock(&prd->lock); - - return 0; -} - -/** - * pm_runtime_clk_notify - Notify routine for device addition and removal. - * @nb: Notifier block object this function is a member of. - * @action: Operation being carried out by the caller. - * @data: Device the routine is being run for. - * - * For this function to work, @nb must be a member of an object of type - * struct pm_clk_notifier_block containing all of the requisite data. - * Specifically, the pwr_domain member of that object is copied to the device's - * pwr_domain field and its con_ids member is used to populate the device's list - * of runtime PM clocks, depending on @action. - * - * If the device's pwr_domain field is already populated with a value different - * from the one stored in the struct pm_clk_notifier_block object, the function - * does nothing. 
- */ -static int pm_runtime_clk_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct pm_clk_notifier_block *clknb; - struct device *dev = data; - char *con_id; - int error; - - dev_dbg(dev, "%s() %ld\n", __func__, action); - - clknb = container_of(nb, struct pm_clk_notifier_block, nb); - - switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - if (dev->pwr_domain) - break; - - error = pm_runtime_clk_init(dev); - if (error) - break; - - dev->pwr_domain = clknb->pwr_domain; - if (clknb->con_ids[0]) { - for (con_id = clknb->con_ids[0]; *con_id; con_id++) - pm_runtime_clk_add(dev, con_id); - } else { - pm_runtime_clk_add(dev, NULL); - } - - break; - case BUS_NOTIFY_DEL_DEVICE: - if (dev->pwr_domain != clknb->pwr_domain) - break; - - dev->pwr_domain = NULL; - pm_runtime_clk_destroy(dev); - break; - } - - return 0; -} - -#else /* !CONFIG_PM_RUNTIME */ - -/** - * enable_clock - Enable a device clock. - * @dev: Device whose clock is to be enabled. - * @con_id: Connection ID of the clock. - */ -static void enable_clock(struct device *dev, const char *con_id) -{ - struct clk *clk; - - clk = clk_get(dev, con_id); - if (!IS_ERR(clk)) { - clk_enable(clk); - clk_put(clk); - dev_info(dev, "Runtime PM disabled, clock forced on.\n"); - } -} - -/** - * disable_clock - Disable a device clock. - * @dev: Device whose clock is to be disabled. - * @con_id: Connection ID of the clock. - */ -static void disable_clock(struct device *dev, const char *con_id) -{ - struct clk *clk; - - clk = clk_get(dev, con_id); - if (!IS_ERR(clk)) { - clk_disable(clk); - clk_put(clk); - dev_info(dev, "Runtime PM disabled, clock forced off.\n"); - } -} - -/** - * pm_runtime_clk_notify - Notify routine for device addition and removal. - * @nb: Notifier block object this function is a member of. - * @action: Operation being carried out by the caller. - * @data: Device the routine is being run for. - * - * For this function to work, @nb must be a member of an object of type - * struct pm_clk_notifier_block containing all of the requisite data. - * Specifically, the con_ids member of that object is used to enable or disable - * the device's clocks, depending on @action. - */ -static int pm_runtime_clk_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct pm_clk_notifier_block *clknb; - struct device *dev = data; - char *con_id; - - dev_dbg(dev, "%s() %ld\n", __func__, action); - - clknb = container_of(nb, struct pm_clk_notifier_block, nb); - - switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - if (clknb->con_ids[0]) { - for (con_id = clknb->con_ids[0]; *con_id; con_id++) - enable_clock(dev, con_id); - } else { - enable_clock(dev, NULL); - } - break; - case BUS_NOTIFY_DEL_DEVICE: - if (clknb->con_ids[0]) { - for (con_id = clknb->con_ids[0]; *con_id; con_id++) - disable_clock(dev, con_id); - } else { - disable_clock(dev, NULL); - } - break; - } - - return 0; -} - -#endif /* !CONFIG_PM_RUNTIME */ - -/** - * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. - * @bus: Bus type to add the notifier to. - * @clknb: Notifier to be added to the given bus type. - * - * The nb member of @clknb is not expected to be initialized and its - * notifier_call member will be replaced with pm_runtime_clk_notify(). However, - * the remaining members of @clknb should be populated prior to calling this - * routine. 
- */ -void pm_runtime_clk_add_notifier(struct bus_type *bus, - struct pm_clk_notifier_block *clknb) -{ - if (!bus || !clknb) - return; - - clknb->nb.notifier_call = pm_runtime_clk_notify; - bus_register_notifier(bus, &clknb->nb); -} diff --git a/trunk/drivers/base/power/generic_ops.c b/trunk/drivers/base/power/generic_ops.c index cb3bb368681c..42f97f925629 100644 --- a/trunk/drivers/base/power/generic_ops.c +++ b/trunk/drivers/base/power/generic_ops.c @@ -73,23 +73,6 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); #endif /* CONFIG_PM_RUNTIME */ #ifdef CONFIG_PM_SLEEP -/** - * pm_generic_prepare - Generic routine preparing a device for power transition. - * @dev: Device to prepare. - * - * Prepare a device for a system-wide power transition. - */ -int pm_generic_prepare(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (drv && drv->pm && drv->pm->prepare) - ret = drv->pm->prepare(dev); - - return ret; -} - /** * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. * @dev: Device to handle. @@ -230,38 +213,16 @@ int pm_generic_restore(struct device *dev) return __pm_generic_resume(dev, PM_EVENT_RESTORE); } EXPORT_SYMBOL_GPL(pm_generic_restore); - -/** - * pm_generic_complete - Generic routine competing a device power transition. - * @dev: Device to handle. - * - * Complete a device power transition during a system-wide power transition. - */ -void pm_generic_complete(struct device *dev) -{ - struct device_driver *drv = dev->driver; - - if (drv && drv->pm && drv->pm->complete) - drv->pm->complete(dev); - - /* - * Let runtime PM try to suspend devices that haven't been in use before - * going into the system-wide sleep state we're resuming from. - */ - pm_runtime_idle(dev); -} #endif /* CONFIG_PM_SLEEP */ struct dev_pm_ops generic_subsys_pm_ops = { #ifdef CONFIG_PM_SLEEP - .prepare = pm_generic_prepare, .suspend = pm_generic_suspend, .resume = pm_generic_resume, .freeze = pm_generic_freeze, .thaw = pm_generic_thaw, .poweroff = pm_generic_poweroff, .restore = pm_generic_restore, - .complete = pm_generic_complete, #endif #ifdef CONFIG_PM_RUNTIME .runtime_suspend = pm_generic_runtime_suspend, diff --git a/trunk/drivers/base/power/main.c b/trunk/drivers/base/power/main.c index aa6320207745..fbc5b6e7c591 100644 --- a/trunk/drivers/base/power/main.c +++ b/trunk/drivers/base/power/main.c @@ -63,7 +63,6 @@ void device_pm_init(struct device *dev) dev->power.wakeup = NULL; spin_lock_init(&dev->power.lock); pm_runtime_init(dev); - INIT_LIST_HEAD(&dev->power.entry); } /** @@ -426,8 +425,10 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) if (dev->pwr_domain) { pm_dev_dbg(dev, state, "EARLY power domain "); - error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); - } else if (dev->type && dev->type->pm) { + pm_noirq_op(dev, &dev->pwr_domain->ops, state); + } + + if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "EARLY type "); error = pm_noirq_op(dev, dev->type->pm, state); } else if (dev->class && dev->class->pm) { @@ -515,8 +516,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) if (dev->pwr_domain) { pm_dev_dbg(dev, state, "power domain "); - error = pm_op(dev, &dev->pwr_domain->ops, state); - goto End; + pm_op(dev, &dev->pwr_domain->ops, state); } if (dev->type && dev->type->pm) { @@ -579,13 +579,11 @@ static bool is_async(struct device *dev) * Execute the appropriate "resume" callback for all devices whose status * indicates that they are suspended. 
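The clock_ops.c file deleted above maintains, per device, a list of clock entries protected by a mutex, with add, remove and suspend/resume operations walking that list. Below is a much-reduced userspace analogue of the add and teardown half of that pattern; the pthread primitives, the entry layout and the function names are substitutes for the kernel's mutex, struct pm_clock_entry and the pm_runtime_clk_*() helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

struct clk_entry {
        char *con_id;                   /* connection ID, may be NULL */
        struct clk_entry *next;
};

struct clk_list {
        pthread_mutex_t lock;
        struct clk_entry *head;
};

static int clk_list_add(struct clk_list *l, const char *con_id)
{
        struct clk_entry *ce = calloc(1, sizeof(*ce));

        if (!ce)
                return -1;
        ce->con_id = con_id ? strdup(con_id) : NULL;

        pthread_mutex_lock(&l->lock);
        ce->next = l->head;             /* kernel version uses list_add_tail() */
        l->head = ce;
        pthread_mutex_unlock(&l->lock);
        return 0;
}

static void clk_list_drain(struct clk_list *l)
{
        pthread_mutex_lock(&l->lock);
        while (l->head) {
                struct clk_entry *ce = l->head;

                l->head = ce->next;
                free(ce->con_id);
                free(ce);
        }
        pthread_mutex_unlock(&l->lock);
}

int main(void)
{
        struct clk_list list = { .lock = PTHREAD_MUTEX_INITIALIZER };

        clk_list_add(&list, "fck");
        clk_list_add(&list, NULL);      /* a clock with no connection ID */
        clk_list_drain(&list);
        puts("done");
        return 0;
}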
*/ -void dpm_resume(pm_message_t state) +static void dpm_resume(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); - might_sleep(); - mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -630,11 +628,12 @@ static void device_complete(struct device *dev, pm_message_t state) { device_lock(dev); - if (dev->pwr_domain) { + if (dev->pwr_domain && dev->pwr_domain->ops.complete) { pm_dev_dbg(dev, state, "completing power domain "); - if (dev->pwr_domain->ops.complete) - dev->pwr_domain->ops.complete(dev); - } else if (dev->type && dev->type->pm) { + dev->pwr_domain->ops.complete(dev); + } + + if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "completing type "); if (dev->type->pm->complete) dev->type->pm->complete(dev); @@ -658,12 +657,10 @@ static void device_complete(struct device *dev, pm_message_t state) * Execute the ->complete() callbacks for all devices whose PM status is not * DPM_ON (this allows new devices to be registered). */ -void dpm_complete(pm_message_t state) +static void dpm_complete(pm_message_t state) { struct list_head list; - might_sleep(); - INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_prepared_list)) { @@ -692,6 +689,7 @@ void dpm_complete(pm_message_t state) */ void dpm_resume_end(pm_message_t state) { + might_sleep(); dpm_resume(state); dpm_complete(state); } @@ -733,12 +731,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) { int error; - if (dev->pwr_domain) { - pm_dev_dbg(dev, state, "LATE power domain "); - error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); - if (error) - return error; - } else if (dev->type && dev->type->pm) { + if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "LATE type "); error = pm_noirq_op(dev, dev->type->pm, state); if (error) @@ -755,6 +748,11 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) return error; } + if (dev->pwr_domain) { + pm_dev_dbg(dev, state, "LATE power domain "); + pm_noirq_op(dev, &dev->pwr_domain->ops, state); + } + return 0; } @@ -842,27 +840,21 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto End; } - if (dev->pwr_domain) { - pm_dev_dbg(dev, state, "power domain "); - error = pm_op(dev, &dev->pwr_domain->ops, state); - goto End; - } - if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "type "); error = pm_op(dev, dev->type->pm, state); - goto End; + goto Domain; } if (dev->class) { if (dev->class->pm) { pm_dev_dbg(dev, state, "class "); error = pm_op(dev, dev->class->pm, state); - goto End; + goto Domain; } else if (dev->class->suspend) { pm_dev_dbg(dev, state, "legacy class "); error = legacy_suspend(dev, state, dev->class->suspend); - goto End; + goto Domain; } } @@ -876,6 +868,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } } + Domain: + if (!error && dev->pwr_domain) { + pm_dev_dbg(dev, state, "power domain "); + pm_op(dev, &dev->pwr_domain->ops, state); + } + End: device_unlock(dev); complete_all(&dev->power.completion); @@ -915,13 +913,11 @@ static int device_suspend(struct device *dev) * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. * @state: PM transition of the system being carried out. 
*/ -int dpm_suspend(pm_message_t state) +static int dpm_suspend(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; - might_sleep(); - mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -968,14 +964,7 @@ static int device_prepare(struct device *dev, pm_message_t state) device_lock(dev); - if (dev->pwr_domain) { - pm_dev_dbg(dev, state, "preparing power domain "); - if (dev->pwr_domain->ops.prepare) - error = dev->pwr_domain->ops.prepare(dev); - suspend_report_result(dev->pwr_domain->ops.prepare, error); - if (error) - goto End; - } else if (dev->type && dev->type->pm) { + if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "preparing type "); if (dev->type->pm->prepare) error = dev->type->pm->prepare(dev); @@ -994,6 +983,13 @@ static int device_prepare(struct device *dev, pm_message_t state) if (dev->bus->pm->prepare) error = dev->bus->pm->prepare(dev); suspend_report_result(dev->bus->pm->prepare, error); + if (error) + goto End; + } + + if (dev->pwr_domain && dev->pwr_domain->ops.prepare) { + pm_dev_dbg(dev, state, "preparing power domain "); + dev->pwr_domain->ops.prepare(dev); } End: @@ -1008,12 +1004,10 @@ static int device_prepare(struct device *dev, pm_message_t state) * * Execute the ->prepare() callback(s) for all devices. */ -int dpm_prepare(pm_message_t state) +static int dpm_prepare(pm_message_t state) { int error = 0; - might_sleep(); - mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_list)) { struct device *dev = to_device(dpm_list.next); @@ -1062,6 +1056,7 @@ int dpm_suspend_start(pm_message_t state) { int error; + might_sleep(); error = dpm_prepare(state); if (!error) error = dpm_suspend(state); diff --git a/trunk/drivers/base/power/runtime.c b/trunk/drivers/base/power/runtime.c index 0d4587b15c55..3172c60d23a9 100644 --- a/trunk/drivers/base/power/runtime.c +++ b/trunk/drivers/base/power/runtime.c @@ -168,6 +168,7 @@ static int rpm_check_suspend_allowed(struct device *dev) static int rpm_idle(struct device *dev, int rpmflags) { int (*callback)(struct device *); + int (*domain_callback)(struct device *); int retval; retval = rpm_check_suspend_allowed(dev); @@ -213,9 +214,7 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - if (dev->pwr_domain) - callback = dev->pwr_domain->ops.runtime_idle; - else if (dev->type && dev->type->pm) + if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_idle; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_idle; @@ -224,10 +223,19 @@ static int rpm_idle(struct device *dev, int rpmflags) else callback = NULL; - if (callback) { + if (dev->pwr_domain) + domain_callback = dev->pwr_domain->ops.runtime_idle; + else + domain_callback = NULL; + + if (callback || domain_callback) { spin_unlock_irq(&dev->power.lock); - callback(dev); + if (domain_callback) + retval = domain_callback(dev); + + if (!retval && callback) + callback(dev); spin_lock_irq(&dev->power.lock); } @@ -374,9 +382,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_SUSPENDING); - if (dev->pwr_domain) - callback = dev->pwr_domain->ops.runtime_suspend; - else if (dev->type && dev->type->pm) + if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_suspend; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_suspend; @@ -394,6 +400,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) else pm_runtime_cancel_pending(dev); } else { + if (dev->pwr_domain) + 
rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev); no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -574,8 +582,9 @@ static int rpm_resume(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_RESUMING); if (dev->pwr_domain) - callback = dev->pwr_domain->ops.runtime_resume; - else if (dev->type && dev->type->pm) + rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); + + if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_resume; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_resume; diff --git a/trunk/drivers/base/power/sysfs.c b/trunk/drivers/base/power/sysfs.c index a9f5b8979611..fff49bee781d 100644 --- a/trunk/drivers/base/power/sysfs.c +++ b/trunk/drivers/base/power/sysfs.c @@ -212,9 +212,8 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, autosuspend_delay_ms_store); -#endif /* CONFIG_PM_RUNTIME */ +#endif -#ifdef CONFIG_PM_SLEEP static ssize_t wake_show(struct device * dev, struct device_attribute *attr, char * buf) { @@ -249,6 +248,7 @@ wake_store(struct device * dev, struct device_attribute *attr, static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); +#ifdef CONFIG_PM_SLEEP static ssize_t wakeup_count_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/trunk/drivers/base/power/wakeup.c b/trunk/drivers/base/power/wakeup.c index 84f7c7d5a098..4573c83df6dd 100644 --- a/trunk/drivers/base/power/wakeup.c +++ b/trunk/drivers/base/power/wakeup.c @@ -110,6 +110,7 @@ void wakeup_source_add(struct wakeup_source *ws) spin_lock_irq(&events_lock); list_add_rcu(&ws->entry, &wakeup_sources); spin_unlock_irq(&events_lock); + synchronize_rcu(); } EXPORT_SYMBOL_GPL(wakeup_source_add); @@ -257,7 +258,7 @@ void device_set_wakeup_capable(struct device *dev, bool capable) if (!!dev->power.can_wakeup == !!capable) return; - if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { + if (device_is_registered(dev)) { if (capable) { if (wakeup_sysfs_add(dev)) return; diff --git a/trunk/drivers/base/sys.c b/trunk/drivers/base/sys.c index 9dff77bfe1e3..acde9b5ee131 100644 --- a/trunk/drivers/base/sys.c +++ b/trunk/drivers/base/sys.c @@ -328,8 +328,203 @@ void sysdev_unregister(struct sys_device *sysdev) kobject_put(&sysdev->kobj); } -EXPORT_SYMBOL_GPL(sysdev_register); -EXPORT_SYMBOL_GPL(sysdev_unregister); + +#ifndef CONFIG_ARCH_NO_SYSDEV_OPS +/** + * sysdev_shutdown - Shut down all system devices. + * + * Loop over each class of system devices, and the devices in each + * of those classes. For each device, we call the shutdown method for + * each driver registered for the device - the auxiliaries, + * and the class driver. + * + * Note: The list is iterated in reverse order, so that we shut down + * child devices before we shut down their parents. The list ordering + * is guaranteed by virtue of the fact that child devices are registered + * after their parents. 
+ */ +void sysdev_shutdown(void) +{ + struct sysdev_class *cls; + + pr_debug("Shutting Down System Devices\n"); + + mutex_lock(&sysdev_drivers_lock); + list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { + struct sys_device *sysdev; + + pr_debug("Shutting down type '%s':\n", + kobject_name(&cls->kset.kobj)); + + list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { + struct sysdev_driver *drv; + pr_debug(" %s\n", kobject_name(&sysdev->kobj)); + + /* Call auxiliary drivers first */ + list_for_each_entry(drv, &cls->drivers, entry) { + if (drv->shutdown) + drv->shutdown(sysdev); + } + + /* Now call the generic one */ + if (cls->shutdown) + cls->shutdown(sysdev); + } + } + mutex_unlock(&sysdev_drivers_lock); +} + +static void __sysdev_resume(struct sys_device *dev) +{ + struct sysdev_class *cls = dev->cls; + struct sysdev_driver *drv; + + /* First, call the class-specific one */ + if (cls->resume) + cls->resume(dev); + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled after %pF\n", cls->resume); + + /* Call auxiliary drivers next. */ + list_for_each_entry(drv, &cls->drivers, entry) { + if (drv->resume) + drv->resume(dev); + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled after %pF\n", drv->resume); + } +} + +/** + * sysdev_suspend - Suspend all system devices. + * @state: Power state to enter. + * + * We perform an almost identical operation as sysdev_shutdown() + * above, though calling ->suspend() instead. Interrupts are disabled + * when this called. Devices are responsible for both saving state and + * quiescing or powering down the device. + * + * This is only called by the device PM core, so we let them handle + * all synchronization. + */ +int sysdev_suspend(pm_message_t state) +{ + struct sysdev_class *cls; + struct sys_device *sysdev, *err_dev; + struct sysdev_driver *drv, *err_drv; + int ret; + + pr_debug("Checking wake-up interrupts\n"); + + /* Return error code if there are any wake-up interrupts pending */ + ret = check_wakeup_irqs(); + if (ret) + return ret; + + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled while suspending system devices\n"); + + pr_debug("Suspending System Devices\n"); + + list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { + pr_debug("Suspending type '%s':\n", + kobject_name(&cls->kset.kobj)); + + list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { + pr_debug(" %s\n", kobject_name(&sysdev->kobj)); + + /* Call auxiliary drivers first */ + list_for_each_entry(drv, &cls->drivers, entry) { + if (drv->suspend) { + ret = drv->suspend(sysdev, state); + if (ret) + goto aux_driver; + } + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled after %pF\n", + drv->suspend); + } + + /* Now call the generic one */ + if (cls->suspend) { + ret = cls->suspend(sysdev, state); + if (ret) + goto cls_driver; + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled after %pF\n", + cls->suspend); + } + } + } + return 0; + /* resume current sysdev */ +cls_driver: + drv = NULL; + printk(KERN_ERR "Class suspend failed for %s: %d\n", + kobject_name(&sysdev->kobj), ret); + +aux_driver: + if (drv) + printk(KERN_ERR "Class driver suspend failed for %s: %d\n", + kobject_name(&sysdev->kobj), ret); + list_for_each_entry(err_drv, &cls->drivers, entry) { + if (err_drv == drv) + break; + if (err_drv->resume) + err_drv->resume(sysdev); + } + + /* resume other sysdevs in current class */ + list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { + if (err_dev == sysdev) + break; + pr_debug(" %s\n", kobject_name(&err_dev->kobj)); + 
__sysdev_resume(err_dev); + } + + /* resume other classes */ + list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) { + list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { + pr_debug(" %s\n", kobject_name(&err_dev->kobj)); + __sysdev_resume(err_dev); + } + } + return ret; +} +EXPORT_SYMBOL_GPL(sysdev_suspend); + +/** + * sysdev_resume - Bring system devices back to life. + * + * Similar to sysdev_suspend(), but we iterate the list forwards + * to guarantee that parent devices are resumed before their children. + * + * Note: Interrupts are disabled when called. + */ +int sysdev_resume(void) +{ + struct sysdev_class *cls; + + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled while resuming system devices\n"); + + pr_debug("Resuming System Devices\n"); + + list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { + struct sys_device *sysdev; + + pr_debug("Resuming type '%s':\n", + kobject_name(&cls->kset.kobj)); + + list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { + pr_debug(" %s\n", kobject_name(&sysdev->kobj)); + + __sysdev_resume(sysdev); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(sysdev_resume); +#endif /* CONFIG_ARCH_NO_SYSDEV_OPS */ int __init system_bus_init(void) { @@ -339,6 +534,9 @@ int __init system_bus_init(void) return 0; } +EXPORT_SYMBOL_GPL(sysdev_register); +EXPORT_SYMBOL_GPL(sysdev_unregister); + #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) ssize_t sysdev_store_ulong(struct sys_device *sysdev, diff --git a/trunk/drivers/block/DAC960.c b/trunk/drivers/block/DAC960.c index e086fbbbe853..8066d086578a 100644 --- a/trunk/drivers/block/DAC960.c +++ b/trunk/drivers/block/DAC960.c @@ -2547,6 +2547,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) disk->major = MajorNumber; disk->first_minor = n << DAC960_MaxPartitionsBits; disk->fops = &DAC960_BlockDeviceOperations; + disk->events = DISK_EVENT_MEDIA_CHANGE; } /* Indicate the Block Device Registration completed successfully, diff --git a/trunk/drivers/block/amiflop.c b/trunk/drivers/block/amiflop.c index 8eba86bba599..456c0cc90dcf 100644 --- a/trunk/drivers/block/amiflop.c +++ b/trunk/drivers/block/amiflop.c @@ -1736,6 +1736,7 @@ static int __init fd_probe_drives(void) disk->major = FLOPPY_MAJOR; disk->first_minor = drive; disk->fops = &floppy_fops; + disk->events = DISK_EVENT_MEDIA_CHANGE; sprintf(disk->disk_name, "fd%d", drive); disk->private_data = &unit[drive]; set_capacity(disk, 880*2); diff --git a/trunk/drivers/block/ataflop.c b/trunk/drivers/block/ataflop.c index ede16c64ff07..c871eae14120 100644 --- a/trunk/drivers/block/ataflop.c +++ b/trunk/drivers/block/ataflop.c @@ -1964,6 +1964,7 @@ static int __init atari_floppy_init (void) unit[i].disk->first_minor = i; sprintf(unit[i].disk->disk_name, "fd%d", i); unit[i].disk->fops = &floppy_fops; + unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE; unit[i].disk->private_data = &unit[i]; unit[i].disk->queue = blk_init_queue(do_fd_request, &ataflop_lock); diff --git a/trunk/drivers/block/floppy.c b/trunk/drivers/block/floppy.c index db8f88586c8d..301d7a9a41a6 100644 --- a/trunk/drivers/block/floppy.c +++ b/trunk/drivers/block/floppy.c @@ -4205,6 +4205,7 @@ static int __init floppy_init(void) disks[dr]->major = FLOPPY_MAJOR; disks[dr]->first_minor = TOMINOR(dr); disks[dr]->fops = &floppy_fops; + disks[dr]->events = DISK_EVENT_MEDIA_CHANGE; sprintf(disks[dr]->disk_name, "fd%d", dr); init_timer(&motor_off_timer[dr]); diff --git a/trunk/drivers/block/paride/pcd.c b/trunk/drivers/block/paride/pcd.c 
index 8690e31d9932..2f2ccf686251 100644 --- a/trunk/drivers/block/paride/pcd.c +++ b/trunk/drivers/block/paride/pcd.c @@ -320,6 +320,7 @@ static void pcd_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, cd->name); /* umm... */ disk->fops = &pcd_bdops; + disk->events = DISK_EVENT_MEDIA_CHANGE; } } diff --git a/trunk/drivers/block/paride/pd.c b/trunk/drivers/block/paride/pd.c index 869e7676d46f..21dfdb776869 100644 --- a/trunk/drivers/block/paride/pd.c +++ b/trunk/drivers/block/paride/pd.c @@ -837,6 +837,7 @@ static void pd_probe_drive(struct pd_unit *disk) p->fops = &pd_fops; p->major = major; p->first_minor = (disk - pd) << PD_BITS; + p->events = DISK_EVENT_MEDIA_CHANGE; disk->gd = p; p->private_data = disk; p->queue = pd_queue; diff --git a/trunk/drivers/block/paride/pf.c b/trunk/drivers/block/paride/pf.c index f21b520ef419..7adeb1edbf43 100644 --- a/trunk/drivers/block/paride/pf.c +++ b/trunk/drivers/block/paride/pf.c @@ -294,6 +294,7 @@ static void __init pf_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, pf->name); disk->fops = &pf_fops; + disk->events = DISK_EVENT_MEDIA_CHANGE; if (!(*drives[unit])[D_PRT]) pf_drive_count++; } diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c index 9712fad82bc6..16dc3645291c 100644 --- a/trunk/drivers/block/rbd.c +++ b/trunk/drivers/block/rbd.c @@ -92,8 +92,6 @@ struct rbd_client { struct list_head node; }; -struct rbd_req_coll; - /* * a single io request */ @@ -102,24 +100,6 @@ struct rbd_request { struct bio *bio; /* cloned bio */ struct page **pages; /* list of used pages */ u64 len; - int coll_index; - struct rbd_req_coll *coll; -}; - -struct rbd_req_status { - int done; - int rc; - u64 bytes; -}; - -/* - * a collection of requests - */ -struct rbd_req_coll { - int total; - int num_done; - struct kref kref; - struct rbd_req_status status[0]; }; struct rbd_snap { @@ -436,17 +416,6 @@ static void rbd_put_client(struct rbd_device *rbd_dev) rbd_dev->client = NULL; } -/* - * Destroy requests collection - */ -static void rbd_coll_release(struct kref *kref) -{ - struct rbd_req_coll *coll = - container_of(kref, struct rbd_req_coll, kref); - - dout("rbd_coll_release %p\n", coll); - kfree(coll); -} /* * Create a new header structure, translate header format from the on-disk @@ -621,14 +590,6 @@ static u64 rbd_get_segment(struct rbd_image_header *header, return len; } -static int rbd_get_num_segments(struct rbd_image_header *header, - u64 ofs, u64 len) -{ - u64 start_seg = ofs >> header->obj_order; - u64 end_seg = (ofs + len - 1) >> header->obj_order; - return end_seg - start_seg + 1; -} - /* * bio helpers */ @@ -774,50 +735,6 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops) kfree(ops); } -static void rbd_coll_end_req_index(struct request *rq, - struct rbd_req_coll *coll, - int index, - int ret, u64 len) -{ - struct request_queue *q; - int min, max, i; - - dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", - coll, index, ret, len); - - if (!rq) - return; - - if (!coll) { - blk_end_request(rq, ret, len); - return; - } - - q = rq->q; - - spin_lock_irq(q->queue_lock); - coll->status[index].done = 1; - coll->status[index].rc = ret; - coll->status[index].bytes = len; - max = min = coll->num_done; - while (max < coll->total && coll->status[max].done) - max++; - - for (i = min; istatus[i].rc, - coll->status[i].bytes); - coll->num_done++; - kref_put(&coll->kref, rbd_coll_release); - } - spin_unlock_irq(q->queue_lock); -} - -static void rbd_coll_end_req(struct rbd_request *req, - int ret, u64 
len) -{ - rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len); -} - /* * Send ceph osd request */ @@ -832,8 +749,6 @@ static int rbd_do_request(struct request *rq, int flags, struct ceph_osd_req_op *ops, int num_reply, - struct rbd_req_coll *coll, - int coll_index, void (*rbd_cb)(struct ceph_osd_request *req, struct ceph_msg *msg), struct ceph_osd_request **linger_req, @@ -848,20 +763,12 @@ static int rbd_do_request(struct request *rq, struct ceph_osd_request_head *reqhead; struct rbd_image_header *header = &dev->header; + ret = -ENOMEM; req_data = kzalloc(sizeof(*req_data), GFP_NOIO); - if (!req_data) { - if (coll) - rbd_coll_end_req_index(rq, coll, coll_index, - -ENOMEM, len); - return -ENOMEM; - } - - if (coll) { - req_data->coll = coll; - req_data->coll_index = coll_index; - } + if (!req_data) + goto done; - dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); + dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); down_read(&header->snap_rwsem); @@ -870,9 +777,9 @@ static int rbd_do_request(struct request *rq, ops, false, GFP_NOIO, pages, bio); - if (!req) { + if (IS_ERR(req)) { up_read(&header->snap_rwsem); - ret = -ENOMEM; + ret = PTR_ERR(req); goto done_pages; } @@ -921,8 +828,7 @@ static int rbd_do_request(struct request *rq, ret = ceph_osdc_wait_request(&dev->client->osdc, req); if (ver) *ver = le64_to_cpu(req->r_reassert_version.version); - dout("reassert_ver=%lld\n", - le64_to_cpu(req->r_reassert_version.version)); + dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); ceph_osdc_put_request(req); } return ret; @@ -931,8 +837,10 @@ static int rbd_do_request(struct request *rq, bio_chain_put(req_data->bio); ceph_osdc_put_request(req); done_pages: - rbd_coll_end_req(req_data, ret, len); kfree(req_data); +done: + if (rq) + blk_end_request(rq, ret, len); return ret; } @@ -966,7 +874,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) bytes = req_data->len; } - rbd_coll_end_req(req_data, rc, bytes); + blk_end_request(req_data->rq, rc, bytes); if (req_data->bio) bio_chain_put(req_data->bio); @@ -1026,7 +934,6 @@ static int rbd_req_sync_op(struct rbd_device *dev, flags, ops, 2, - NULL, 0, NULL, linger_req, ver); if (ret < 0) @@ -1052,9 +959,7 @@ static int rbd_do_op(struct request *rq, u64 snapid, int opcode, int flags, int num_reply, u64 ofs, u64 len, - struct bio *bio, - struct rbd_req_coll *coll, - int coll_index) + struct bio *bio) { char *seg_name; u64 seg_ofs; @@ -1090,10 +995,7 @@ static int rbd_do_op(struct request *rq, flags, ops, num_reply, - coll, coll_index, rbd_req_cb, 0, NULL); - - rbd_destroy_ops(ops); done: kfree(seg_name); return ret; @@ -1106,15 +1008,13 @@ static int rbd_req_write(struct request *rq, struct rbd_device *rbd_dev, struct ceph_snap_context *snapc, u64 ofs, u64 len, - struct bio *bio, - struct rbd_req_coll *coll, - int coll_index) + struct bio *bio) { return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 2, - ofs, len, bio, coll, coll_index); + ofs, len, bio); } /* @@ -1124,16 +1024,14 @@ static int rbd_req_read(struct request *rq, struct rbd_device *rbd_dev, u64 snapid, u64 ofs, u64 len, - struct bio *bio, - struct rbd_req_coll *coll, - int coll_index) + struct bio *bio) { return rbd_do_op(rq, rbd_dev, NULL, (snapid ? 
snapid : CEPH_NOSNAP), CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 2, - ofs, len, bio, coll, coll_index); + ofs, len, bio); } /* @@ -1165,9 +1063,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, { struct ceph_osd_req_op *ops; struct page **pages = NULL; - int ret; - - ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); + int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); if (ret < 0) return ret; @@ -1181,7 +1077,6 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, CEPH_OSD_FLAG_READ, ops, 1, - NULL, 0, rbd_simple_req_cb, 0, NULL); rbd_destroy_ops(ops); @@ -1379,20 +1274,6 @@ static int rbd_req_sync_exec(struct rbd_device *dev, return ret; } -static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) -{ - struct rbd_req_coll *coll = - kzalloc(sizeof(struct rbd_req_coll) + - sizeof(struct rbd_req_status) * num_reqs, - GFP_ATOMIC); - - if (!coll) - return NULL; - coll->total = num_reqs; - kref_init(&coll->kref); - return coll; -} - /* * block device queue callback */ @@ -1410,8 +1291,6 @@ static void rbd_rq_fn(struct request_queue *q) bool do_write; int size, op_size = 0; u64 ofs; - int num_segs, cur_seg = 0; - struct rbd_req_coll *coll; /* peek at request from block layer */ if (!rq) @@ -1442,14 +1321,6 @@ static void rbd_rq_fn(struct request_queue *q) do_write ? "write" : "read", size, blk_rq_pos(rq) * 512ULL); - num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); - coll = rbd_alloc_coll(num_segs); - if (!coll) { - spin_lock_irq(q->queue_lock); - __blk_end_request_all(rq, -ENOMEM); - goto next; - } - do { /* a bio clone to be passed down to OSD req */ dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); @@ -1457,41 +1328,35 @@ static void rbd_rq_fn(struct request_queue *q) rbd_dev->header.block_name, ofs, size, NULL, NULL); - kref_get(&coll->kref); bio = bio_chain_clone(&rq_bio, &next_bio, &bp, op_size, GFP_ATOMIC); if (!bio) { - rbd_coll_end_req_index(rq, coll, cur_seg, - -ENOMEM, op_size); - goto next_seg; + spin_lock_irq(q->queue_lock); + __blk_end_request_all(rq, -ENOMEM); + goto next; } - /* init OSD command: write or read */ if (do_write) rbd_req_write(rq, rbd_dev, rbd_dev->header.snapc, ofs, - op_size, bio, - coll, cur_seg); + op_size, bio); else rbd_req_read(rq, rbd_dev, cur_snap_id(rbd_dev), ofs, - op_size, bio, - coll, cur_seg); + op_size, bio); -next_seg: size -= op_size; ofs += op_size; - cur_seg++; rq_bio = next_bio; } while (size > 0); - kref_put(&coll->kref, rbd_coll_release); if (bp) bio_pair_release(bp); + spin_lock_irq(q->queue_lock); next: rq = blk_fetch_request(q); diff --git a/trunk/drivers/block/swim.c b/trunk/drivers/block/swim.c index fd5adcd55944..24a482f2fbd6 100644 --- a/trunk/drivers/block/swim.c +++ b/trunk/drivers/block/swim.c @@ -858,6 +858,7 @@ static int __devinit swim_floppy_init(struct swim_priv *swd) swd->unit[drive].disk->first_minor = drive; sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); swd->unit[drive].disk->fops = &floppy_fops; + swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE; swd->unit[drive].disk->private_data = &swd->unit[drive]; swd->unit[drive].disk->queue = swd->queue; set_capacity(swd->unit[drive].disk, 2880); diff --git a/trunk/drivers/block/swim3.c b/trunk/drivers/block/swim3.c index 773bfa792777..4c10f56facbf 100644 --- a/trunk/drivers/block/swim3.c +++ b/trunk/drivers/block/swim3.c @@ -1163,6 +1163,7 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device disk->major = FLOPPY_MAJOR; disk->first_minor = i; disk->fops = &floppy_fops; + disk->events 
= DISK_EVENT_MEDIA_CHANGE; disk->private_data = &floppy_states[i]; disk->queue = swim3_queue; disk->flags |= GENHD_FL_REMOVABLE; diff --git a/trunk/drivers/block/ub.c b/trunk/drivers/block/ub.c index 0e376d46bdd1..68b9430c7cfe 100644 --- a/trunk/drivers/block/ub.c +++ b/trunk/drivers/block/ub.c @@ -2334,6 +2334,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) disk->major = UB_MAJOR; disk->first_minor = lun->id * UB_PARTS_PER_LUN; disk->fops = &ub_bd_fops; + disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = lun; disk->driverfs_dev = &sc->intf->dev; diff --git a/trunk/drivers/block/xsysace.c b/trunk/drivers/block/xsysace.c index 6c7fd7db6dff..645ff765cd12 100644 --- a/trunk/drivers/block/xsysace.c +++ b/trunk/drivers/block/xsysace.c @@ -1005,6 +1005,7 @@ static int __devinit ace_setup(struct ace_device *ace) ace->gd->major = ace_major; ace->gd->first_minor = ace->id * ACE_NUM_MINORS; ace->gd->fops = &ace_fops; + ace->gd->events = DISK_EVENT_MEDIA_CHANGE; ace->gd->queue = ace->queue; ace->gd->private_data = ace; snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); diff --git a/trunk/drivers/cdrom/cdrom.c b/trunk/drivers/cdrom/cdrom.c index 75fb965b8f72..514dd8efaf73 100644 --- a/trunk/drivers/cdrom/cdrom.c +++ b/trunk/drivers/cdrom/cdrom.c @@ -986,9 +986,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "entering cdrom_open\n"); - /* open is event synchronization point, check events first */ - check_disk_change(bdev); - /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. */ cdi->use_count++; @@ -1015,6 +1012,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", cdi->name, cdi->use_count); + /* Do this on open. 
Don't wait for mount, because they might + not be mounting, but opening with O_NONBLOCK */ + check_disk_change(bdev); return 0; err_release: if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { diff --git a/trunk/drivers/cdrom/gdrom.c b/trunk/drivers/cdrom/gdrom.c index 3ceaf006e7f0..b2b034fea34e 100644 --- a/trunk/drivers/cdrom/gdrom.c +++ b/trunk/drivers/cdrom/gdrom.c @@ -803,6 +803,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr) goto probe_fail_cdrom_register; } gd.disk->fops = &gdrom_bdops; + gd.disk->events = DISK_EVENT_MEDIA_CHANGE; /* latch on to the interrupt */ err = gdrom_set_interrupt_handlers(); if (err) diff --git a/trunk/drivers/cdrom/viocd.c b/trunk/drivers/cdrom/viocd.c index e427fbe45999..4e874c5fa605 100644 --- a/trunk/drivers/cdrom/viocd.c +++ b/trunk/drivers/cdrom/viocd.c @@ -626,6 +626,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) gendisk->queue = q; gendisk->fops = &viocd_fops; gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; + gendisk->events = DISK_EVENT_MEDIA_CHANGE; set_capacity(gendisk, 0); gendisk->private_data = d; d->viocd_disk = gendisk; diff --git a/trunk/drivers/char/hpet.c b/trunk/drivers/char/hpet.c index 051474c65b78..7066e801b9d3 100644 --- a/trunk/drivers/char/hpet.c +++ b/trunk/drivers/char/hpet.c @@ -84,6 +84,8 @@ static struct clocksource clocksource_hpet = { .rating = 250, .read = read_hpet, .mask = CLOCKSOURCE_MASK(64), + .mult = 0, /* to be calculated */ + .shift = 10, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static struct clocksource *hpet_clocksource; @@ -932,7 +934,9 @@ int hpet_alloc(struct hpet_data *hdp) if (!hpet_clocksource) { hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr); - clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq); + clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq, + clocksource_hpet.shift); + clocksource_register(&clocksource_hpet); hpetp->hp_clocksource = &clocksource_hpet; hpet_clocksource = &clocksource_hpet; } diff --git a/trunk/drivers/char/hw_random/n2-drv.c b/trunk/drivers/char/hw_random/n2-drv.c index ac6739e085e3..43ac61978d8b 100644 --- a/trunk/drivers/char/hw_random/n2-drv.c +++ b/trunk/drivers/char/hw_random/n2-drv.c @@ -619,18 +619,15 @@ static void __devinit n2rng_driver_version(void) pr_info("%s", version); } -static const struct of_device_id n2rng_match[]; static int __devinit n2rng_probe(struct platform_device *op) { - const struct of_device_id *match; int victoria_falls; int err = -ENOMEM; struct n2rng *np; - match = of_match_device(n2rng_match, &op->dev); - if (!match) + if (!op->dev.of_match) return -EINVAL; - victoria_falls = (match->data != NULL); + victoria_falls = (op->dev.of_match->data != NULL); n2rng_driver_version(); np = kzalloc(sizeof(*np), GFP_KERNEL); diff --git a/trunk/drivers/char/ipmi/ipmi_si_intf.c b/trunk/drivers/char/ipmi/ipmi_si_intf.c index 64c6b8530615..cc6c9b2546a3 100644 --- a/trunk/drivers/char/ipmi/ipmi_si_intf.c +++ b/trunk/drivers/char/ipmi/ipmi_si_intf.c @@ -2554,11 +2554,9 @@ static struct pci_driver ipmi_pci_driver = { }; #endif /* CONFIG_PCI */ -static struct of_device_id ipmi_match[]; static int __devinit ipmi_probe(struct platform_device *dev) { #ifdef CONFIG_OF - const struct of_device_id *match; struct smi_info *info; struct resource resource; const __be32 *regsize, *regspacing, *regshift; @@ -2568,8 +2566,7 @@ static int __devinit ipmi_probe(struct platform_device *dev) dev_info(&dev->dev, "probing via device tree\n"); 
- match = of_match_device(ipmi_match, &dev->dev); - if (!match) + if (!dev->dev.of_match) return -EINVAL; ret = of_address_to_resource(np, 0, &resource); @@ -2604,7 +2601,7 @@ static int __devinit ipmi_probe(struct platform_device *dev) return -ENOMEM; } - info->si_type = (enum si_type) match->data; + info->si_type = (enum si_type) dev->dev.of_match->data; info->addr_source = SI_DEVICETREE; info->irq_setup = std_irq_setup; diff --git a/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 39ccdeada791..d6412c16385f 100644 --- a/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev) } #ifdef CONFIG_OF -static int __devinit hwicap_of_probe(struct platform_device *op, - const struct hwicap_driver_config *config) +static int __devinit hwicap_of_probe(struct platform_device *op) { struct resource res; const unsigned int *id; const char *family; int rc; + const struct hwicap_driver_config *config = op->dev.of_match->data; const struct config_registers *regs; @@ -751,24 +751,20 @@ static int __devinit hwicap_of_probe(struct platform_device *op, regs); } #else -static inline int hwicap_of_probe(struct platform_device *op, - const struct hwicap_driver_config *config) +static inline int hwicap_of_probe(struct platform_device *op) { return -EINVAL; } #endif /* CONFIG_OF */ -static const struct of_device_id __devinitconst hwicap_of_match[]; static int __devinit hwicap_drv_probe(struct platform_device *pdev) { - const struct of_device_id *match; struct resource *res; const struct config_registers *regs; const char *family; - match = of_match_device(hwicap_of_match, &pdev->dev); - if (match) - return hwicap_of_probe(pdev, match->data); + if (pdev->dev.of_match) + return hwicap_of_probe(pdev); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) diff --git a/trunk/drivers/clk/clkdev.c b/trunk/drivers/clk/clkdev.c index 6db161f64ae0..0fc0a79852de 100644 --- a/trunk/drivers/clk/clkdev.c +++ b/trunk/drivers/clk/clkdev.c @@ -32,9 +32,10 @@ static DEFINE_MUTEX(clocks_mutex); * Then we take the most specific entry - with the following * order of precedence: dev+con > dev only > con only. */ -static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) +static struct clk *clk_find(const char *dev_id, const char *con_id) { - struct clk_lookup *p, *cl = NULL; + struct clk_lookup *p; + struct clk *clk = NULL; int match, best = 0; list_for_each_entry(p, &clocks, node) { @@ -51,27 +52,27 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) } if (match > best) { - cl = p; + clk = p->clk; if (match != 3) best = match; else break; } } - return cl; + return clk; } struct clk *clk_get_sys(const char *dev_id, const char *con_id) { - struct clk_lookup *cl; + struct clk *clk; mutex_lock(&clocks_mutex); - cl = clk_find(dev_id, con_id); - if (cl && !__clk_get(cl->clk)) - cl = NULL; + clk = clk_find(dev_id, con_id); + if (clk && !__clk_get(clk)) + clk = NULL; mutex_unlock(&clocks_mutex); - return cl ? cl->clk : ERR_PTR(-ENOENT); + return clk ? 
clk : ERR_PTR(-ENOENT); } EXPORT_SYMBOL(clk_get_sys); diff --git a/trunk/drivers/clocksource/Kconfig b/trunk/drivers/clocksource/Kconfig deleted file mode 100644 index 110aeeb52f9a..000000000000 --- a/trunk/drivers/clocksource/Kconfig +++ /dev/null @@ -1,2 +0,0 @@ -config CLKSRC_I8253 - bool diff --git a/trunk/drivers/clocksource/Makefile b/trunk/drivers/clocksource/Makefile index cfb6383b543a..be61ece6330b 100644 --- a/trunk/drivers/clocksource/Makefile +++ b/trunk/drivers/clocksource/Makefile @@ -6,4 +6,3 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o -obj-$(CONFIG_CLKSRC_I8253) += i8253.o diff --git a/trunk/drivers/clocksource/cyclone.c b/trunk/drivers/clocksource/cyclone.c index 72f811f73e9c..64e528e8bfa6 100644 --- a/trunk/drivers/clocksource/cyclone.c +++ b/trunk/drivers/clocksource/cyclone.c @@ -29,6 +29,8 @@ static struct clocksource clocksource_cyclone = { .rating = 250, .read = read_cyclone, .mask = CYCLONE_TIMER_MASK, + .mult = 10, + .shift = 0, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -106,8 +108,12 @@ static int __init init_cyclone_clocksource(void) } cyclone_ptr = cyclone_timer; - return clocksource_register_hz(&clocksource_cyclone, - CYCLONE_TIMER_FREQ); + /* sort out mult/shift values: */ + clocksource_cyclone.shift = 22; + clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, + clocksource_cyclone.shift); + + return clocksource_register(&clocksource_cyclone); } arch_initcall(init_cyclone_clocksource); diff --git a/trunk/drivers/clocksource/i8253.c b/trunk/drivers/clocksource/i8253.c deleted file mode 100644 index 225c1761b372..000000000000 --- a/trunk/drivers/clocksource/i8253.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * i8253 PIT clocksource - */ -#include -#include -#include -#include -#include - -#include - -/* - * Since the PIT overflows every tick, its not very useful - * to just read by itself. So use jiffies to emulate a free - * running counter: - */ -static cycle_t i8253_read(struct clocksource *cs) -{ - static int old_count; - static u32 old_jifs; - unsigned long flags; - int count; - u32 jifs; - - raw_spin_lock_irqsave(&i8253_lock, flags); - /* - * Although our caller may have the read side of xtime_lock, - * this is now a seqlock, and we are cheating in this routine - * by having side effects on state that we cannot undo if - * there is a collision on the seqlock and our caller has to - * retry. (Namely, old_jifs and old_count.) So we must treat - * jiffies as volatile despite the lock. We read jiffies - * before latching the timer count to guarantee that although - * the jiffies value might be older than the count (that is, - * the counter may underflow between the last point where - * jiffies was incremented and the point where we latch the - * count), it cannot be newer. - */ - jifs = jiffies; - outb_pit(0x00, PIT_MODE); /* latch the count ASAP */ - count = inb_pit(PIT_CH0); /* read the latched count */ - count |= inb_pit(PIT_CH0) << 8; - - /* VIA686a test code... reset the latch if count > max + 1 */ - if (count > LATCH) { - outb_pit(0x34, PIT_MODE); - outb_pit(PIT_LATCH & 0xff, PIT_CH0); - outb_pit(PIT_LATCH >> 8, PIT_CH0); - count = PIT_LATCH - 1; - } - - /* - * It's possible for count to appear to go the wrong way for a - * couple of reasons: - * - * 1. The timer counter underflows, but we haven't handled the - * resulting interrupt and incremented jiffies yet. - * 2. 
Hardware problem with the timer, not giving us continuous time, - * the counter does small "jumps" upwards on some Pentium systems, - * (see c't 95/10 page 335 for Neptun bug.) - * - * Previous attempts to handle these cases intelligently were - * buggy, so we just do the simple thing now. - */ - if (count > old_count && jifs == old_jifs) - count = old_count; - - old_count = count; - old_jifs = jifs; - - raw_spin_unlock_irqrestore(&i8253_lock, flags); - - count = (PIT_LATCH - 1) - count; - - return (cycle_t)(jifs * PIT_LATCH) + count; -} - -static struct clocksource i8253_cs = { - .name = "pit", - .rating = 110, - .read = i8253_read, - .mask = CLOCKSOURCE_MASK(32), -}; - -int __init clocksource_i8253_init(void) -{ - return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE); -} diff --git a/trunk/drivers/cpufreq/Kconfig b/trunk/drivers/cpufreq/Kconfig index 9fb84853d8e3..ca8ee8093d6c 100644 --- a/trunk/drivers/cpufreq/Kconfig +++ b/trunk/drivers/cpufreq/Kconfig @@ -1,5 +1,3 @@ -menu "CPU Frequency scaling" - config CPU_FREQ bool "CPU Frequency scaling" help @@ -20,6 +18,19 @@ if CPU_FREQ config CPU_FREQ_TABLE tristate +config CPU_FREQ_DEBUG + bool "Enable CPUfreq debugging" + help + Say Y here to enable CPUfreq subsystem (including drivers) + debugging. You will need to activate it via the kernel + command line by passing + cpufreq.debug= + + To get , add + 1 to activate CPUfreq core debugging, + 2 to activate CPUfreq drivers debugging, and + 4 to activate CPUfreq governor debugging + config CPU_FREQ_STAT tristate "CPU frequency translation statistics" select CPU_FREQ_TABLE @@ -179,10 +190,4 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. -menu "x86 CPU frequency scaling drivers" -depends on X86 -source "drivers/cpufreq/Kconfig.x86" -endmenu - -endif -endmenu +endif # CPU_FREQ diff --git a/trunk/drivers/cpufreq/Makefile b/trunk/drivers/cpufreq/Makefile index c7f1a6f16b6e..71fc3b4173f1 100644 --- a/trunk/drivers/cpufreq/Makefile +++ b/trunk/drivers/cpufreq/Makefile @@ -13,29 +13,3 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o -##################################################################################d -# x86 drivers. -# Link order matters. K8 is preferred to ACPI because of firmware bugs in early -# K8 systems. ACPI is preferred to all other hardware-specific drivers. -# speedstep-* is preferred over p4-clockmod. 
- -obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o -obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o -obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o -obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o -obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o -obj-$(CONFIG_X86_LONGHAUL) += longhaul.o -obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o -obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o -obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o -obj-$(CONFIG_X86_LONGRUN) += longrun.o -obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o -obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o -obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o -obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o -obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o -obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o -obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o - -##################################################################################d - diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c index 0a5bea9e3585..2dafc5c38ae7 100644 --- a/trunk/drivers/cpufreq/cpufreq.c +++ b/trunk/drivers/cpufreq/cpufreq.c @@ -32,6 +32,9 @@ #include +#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ + "cpufreq-core", msg) + /** * The "cpufreq driver" - the arch- or hardware-dependent low * level driver of CPUFreq support, and its spinlock. This lock @@ -177,6 +180,93 @@ void cpufreq_cpu_put(struct cpufreq_policy *data) EXPORT_SYMBOL_GPL(cpufreq_cpu_put); +/********************************************************************* + * UNIFIED DEBUG HELPERS * + *********************************************************************/ +#ifdef CONFIG_CPU_FREQ_DEBUG + +/* what part(s) of the CPUfreq subsystem are debugged? */ +static unsigned int debug; + +/* is the debug output ratelimit'ed using printk_ratelimit? User can + * set or modify this value. + */ +static unsigned int debug_ratelimit = 1; + +/* is the printk_ratelimit'ing enabled? It's enabled after a successful + * loading of a cpufreq driver, temporarily disabled when a new policy + * is set, and disabled upon cpufreq driver removal + */ +static unsigned int disable_ratelimit = 1; +static DEFINE_SPINLOCK(disable_ratelimit_lock); + +static void cpufreq_debug_enable_ratelimit(void) +{ + unsigned long flags; + + spin_lock_irqsave(&disable_ratelimit_lock, flags); + if (disable_ratelimit) + disable_ratelimit--; + spin_unlock_irqrestore(&disable_ratelimit_lock, flags); +} + +static void cpufreq_debug_disable_ratelimit(void) +{ + unsigned long flags; + + spin_lock_irqsave(&disable_ratelimit_lock, flags); + disable_ratelimit++; + spin_unlock_irqrestore(&disable_ratelimit_lock, flags); +} + +void cpufreq_debug_printk(unsigned int type, const char *prefix, + const char *fmt, ...) 
+{ + char s[256]; + va_list args; + unsigned int len; + unsigned long flags; + + WARN_ON(!prefix); + if (type & debug) { + spin_lock_irqsave(&disable_ratelimit_lock, flags); + if (!disable_ratelimit && debug_ratelimit + && !printk_ratelimit()) { + spin_unlock_irqrestore(&disable_ratelimit_lock, flags); + return; + } + spin_unlock_irqrestore(&disable_ratelimit_lock, flags); + + len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix); + + va_start(args, fmt); + len += vsnprintf(&s[len], (256 - len), fmt, args); + va_end(args); + + printk(s); + + WARN_ON(len < 5); + } +} +EXPORT_SYMBOL(cpufreq_debug_printk); + + +module_param(debug, uint, 0644); +MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core," + " 2 to debug drivers, and 4 to debug governors."); + +module_param(debug_ratelimit, uint, 0644); +MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:" + " set to 0 to disable ratelimiting."); + +#else /* !CONFIG_CPU_FREQ_DEBUG */ + +static inline void cpufreq_debug_enable_ratelimit(void) { return; } +static inline void cpufreq_debug_disable_ratelimit(void) { return; } + +#endif /* CONFIG_CPU_FREQ_DEBUG */ + + /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ @@ -201,7 +291,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) if (!l_p_j_ref_freq) { l_p_j_ref = loops_per_jiffy; l_p_j_ref_freq = ci->old; - pr_debug("saving %lu as reference value for loops_per_jiffy; " + dprintk("saving %lu as reference value for loops_per_jiffy; " "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); } if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || @@ -209,7 +299,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); - pr_debug("scaling loops_per_jiffy to %lu " + dprintk("scaling loops_per_jiffy to %lu " "for frequency %u kHz\n", loops_per_jiffy, ci->new); } } @@ -236,7 +326,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) BUG_ON(irqs_disabled()); freqs->flags = cpufreq_driver->flags; - pr_debug("notification %u of frequency transition to %u kHz\n", + dprintk("notification %u of frequency transition to %u kHz\n", state, freqs->new); policy = per_cpu(cpufreq_cpu_data, freqs->cpu); @@ -250,7 +340,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { if ((policy) && (policy->cpu == freqs->cpu) && (policy->cur) && (policy->cur != freqs->old)) { - pr_debug("Warning: CPU frequency is" + dprintk("Warning: CPU frequency is" " %u, cpufreq assumed %u kHz.\n", freqs->old, policy->cur); freqs->old = policy->cur; @@ -263,7 +353,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); - pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, + dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); @@ -321,14 +411,21 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, t = __find_governor(str_governor); if (t == NULL) { - int ret; + char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", + str_governor); + + if (name) { + int ret; - 
mutex_unlock(&cpufreq_governor_mutex); - ret = request_module("cpufreq_%s", str_governor); - mutex_lock(&cpufreq_governor_mutex); + mutex_unlock(&cpufreq_governor_mutex); + ret = request_module("%s", name); + mutex_lock(&cpufreq_governor_mutex); - if (ret == 0) - t = __find_governor(str_governor); + if (ret == 0) + t = __find_governor(str_governor); + } + + kfree(name); } if (t != NULL) { @@ -656,7 +753,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, static void cpufreq_sysfs_release(struct kobject *kobj) { struct cpufreq_policy *policy = to_policy(kobj); - pr_debug("last reference is dropped\n"); + dprintk("last reference is dropped\n"); complete(&policy->kobj_unregister); } @@ -691,7 +788,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); if (gov) { policy->governor = gov; - pr_debug("Restoring governor %s for cpu %d\n", + dprintk("Restoring governor %s for cpu %d\n", policy->governor->name, cpu); } #endif @@ -727,7 +824,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, per_cpu(cpufreq_cpu_data, cpu) = managed_policy; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - pr_debug("CPU already managed, adding link\n"); + dprintk("CPU already managed, adding link\n"); ret = sysfs_create_link(&sys_dev->kobj, &managed_policy->kobj, "cpufreq"); @@ -768,7 +865,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, if (!cpu_online(j)) continue; - pr_debug("CPU %u already managed, adding link\n", j); + dprintk("CPU %u already managed, adding link\n", j); managed_policy = cpufreq_cpu_get(cpu); cpu_sys_dev = get_cpu_sysdev(j); ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, @@ -844,7 +941,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, policy->user_policy.governor = policy->governor; if (ret) { - pr_debug("setting policy failed\n"); + dprintk("setting policy failed\n"); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); } @@ -880,7 +977,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) if (cpu_is_offline(cpu)) return 0; - pr_debug("adding CPU %u\n", cpu); + cpufreq_debug_disable_ratelimit(); + dprintk("adding CPU %u\n", cpu); #ifdef CONFIG_SMP /* check whether a different CPU already registered this @@ -888,6 +986,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) policy = cpufreq_cpu_get(cpu); if (unlikely(policy)) { cpufreq_cpu_put(policy); + cpufreq_debug_enable_ratelimit(); return 0; } #endif @@ -938,7 +1037,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) */ ret = cpufreq_driver->init(policy); if (ret) { - pr_debug("initialization failed\n"); + dprintk("initialization failed\n"); goto err_unlock_policy; } policy->user_policy.min = policy->min; @@ -964,7 +1063,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) kobject_uevent(&policy->kobj, KOBJ_ADD); module_put(cpufreq_driver->owner); - pr_debug("initialization complete\n"); + dprintk("initialization complete\n"); + cpufreq_debug_enable_ratelimit(); return 0; @@ -988,6 +1088,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) nomem_out: module_put(cpufreq_driver->owner); module_out: + cpufreq_debug_enable_ratelimit(); return ret; } @@ -1011,13 +1112,15 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) unsigned int j; #endif - pr_debug("unregistering CPU %u\n", cpu); + cpufreq_debug_disable_ratelimit(); + dprintk("unregistering CPU %u\n", cpu); spin_lock_irqsave(&cpufreq_driver_lock, flags); data = per_cpu(cpufreq_cpu_data, cpu); if (!data) { 
spin_unlock_irqrestore(&cpufreq_driver_lock, flags); + cpufreq_debug_enable_ratelimit(); unlock_policy_rwsem_write(cpu); return -EINVAL; } @@ -1029,11 +1132,12 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * only need to unlink, put and exit */ if (unlikely(cpu != data->cpu)) { - pr_debug("removing link\n"); + dprintk("removing link\n"); cpumask_clear_cpu(cpu, data->cpus); spin_unlock_irqrestore(&cpufreq_driver_lock, flags); kobj = &sys_dev->kobj; cpufreq_cpu_put(data); + cpufreq_debug_enable_ratelimit(); unlock_policy_rwsem_write(cpu); sysfs_remove_link(kobj, "cpufreq"); return 0; @@ -1066,7 +1170,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) for_each_cpu(j, data->cpus) { if (j == cpu) continue; - pr_debug("removing link for cpu %u\n", j); + dprintk("removing link for cpu %u\n", j); #ifdef CONFIG_HOTPLUG_CPU strncpy(per_cpu(cpufreq_cpu_governor, j), data->governor->name, CPUFREQ_NAME_LEN); @@ -1095,35 +1199,21 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * not referenced anymore by anybody before we proceed with * unloading. */ - pr_debug("waiting for dropping of refcount\n"); + dprintk("waiting for dropping of refcount\n"); wait_for_completion(cmp); - pr_debug("wait complete\n"); + dprintk("wait complete\n"); lock_policy_rwsem_write(cpu); if (cpufreq_driver->exit) cpufreq_driver->exit(data); unlock_policy_rwsem_write(cpu); -#ifdef CONFIG_HOTPLUG_CPU - /* when the CPU which is the parent of the kobj is hotplugged - * offline, check for siblings, and create cpufreq sysfs interface - * and symlinks - */ - if (unlikely(cpumask_weight(data->cpus) > 1)) { - /* first sibling now owns the new sysfs dir */ - cpumask_clear_cpu(cpu, data->cpus); - cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus))); - - /* finally remove our own symlink */ - lock_policy_rwsem_write(cpu); - __cpufreq_remove_dev(sys_dev); - } -#endif - free_cpumask_var(data->related_cpus); free_cpumask_var(data->cpus); kfree(data); + per_cpu(cpufreq_cpu_data, cpu) = NULL; + cpufreq_debug_enable_ratelimit(); return 0; } @@ -1149,7 +1239,7 @@ static void handle_update(struct work_struct *work) struct cpufreq_policy *policy = container_of(work, struct cpufreq_policy, update); unsigned int cpu = policy->cpu; - pr_debug("handle_update for cpu %u called\n", cpu); + dprintk("handle_update for cpu %u called\n", cpu); cpufreq_update_policy(cpu); } @@ -1167,7 +1257,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, { struct cpufreq_freqs freqs; - pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " + dprintk("Warning: CPU frequency out of sync: cpufreq and timing " "core thinks of %u, is %u kHz.\n", old_freq, new_freq); freqs.cpu = cpu; @@ -1270,7 +1360,7 @@ static int cpufreq_bp_suspend(void) int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; - pr_debug("suspending cpu %u\n", cpu); + dprintk("suspending cpu %u\n", cpu); /* If there's no policy for the boot CPU, we have nothing to do. */ cpu_policy = cpufreq_cpu_get(cpu); @@ -1308,7 +1398,7 @@ static void cpufreq_bp_resume(void) int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; - pr_debug("resuming cpu %u\n", cpu); + dprintk("resuming cpu %u\n", cpu); /* If there's no policy for the boot CPU, we have nothing to do. 
*/ cpu_policy = cpufreq_cpu_get(cpu); @@ -1420,7 +1510,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, { int retval = -EINVAL; - pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, + dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, target_freq, relation); if (cpu_online(policy->cpu) && cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); @@ -1506,7 +1596,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, if (!try_module_get(policy->governor->owner)) return -EINVAL; - pr_debug("__cpufreq_governor for CPU %u, event %u\n", + dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event); ret = policy->governor->governor(policy, event); @@ -1607,7 +1697,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, { int ret = 0; - pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, + cpufreq_debug_disable_ratelimit(); + dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, policy->min, policy->max); memcpy(&policy->cpuinfo, &data->cpuinfo, @@ -1644,19 +1735,19 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, data->min = policy->min; data->max = policy->max; - pr_debug("new min and max freqs are %u - %u kHz\n", + dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max); if (cpufreq_driver->setpolicy) { data->policy = policy->policy; - pr_debug("setting range\n"); + dprintk("setting range\n"); ret = cpufreq_driver->setpolicy(policy); } else { if (policy->governor != data->governor) { /* save old, working values */ struct cpufreq_governor *old_gov = data->governor; - pr_debug("governor switch\n"); + dprintk("governor switch\n"); /* end old governor */ if (data->governor) @@ -1666,7 +1757,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, data->governor = policy->governor; if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { /* new governor failed, so re-start old one */ - pr_debug("starting governor %s failed\n", + dprintk("starting governor %s failed\n", data->governor->name); if (old_gov) { data->governor = old_gov; @@ -1678,11 +1769,12 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, } /* might be a policy change, too, so fall through */ } - pr_debug("governor: change or update limits\n"); + dprintk("governor: change or update limits\n"); __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); } error_out: + cpufreq_debug_enable_ratelimit(); return ret; } @@ -1709,7 +1801,7 @@ int cpufreq_update_policy(unsigned int cpu) goto fail; } - pr_debug("updating policy for CPU %u\n", cpu); + dprintk("updating policy for CPU %u\n", cpu); memcpy(&policy, data, sizeof(struct cpufreq_policy)); policy.min = data->user_policy.min; policy.max = data->user_policy.max; @@ -1721,7 +1813,7 @@ int cpufreq_update_policy(unsigned int cpu) if (cpufreq_driver->get) { policy.cur = cpufreq_driver->get(cpu); if (!data->cur) { - pr_debug("Driver did not initialize current freq"); + dprintk("Driver did not initialize current freq"); data->cur = policy.cur; } else { if (data->cur != policy.cur) @@ -1797,7 +1889,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) ((!driver_data->setpolicy) && (!driver_data->target))) return -EINVAL; - pr_debug("trying to register driver %s\n", driver_data->name); + dprintk("trying to register driver %s\n", driver_data->name); if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; @@ -1828,14 +1920,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) /* 
if all ->init() calls failed, unregister */ if (ret) { - pr_debug("no CPU initialized for driver %s\n", + dprintk("no CPU initialized for driver %s\n", driver_data->name); goto err_sysdev_unreg; } } register_hotcpu_notifier(&cpufreq_cpu_notifier); - pr_debug("driver %s up and running\n", driver_data->name); + dprintk("driver %s up and running\n", driver_data->name); + cpufreq_debug_enable_ratelimit(); return 0; err_sysdev_unreg: @@ -1862,10 +1955,14 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) { unsigned long flags; - if (!cpufreq_driver || (driver != cpufreq_driver)) + cpufreq_debug_disable_ratelimit(); + + if (!cpufreq_driver || (driver != cpufreq_driver)) { + cpufreq_debug_enable_ratelimit(); return -EINVAL; + } - pr_debug("unregistering driver %s\n", driver->name); + dprintk("unregistering driver %s\n", driver->name); sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); unregister_hotcpu_notifier(&cpufreq_cpu_notifier); diff --git a/trunk/drivers/cpufreq/cpufreq_performance.c b/trunk/drivers/cpufreq/cpufreq_performance.c index f13a8a9af6a1..7e2e515087f8 100644 --- a/trunk/drivers/cpufreq/cpufreq_performance.c +++ b/trunk/drivers/cpufreq/cpufreq_performance.c @@ -15,6 +15,9 @@ #include #include +#define dprintk(msg...) \ + cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) + static int cpufreq_governor_performance(struct cpufreq_policy *policy, unsigned int event) @@ -22,7 +25,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy, switch (event) { case CPUFREQ_GOV_START: case CPUFREQ_GOV_LIMITS: - pr_debug("setting to %u kHz because of event %u\n", + dprintk("setting to %u kHz because of event %u\n", policy->max, event); __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); diff --git a/trunk/drivers/cpufreq/cpufreq_powersave.c b/trunk/drivers/cpufreq/cpufreq_powersave.c index 4c2eb512f2bc..e6db5faf3eb1 100644 --- a/trunk/drivers/cpufreq/cpufreq_powersave.c +++ b/trunk/drivers/cpufreq/cpufreq_powersave.c @@ -15,13 +15,16 @@ #include #include +#define dprintk(msg...) \ + cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) + static int cpufreq_governor_powersave(struct cpufreq_policy *policy, unsigned int event) { switch (event) { case CPUFREQ_GOV_START: case CPUFREQ_GOV_LIMITS: - pr_debug("setting to %u kHz because of event %u\n", + dprintk("setting to %u kHz because of event %u\n", policy->min, event); __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); diff --git a/trunk/drivers/cpufreq/cpufreq_stats.c b/trunk/drivers/cpufreq/cpufreq_stats.c index b60a4c263686..00d73fc8e4e2 100644 --- a/trunk/drivers/cpufreq/cpufreq_stats.c +++ b/trunk/drivers/cpufreq/cpufreq_stats.c @@ -165,27 +165,17 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) return -1; } -/* should be called late in the CPU removal sequence so that the stats - * memory is still available in case someone tries to use it. - */ static void cpufreq_stats_free_table(unsigned int cpu) { struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + if (policy && policy->cpu == cpu) + sysfs_remove_group(&policy->kobj, &stats_attr_group); if (stat) { kfree(stat->time_in_state); kfree(stat); } per_cpu(cpufreq_stats_table, cpu) = NULL; -} - -/* must be called early in the CPU removal sequence (before - * cpufreq_remove_dev) so that policy is still valid. 
- */ -static void cpufreq_stats_free_sysfs(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - if (policy && policy->cpu == cpu) - sysfs_remove_group(&policy->kobj, &stats_attr_group); if (policy) cpufreq_cpu_put(policy); } @@ -326,9 +316,6 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, case CPU_ONLINE_FROZEN: cpufreq_update_policy(cpu); break; - case CPU_DOWN_PREPARE: - cpufreq_stats_free_sysfs(cpu); - break; case CPU_DEAD: case CPU_DEAD_FROZEN: cpufreq_stats_free_table(cpu); @@ -337,10 +324,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -/* priority=1 so this will get called before cpufreq_remove_dev */ -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { +static struct notifier_block cpufreq_stat_cpu_notifier __refdata = +{ .notifier_call = cpufreq_stat_cpu_callback, - .priority = 1, }; static struct notifier_block notifier_policy_block = { diff --git a/trunk/drivers/cpufreq/cpufreq_userspace.c b/trunk/drivers/cpufreq/cpufreq_userspace.c index f231015904c0..66d2d1d6c80f 100644 --- a/trunk/drivers/cpufreq/cpufreq_userspace.c +++ b/trunk/drivers/cpufreq/cpufreq_userspace.c @@ -37,6 +37,9 @@ static DEFINE_PER_CPU(unsigned int, cpu_is_managed); static DEFINE_MUTEX(userspace_mutex); static int cpus_using_userspace_governor; +#define dprintk(msg...) \ + cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) + /* keep track of frequency transitions */ static int userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, @@ -47,7 +50,7 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (!per_cpu(cpu_is_managed, freq->cpu)) return 0; - pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", + dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", freq->cpu, freq->new); per_cpu(cpu_cur_freq, freq->cpu) = freq->new; @@ -70,7 +73,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) { int ret = -EINVAL; - pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); + dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); mutex_lock(&userspace_mutex); if (!per_cpu(cpu_is_managed, policy->cpu)) @@ -131,7 +134,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, per_cpu(cpu_max_freq, cpu) = policy->max; per_cpu(cpu_cur_freq, cpu) = policy->cur; per_cpu(cpu_set_freq, cpu) = policy->cur; - pr_debug("managing cpu %u started " + dprintk("managing cpu %u started " "(%u - %u kHz, currently %u kHz)\n", cpu, per_cpu(cpu_min_freq, cpu), @@ -153,12 +156,12 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, per_cpu(cpu_min_freq, cpu) = 0; per_cpu(cpu_max_freq, cpu) = 0; per_cpu(cpu_set_freq, cpu) = 0; - pr_debug("managing cpu %u stopped\n", cpu); + dprintk("managing cpu %u stopped\n", cpu); mutex_unlock(&userspace_mutex); break; case CPUFREQ_GOV_LIMITS: mutex_lock(&userspace_mutex); - pr_debug("limit event for cpu %u: %u - %u kHz, " + dprintk("limit event for cpu %u: %u - %u kHz, " "currently %u kHz, last set to %u kHz\n", cpu, policy->min, policy->max, per_cpu(cpu_cur_freq, cpu), diff --git a/trunk/drivers/cpufreq/freq_table.c b/trunk/drivers/cpufreq/freq_table.c index 90431cb92804..05432216e224 100644 --- a/trunk/drivers/cpufreq/freq_table.c +++ b/trunk/drivers/cpufreq/freq_table.c @@ -14,6 +14,9 @@ #include #include +#define dprintk(msg...) 
\ + cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) + /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ @@ -28,11 +31,11 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { unsigned int freq = table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) { - pr_debug("table entry %u is invalid, skipping\n", i); + dprintk("table entry %u is invalid, skipping\n", i); continue; } - pr_debug("table entry %u: %u kHz, %u index\n", + dprintk("table entry %u: %u kHz, %u index\n", i, freq, table[i].index); if (freq < min_freq) min_freq = freq; @@ -58,7 +61,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, unsigned int i; unsigned int count = 0; - pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", + dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); if (!cpu_online(policy->cpu)) @@ -83,7 +86,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", + dprintk("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); return 0; @@ -107,7 +110,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, }; unsigned int i; - pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", + dprintk("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); switch (relation) { @@ -164,7 +167,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, } else *index = optimal.index; - pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, + dprintk("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, table[*index].index); return 0; @@ -213,14 +216,14 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu) { - pr_debug("setting show_table for cpu %u to %p\n", cpu, table); + dprintk("setting show_table for cpu %u to %p\n", cpu, table); per_cpu(cpufreq_show_table, cpu) = table; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); void cpufreq_frequency_table_put_attr(unsigned int cpu) { - pr_debug("clearing show_table for cpu %u\n", cpu); + dprintk("clearing show_table for cpu %u\n", cpu); per_cpu(cpufreq_show_table, cpu) = NULL; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); diff --git a/trunk/drivers/edac/ppc4xx_edac.c b/trunk/drivers/edac/ppc4xx_edac.c index af8e7b1aa290..c1f0045ceb8e 100644 --- a/trunk/drivers/edac/ppc4xx_edac.c +++ b/trunk/drivers/edac/ppc4xx_edac.c @@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, struct ppc4xx_edac_pdata *pdata = NULL; const struct device_node *np = op->dev.of_node; - if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL) + if (op->dev.of_match == NULL) return -EINVAL; /* Initial driver pointers and private data */ diff --git a/trunk/drivers/firewire/ohci.c b/trunk/drivers/firewire/ohci.c index 23d1468ad253..f903d7b6f34a 100644 --- a/trunk/drivers/firewire/ohci.c +++ b/trunk/drivers/firewire/ohci.c @@ -2199,6 +2199,7 @@ static int ohci_set_config_rom(struct fw_card *card, { struct fw_ohci *ohci; unsigned long flags; + int ret = -EBUSY; __be32 
*next_config_rom; dma_addr_t uninitialized_var(next_config_rom_bus); @@ -2239,37 +2240,22 @@ static int ohci_set_config_rom(struct fw_card *card, spin_lock_irqsave(&ohci->lock, flags); - /* - * If there is not an already pending config_rom update, - * push our new allocation into the ohci->next_config_rom - * and then mark the local variable as null so that we - * won't deallocate the new buffer. - * - * OTOH, if there is a pending config_rom update, just - * use that buffer with the new config_rom data, and - * let this routine free the unused DMA allocation. - */ - if (ohci->next_config_rom == NULL) { ohci->next_config_rom = next_config_rom; ohci->next_config_rom_bus = next_config_rom_bus; - next_config_rom = NULL; - } - copy_config_rom(ohci->next_config_rom, config_rom, length); + copy_config_rom(ohci->next_config_rom, config_rom, length); - ohci->next_header = config_rom[0]; - ohci->next_config_rom[0] = 0; + ohci->next_header = config_rom[0]; + ohci->next_config_rom[0] = 0; - reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); + reg_write(ohci, OHCI1394_ConfigROMmap, + ohci->next_config_rom_bus); + ret = 0; + } spin_unlock_irqrestore(&ohci->lock, flags); - /* If we didn't use the DMA allocation, delete it. */ - if (next_config_rom != NULL) - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - next_config_rom, next_config_rom_bus); - /* * Now initiate a bus reset to have the changes take * effect. We clean up the old config rom memory and DMA @@ -2277,10 +2263,13 @@ static int ohci_set_config_rom(struct fw_card *card, * controller could need to access it before the bus reset * takes effect. */ + if (ret == 0) + fw_schedule_bus_reset(&ohci->card, true, true); + else + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + next_config_rom, next_config_rom_bus); - fw_schedule_bus_reset(&ohci->card, true, true); - - return 0; + return ret; } static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) diff --git a/trunk/drivers/firmware/iscsi_ibft_find.c b/trunk/drivers/firmware/iscsi_ibft_find.c index f032e446fc11..2192456dfd68 100644 --- a/trunk/drivers/firmware/iscsi_ibft_find.c +++ b/trunk/drivers/firmware/iscsi_ibft_find.c @@ -42,20 +42,7 @@ struct acpi_table_ibft *ibft_addr; EXPORT_SYMBOL_GPL(ibft_addr); -static const struct { - char *sign; -} ibft_signs[] = { -#ifdef CONFIG_ACPI - /* - * One spec says "IBFT", the other says "iBFT". We have to check - * for both. - */ - { ACPI_SIG_IBFT }, -#endif - { "iBFT" }, - { "BIFT" }, /* Broadcom iSCSI Offload */ -}; - +#define IBFT_SIGN "iBFT" #define IBFT_SIGN_LEN 4 #define IBFT_START 0x80000 /* 512kB */ #define IBFT_END 0x100000 /* 1MB */ @@ -75,7 +62,6 @@ static int __init find_ibft_in_mem(void) unsigned long pos; unsigned int len = 0; void *virt; - int i; for (pos = IBFT_START; pos < IBFT_END; pos += 16) { /* The table can't be inside the VGA BIOS reserved space, @@ -83,23 +69,18 @@ static int __init find_ibft_in_mem(void) if (pos == VGA_MEM) pos += VGA_SIZE; virt = isa_bus_to_virt(pos); - - for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) { - if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) == - 0) { - unsigned long *addr = - (unsigned long *)isa_bus_to_virt(pos + 4); - len = *addr; - /* if the length of the table extends past 1M, - * the table cannot be valid. 
*/ - if (pos + len <= (IBFT_END-1)) { - ibft_addr = (struct acpi_table_ibft *)virt; - goto done; - } + if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { + unsigned long *addr = + (unsigned long *)isa_bus_to_virt(pos + 4); + len = *addr; + /* if the length of the table extends past 1M, + * the table cannot be valid. */ + if (pos + len <= (IBFT_END-1)) { + ibft_addr = (struct acpi_table_ibft *)virt; + break; } } } -done: return len; } /* @@ -108,12 +89,18 @@ static int __init find_ibft_in_mem(void) */ unsigned long __init find_ibft_region(unsigned long *sizep) { - int i; + ibft_addr = NULL; #ifdef CONFIG_ACPI - for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) - acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft); + /* + * One spec says "IBFT", the other says "iBFT". We have to check + * for both. + */ + if (!ibft_addr) + acpi_table_parse(ACPI_SIG_IBFT, acpi_find_ibft); + if (!ibft_addr) + acpi_table_parse(IBFT_SIGN, acpi_find_ibft); #endif /* CONFIG_ACPI */ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig index b493663c7ba7..c58f691ec3ce 100644 --- a/trunk/drivers/gpu/drm/Kconfig +++ b/trunk/drivers/gpu/drm/Kconfig @@ -24,7 +24,6 @@ config DRM_KMS_HELPER depends on DRM select FB select FRAMEBUFFER_CONSOLE if !EXPERT - select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE help FB and CRTC helpers for KMS drivers. diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c index 140b9525b48a..950720473967 100644 --- a/trunk/drivers/gpu/drm/drm_fb_helper.c +++ b/trunk/drivers/gpu/drm/drm_fb_helper.c @@ -342,22 +342,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_debug_leave); -bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) -{ - bool error = false; - int i, ret; - for (i = 0; i < fb_helper->crtc_count; i++) { - struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; - ret = drm_crtc_helper_set_config(mode_set); - if (ret) - error = true; - } - return error; -} -EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); - bool drm_fb_helper_force_kernel_mode(void) { + int i = 0; bool ret, error = false; struct drm_fb_helper *helper; @@ -365,12 +352,12 @@ bool drm_fb_helper_force_kernel_mode(void) return false; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { - if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF) - continue; - - ret = drm_fb_helper_restore_fbdev_mode(helper); - if (ret) - error = true; + for (i = 0; i < helper->crtc_count; i++) { + struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; + ret = drm_crtc_helper_set_config(mode_set); + if (ret) + error = true; + } } return error; } @@ -1516,33 +1503,17 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) } EXPORT_SYMBOL(drm_fb_helper_initial_config); -/** - * drm_fb_helper_hotplug_event - respond to a hotplug notification by - * probing all the outputs attached to the fb. - * @fb_helper: the drm_fb_helper - * - * LOCKING: - * Called at runtime, must take mode config lock. - * - * Scan the connectors attached to the fb_helper and try to put together a - * setup after *notification of a change in output configuration. - * - * RETURNS: - * 0 on success and a non-zero error code otherwise. 
- */ -int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { - struct drm_device *dev = fb_helper->dev; int count = 0; u32 max_width, max_height, bpp_sel; bool bound = false, crtcs_bound = false; struct drm_crtc *crtc; if (!fb_helper->fb) - return 0; + return false; - mutex_lock(&dev->mode_config.mutex); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { if (crtc->fb) crtcs_bound = true; if (crtc->fb == fb_helper->fb) @@ -1551,8 +1522,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) if (!bound && crtcs_bound) { fb_helper->delayed_hotplug = true; - mutex_unlock(&dev->mode_config.mutex); - return 0; + return false; } DRM_DEBUG_KMS("\n"); @@ -1563,7 +1533,6 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); drm_setup_crtcs(fb_helper); - mutex_unlock(&dev->mode_config.mutex); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c index a1f12cb043de..741457bd1c46 100644 --- a/trunk/drivers/gpu/drm/drm_irq.c +++ b/trunk/drivers/gpu/drm/drm_irq.c @@ -932,34 +932,11 @@ EXPORT_SYMBOL(drm_vblank_put); void drm_vblank_off(struct drm_device *dev, int crtc) { - struct drm_pending_vblank_event *e, *t; - struct timeval now; unsigned long irqflags; - unsigned int seq; spin_lock_irqsave(&dev->vbl_lock, irqflags); vblank_disable_and_save(dev, crtc); DRM_WAKEUP(&dev->vbl_queue[crtc]); - - /* Send any queued vblank events, lest the natives grow disquiet */ - seq = drm_vblank_count_and_time(dev, crtc, &now); - list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { - if (e->pipe != crtc) - continue; - DRM_DEBUG("Sending premature vblank event on disable: \ - wanted %d, current %d\n", - e->event.sequence, seq); - - e->event.sequence = seq; - e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; - drm_vblank_put(dev, e->pipe); - list_move_tail(&e->base.link, &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); - trace_drm_vblank_event_delivered(e->base.pid, e->pipe, - e->event.sequence); - } - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } EXPORT_SYMBOL(drm_vblank_off); diff --git a/trunk/drivers/gpu/drm/drm_mm.c b/trunk/drivers/gpu/drm/drm_mm.c index 959186cbf328..5d00b0fc0d91 100644 --- a/trunk/drivers/gpu/drm/drm_mm.c +++ b/trunk/drivers/gpu/drm/drm_mm.c @@ -431,7 +431,7 @@ EXPORT_SYMBOL(drm_mm_search_free_in_range); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) { list_replace(&old->node_list, &new->node_list); - list_replace(&old->hole_stack, &new->hole_stack); + list_replace(&old->node_list, &new->hole_stack); new->hole_follows = old->hole_follows; new->mm = old->mm; new->start = old->start; @@ -699,8 +699,8 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) entry->size); total_used += entry->size; if (entry->hole_follows) { - hole_start = drm_mm_hole_node_start(entry); - hole_end = drm_mm_hole_node_end(entry); + hole_start = drm_mm_hole_node_start(&mm->head_node); + hole_end = drm_mm_hole_node_end(&mm->head_node); hole_size = hole_end - hole_start; seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", hole_start, hole_end, hole_size); diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c index 
12876f2795d2..72730377a01b 100644 --- a/trunk/drivers/gpu/drm/i915/i915_dma.c +++ b/trunk/drivers/gpu/drm/i915/i915_dma.c @@ -2207,7 +2207,7 @@ void i915_driver_lastclose(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { - intel_fb_restore_mode(dev); + drm_fb_helper_restore(); vga_switcheroo_process_delayed_switch(); return; } diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index 32d1b3e829c8..c34a8dd31d02 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); unsigned int i915_powersave = 1; module_param_named(powersave, i915_powersave, int, 0600); -unsigned int i915_semaphores = 0; +unsigned int i915_semaphores = 1; module_param_named(semaphores, i915_semaphores, int, 0600); unsigned int i915_enable_rc6 = 0; diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 2166ee071ddb..e522c702b04e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -5605,9 +5605,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) intel_clock_t clock; if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) - fp = I915_READ(FP0(pipe)); + fp = FP0(pipe); else - fp = I915_READ(FP1(pipe)); + fp = FP1(pipe); clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev)) { @@ -6579,10 +6579,8 @@ intel_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) { - drm_gem_object_unreference_unlocked(&obj->base); + if (!intel_fb) return ERR_PTR(-ENOMEM); - } ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) { diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index a4d80314e7f8..cb8578b7e443 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -1470,8 +1470,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (!HAS_PCH_CPT(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { - struct drm_crtc *crtc = intel_dp->base.base.crtc; - + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. @@ -1486,19 +1485,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) /* Changes to enable or select take place the vblank * after being written. */ - if (crtc == NULL) { - /* We can arrive here never having been attached - * to a CRTC, for instance, due to inheriting - * random state from the BIOS. - * - * If the pipe is not running, play safe and - * wait for the clocks to stabilise before - * continuing. 
- */ - POSTING_READ(intel_dp->output_reg); - msleep(50); - } else - intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); + intel_wait_for_vblank(dev, intel_crtc->pipe); } I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index 1d20712d527f..f5b0d8306d83 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -338,5 +338,4 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_fb_output_poll_changed(struct drm_device *dev); -extern void intel_fb_restore_mode(struct drm_device *dev); #endif /* __INTEL_DRV_H__ */ diff --git a/trunk/drivers/gpu/drm/i915/intel_fb.c b/trunk/drivers/gpu/drm/i915/intel_fb.c index ec49bae73382..512782728e51 100644 --- a/trunk/drivers/gpu/drm/i915/intel_fb.c +++ b/trunk/drivers/gpu/drm/i915/intel_fb.c @@ -264,13 +264,3 @@ void intel_fb_output_poll_changed(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } - -void intel_fb_restore_mode(struct drm_device *dev) -{ - int ret; - drm_i915_private_t *dev_priv = dev->dev_private; - - ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); - if (ret) - DRM_DEBUG("failed to restore crtc mode\n"); -} diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index 67cb076d271b..a562bd2648c7 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -539,9 +539,6 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, struct drm_device *dev = dev_priv->dev; struct drm_connector *connector = dev_priv->int_lvds_connector; - if (dev->switch_power_state != DRM_SWITCH_POWER_ON) - return NOTIFY_OK; - /* * check and update the status of LVDS connector after receiving * the LID nofication event. 
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c index c3e953b08992..5045f8b921d6 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -152,6 +152,8 @@ nouveau_mem_vram_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + nouveau_bo_ref(NULL, &dev_priv->vga_ram); + ttm_bo_device_release(&dev_priv->ttm.bdev); nouveau_ttm_global_release(dev_priv); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c index c77111eca6ac..4bce801bc588 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -42,8 +42,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, nvbe->nr_pages = 0; while (num_pages--) { - /* this code path isn't called and is incorrect anyways */ - if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ + if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { nvbe->pages[nvbe->nr_pages] = dma_addrs[nvbe->nr_pages]; nvbe->ttm_alloced[nvbe->nr_pages] = true; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c index 915fbce89595..a30adec5beaa 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c @@ -768,11 +768,6 @@ static void nouveau_card_takedown(struct drm_device *dev) engine->mc.takedown(dev); engine->display.late_takedown(dev); - if (dev_priv->vga_ram) { - nouveau_bo_unpin(dev_priv->vga_ram); - nouveau_bo_ref(NULL, &dev_priv->vga_ram); - } - mutex_lock(&dev->struct_mutex); ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 9073e3bfb08c..e9bc135d9189 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -862,15 +862,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) SYSTEM_ACCESS_MODE_NOT_IN_SYS | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); - if (rdev->flags & RADEON_IS_IGP) { - WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp); - WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp); - WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp); - } else { - WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); - WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); - WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); - } + WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); @@ -1780,10 +1774,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) mc_shared_chmap = RREG32(MC_SHARED_CHMAP); - if (rdev->flags & RADEON_IS_IGP) - mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); - else - mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); switch (rdev->config.evergreen.max_tile_pipes) { case 1: @@ -2932,6 +2923,11 @@ static int evergreen_startup(struct radeon_device *rdev) rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } + /* XXX: ontario has problems blitting to gart at the moment */ + if (rdev->family == CHIP_PALM) { + rdev->asic->copy = NULL; + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + } /* allocate wb buffer */ r = radeon_wb_init(rdev); diff --git 
a/trunk/drivers/gpu/drm/radeon/evergreend.h b/trunk/drivers/gpu/drm/radeon/evergreend.h index fc40e0cc3451..9aaa3f0c9372 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreend.h +++ b/trunk/drivers/gpu/drm/radeon/evergreend.h @@ -200,7 +200,6 @@ #define BURSTLENGTH_SHIFT 9 #define BURSTLENGTH_MASK 0x00000200 #define CHANSIZE_OVERRIDE (1 << 11) -#define FUS_MC_ARB_RAMCFG 0x2768 #define MC_VM_AGP_TOP 0x2028 #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 @@ -222,11 +221,6 @@ #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C - -#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C -#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 -#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664 - #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 diff --git a/trunk/drivers/gpu/drm/radeon/ni.c b/trunk/drivers/gpu/drm/radeon/ni.c index 3d8a7634bbe9..7aade20f63a8 100644 --- a/trunk/drivers/gpu/drm/radeon/ni.c +++ b/trunk/drivers/gpu/drm/radeon/ni.c @@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); - cgts_tcc_disable = 0xff000000; + cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE); gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); @@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) smx_dc_ctl0 = RREG32(SMX_DC_CTL0); smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); - smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); + smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); WREG32(SMX_DC_CTL0, smx_dc_ctl0); WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); @@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); - WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | - POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | - SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); + WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | + POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | + SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); - WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | - SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | - SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); + WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | + SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | + SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); WREG32(VGT_NUM_INSTANCES, 1); WREG32(CP_PERFMON_CNTL, 0); - WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | + WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | FETCH_FIFO_HIWATER(0x4) | DONE_FIFO_HIWATER(0xe0) | ALU_UPDATE_FIFO_HIWATER(0x8))); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c index 90dfb2b8cf03..f5d12fb103fa 100644 --- 
a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c @@ -431,7 +431,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } - /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port + /* Acer laptop (Acer TravelMate 5730G) has an HDMI port * on the laptop and a DVI port on the docking station and * both share the same encoder, hpd pin, and ddc line. * So while the bios table is technically correct, @@ -440,7 +440,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, * with different crtcs which isn't possible on the hardware * side and leaves no crtcs for LVDS or VGA. */ - if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && + if ((dev->pdev->device == 0x95c4) && (dev->pdev->subsystem_vendor == 0x1025) && (dev->pdev->subsystem_device == 0x013c)) { if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && @@ -1574,17 +1574,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; - u8 *record; - - if ((frev == 1) && (crev < 2)) - /* absolute */ - record = (u8 *)(mode_info->atom_context->bios + - le16_to_cpu(lvds_info->info.usModePatchTableOffset)); - else - /* relative */ - record = (u8 *)(mode_info->atom_context->bios + - data_offset + - le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + u8 *record = (u8 *)(mode_info->atom_context->bios + + data_offset + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: @@ -1607,10 +1599,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], fake_edid_record->ucFakeEDIDLength); - if (drm_edid_is_valid(edid)) { + if (drm_edid_is_valid(edid)) rdev->mode_info.bios_hardcoded_edid = edid; - rdev->mode_info.bios_hardcoded_edid_size = edid_size; - } else + else kfree(edid); } } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 9d95792bea3e..ed5dfe58f29c 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -15,9 +15,6 @@ #define ATPX_VERSION 0 #define ATPX_GPU_PWR 2 #define ATPX_MUX_SELECT 3 -#define ATPX_I2C_MUX_SELECT 4 -#define ATPX_SWITCH_START 5 -#define ATPX_SWITCH_END 6 #define ATPX_INTEGRATED 0 #define ATPX_DISCRETE 1 @@ -152,35 +149,13 @@ static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); } -static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) -{ - return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); -} - -static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) -{ - return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); -} - -static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) -{ - return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); -} static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) { - int gpu_id; - if (id == VGA_SWITCHEROO_IGD) - gpu_id = ATPX_INTEGRATED; + radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0); else - gpu_id = ATPX_DISCRETE; - - radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); - radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); - 
radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); - radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); - + radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1); return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_cursor.c b/trunk/drivers/gpu/drm/radeon/radeon_cursor.c index 3189a7efb2e9..bdf2fa1189ae 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_cursor.c @@ -167,6 +167,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } + radeon_crtc->cursor_width = width; + radeon_crtc->cursor_height = height; + obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); if (!obj) { DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); @@ -177,9 +180,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, if (ret) goto fail; - radeon_crtc->cursor_width = width; - radeon_crtc->cursor_height = height; - radeon_lock_cursor(crtc, true); /* XXX only 27 bit offset for legacy cursor */ radeon_set_cursor(crtc, obj, gpu_addr); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index a533f52fd163..8a955bbdb608 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - /* we reverted the patch using dma_addr in TTM for now but this - * code stops building on alpha so just comment it out for now */ - if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */ + /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 + * is requested. */ + if (dma_addr[i] != DMA_ERROR_CODE) { rdev->gart.ttm_alloced[p] = true; rdev->gart.pages_addr[p] = dma_addr[i]; } else { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_kms.c index bd58af658581..bf7d4c061451 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_kms.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_kms.c @@ -221,22 +221,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } break; - case RADEON_INFO_NUM_TILE_PIPES: - if (rdev->family >= CHIP_CAYMAN) - value = rdev->config.cayman.max_tile_pipes; - else if (rdev->family >= CHIP_CEDAR) - value = rdev->config.evergreen.max_tile_pipes; - else if (rdev->family >= CHIP_RV770) - value = rdev->config.rv770.max_tile_pipes; - else if (rdev->family >= CHIP_R600) - value = rdev->config.r600.max_tile_pipes; - else { - return -EINVAL; - } - break; - case RADEON_INFO_FUSION_GART_WORKING: - value = 1; - break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/cayman b/trunk/drivers/gpu/drm/radeon/reg_srcs/cayman index 0aa8e85a9457..6334f8ac1209 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -33,7 +33,6 @@ cayman 0x9400 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 -0x00009508 TA_CNTL_AUX 0x00009830 DB_DEBUG 0x00009834 DB_DEBUG2 0x00009838 DB_DEBUG3 diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen b/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen index 0e28cae7ea43..7e1637176e08 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -46,7 +46,6 @@ evergreen 0x9400 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 
0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 -0x00009508 TA_CNTL_AUX 0x00009700 VC_CNTL 0x00009714 VC_ENHANCE 0x00009830 DB_DEBUG diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 index 92f1900dc7ca..af0da4ae3f55 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -708,7 +708,6 @@ r600 0x9400 0x00028D0C DB_RENDER_CONTROL 0x00028D10 DB_RENDER_OVERRIDE 0x0002880C DB_SHADER_CONTROL -0x00028D28 DB_SRESULTS_COMPARE_STATE0 0x00028D2C DB_SRESULTS_COMPARE_STATE1 0x00028430 DB_STENCILREFMASK 0x00028434 DB_STENCILREFMASK_BF diff --git a/trunk/drivers/gpu/vga/vga_switcheroo.c b/trunk/drivers/gpu/vga/vga_switcheroo.c index 498b284e5ef9..e01cacba685f 100644 --- a/trunk/drivers/gpu/vga/vga_switcheroo.c +++ b/trunk/drivers/gpu/vga/vga_switcheroo.c @@ -219,6 +219,9 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) int i; struct vga_switcheroo_client *active = NULL; + if (new_client->active == true) + return 0; + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { if (vgasr_priv.clients[i].active == true) { active = &vgasr_priv.clients[i]; @@ -369,9 +372,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, goto out; } - if (client->active == true) - goto out; - /* okay we want a switch - test if devices are willing to switch */ can_switch = true; for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { diff --git a/trunk/drivers/hwmon/Kconfig b/trunk/drivers/hwmon/Kconfig index 50e40dbd8bb6..060ef6327876 100644 --- a/trunk/drivers/hwmon/Kconfig +++ b/trunk/drivers/hwmon/Kconfig @@ -110,7 +110,8 @@ config SENSORS_ADM1021 help If you say yes here you get support for Analog Devices ADM1021 and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A, - Genesys Logic GL523SM, National Semiconductor LM84 and TI THMC10. + Genesys Logic GL523SM, National Semiconductor LM84, TI THMC10, + and the XEON processor built-in sensor. This driver can also be built as a module. If so, the module will be called adm1021. @@ -617,10 +618,10 @@ config SENSORS_LM90 depends on I2C help If you say yes here you get support for National Semiconductor LM90, - LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A, - Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, - MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008, - and Winbond/Nuvoton W83L771W/G/AWG/ASG sensor chips. + LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, Maxim + MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, + MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, and Winbond/Nuvoton + W83L771W/G/AWG/ASG sensor chips. This driver can also be built as a module. If so, the module will be called lm90. 
diff --git a/trunk/drivers/hwmon/lm85.c b/trunk/drivers/hwmon/lm85.c index da72dc12068c..250d099ca398 100644 --- a/trunk/drivers/hwmon/lm85.c +++ b/trunk/drivers/hwmon/lm85.c @@ -1094,7 +1094,6 @@ static struct attribute *lm85_attributes_minctl[] = { &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr, &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr, &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr, - NULL }; static const struct attribute_group lm85_group_minctl = { @@ -1105,7 +1104,6 @@ static struct attribute *lm85_attributes_temp_off[] = { &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr, &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr, &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr, - NULL }; static const struct attribute_group lm85_group_temp_off = { @@ -1331,11 +1329,11 @@ static int lm85_probe(struct i2c_client *client, if (data->type != emc6d103s) { err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl); if (err) - goto err_remove_files; + goto err_kfree; err = sysfs_create_group(&client->dev.kobj, &lm85_group_temp_off); if (err) - goto err_remove_files; + goto err_kfree; } /* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used diff --git a/trunk/drivers/hwmon/lm90.c b/trunk/drivers/hwmon/lm90.c index 2f94f9504804..c43b4e9f96a9 100644 --- a/trunk/drivers/hwmon/lm90.c +++ b/trunk/drivers/hwmon/lm90.c @@ -49,10 +49,10 @@ * chips, but support three temperature sensors instead of two. MAX6695 * and MAX6696 only differ in the pinout so they can be treated identically. * - * This driver also supports ADT7461 and ADT7461A from Analog Devices as well as - * NCT1008 from ON Semiconductor. The chips are supported in both compatibility - * and extended mode. They are mostly compatible with LM90 except for a data - * format difference for the temperature value registers. + * This driver also supports the ADT7461 chip from Analog Devices. + * It's supported in both compatibility and extended mode. It is mostly + * compatible with LM90 except for a data format difference for the + * temperature value registers. * * Since the LM90 was the first chipset supported by this driver, most * comments will refer to this chipset, but are actually general and @@ -88,10 +88,9 @@ * Addresses to scan * Address is fully defined internally and cannot be changed except for * MAX6659, MAX6680 and MAX6681. - * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, ADT7461A, MAX6649, - * MAX6657, MAX6658, NCT1008 and W83L771 have address 0x4c. - * ADM1032-2, ADT7461-2, ADT7461A-2, LM89-1, LM99-1, MAX6646, and NCT1008D - * have address 0x4d. + * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, MAX6649, MAX6657, + * MAX6658 and W83L771 have address 0x4c. + * ADM1032-2, ADT7461-2, LM89-1, LM99-1 and MAX6646 have address 0x4d. * MAX6647 has address 0x4e. * MAX6659 can have address 0x4c, 0x4d or 0x4e. 
* MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, @@ -175,7 +174,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, static const struct i2c_device_id lm90_id[] = { { "adm1032", adm1032 }, { "adt7461", adt7461 }, - { "adt7461a", adt7461 }, { "lm90", lm90 }, { "lm86", lm86 }, { "lm89", lm86 }, @@ -190,7 +188,6 @@ static const struct i2c_device_id lm90_id[] = { { "max6681", max6680 }, { "max6695", max6696 }, { "max6696", max6696 }, - { "nct1008", adt7461 }, { "w83l771", w83l771 }, { } }; @@ -1156,11 +1153,6 @@ static int lm90_detect(struct i2c_client *new_client, && (reg_config1 & 0x1B) == 0x00 && reg_convrate <= 0x0A) { name = "adt7461"; - } else - if (chip_id == 0x57 /* ADT7461A, NCT1008 */ - && (reg_config1 & 0x1B) == 0x00 - && reg_convrate <= 0x0A) { - name = "adt7461a"; } } else if (man_id == 0x4D) { /* Maxim */ diff --git a/trunk/drivers/hwmon/twl4030-madc-hwmon.c b/trunk/drivers/hwmon/twl4030-madc-hwmon.c index 57240740b161..de5819199e2e 100644 --- a/trunk/drivers/hwmon/twl4030-madc-hwmon.c +++ b/trunk/drivers/hwmon/twl4030-madc-hwmon.c @@ -98,6 +98,7 @@ static const struct attribute_group twl4030_madc_group = { static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev) { int ret; + int status; struct device *hwmon; ret = sysfs_create_group(&pdev->dev.kobj, &twl4030_madc_group); @@ -106,7 +107,7 @@ static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev) hwmon = hwmon_device_register(&pdev->dev); if (IS_ERR(hwmon)) { dev_err(&pdev->dev, "hwmon_device_register failed.\n"); - ret = PTR_ERR(hwmon); + status = PTR_ERR(hwmon); goto err_reg; } diff --git a/trunk/drivers/i2c/busses/i2c-i801.c b/trunk/drivers/i2c/busses/i2c-i801.c index 455e909bc768..72c0415f6f94 100644 --- a/trunk/drivers/i2c/busses/i2c-i801.c +++ b/trunk/drivers/i2c/busses/i2c-i801.c @@ -134,15 +134,10 @@ SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \ SMBHSTSTS_INTR) -/* Older devices have their ID defined in */ -#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 -#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 -#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 -#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 struct i801_priv { struct i2c_adapter adapter; diff --git a/trunk/drivers/i2c/busses/i2c-mpc.c b/trunk/drivers/i2c/busses/i2c-mpc.c index 107397a606b4..75b984c519ac 100644 --- a/trunk/drivers/i2c/busses/i2c-mpc.c +++ b/trunk/drivers/i2c/busses/i2c-mpc.c @@ -560,18 +560,15 @@ static struct i2c_adapter mpc_ops = { .timeout = HZ, }; -static const struct of_device_id mpc_i2c_of_match[]; static int __devinit fsl_i2c_probe(struct platform_device *op) { - const struct of_device_id *match; struct mpc_i2c *i2c; const u32 *prop; u32 clock = MPC_I2C_CLOCK_LEGACY; int result = 0; int plen; - match = of_match_device(mpc_i2c_of_match, &op->dev); - if (!match) + if (!op->dev.of_match) return -EINVAL; i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); @@ -608,8 +605,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op) clock = *prop; } - if (match->data) { - struct mpc_i2c_data *data = match->data; + if (op->dev.of_match->data) { + struct mpc_i2c_data *data = op->dev.of_match->data; data->setup(op->dev.of_node, i2c, clock, data->prescaler); } else { /* Backwards compatibility */ diff --git 
a/trunk/drivers/i2c/busses/i2c-parport.c b/trunk/drivers/i2c/busses/i2c-parport.c index 2dbba163b102..0eb1515541e7 100644 --- a/trunk/drivers/i2c/busses/i2c-parport.c +++ b/trunk/drivers/i2c/busses/i2c-parport.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * i2c-parport.c I2C bus over parallel port * * ------------------------------------------------------------------------ * - Copyright (C) 2003-2011 Jean Delvare + Copyright (C) 2003-2010 Jean Delvare Based on older i2c-philips-par.c driver Copyright (C) 1995-2000 Simon G. Vogl @@ -33,8 +33,6 @@ #include #include #include -#include -#include #include "i2c-parport.h" /* ----- Device list ------------------------------------------------------ */ @@ -45,11 +43,10 @@ struct i2c_par { struct i2c_algo_bit_data algo_data; struct i2c_smbus_alert_setup alert_data; struct i2c_client *ara; - struct list_head node; + struct i2c_par *next; }; -static LIST_HEAD(adapter_list); -static DEFINE_MUTEX(adapter_list_lock); +static struct i2c_par *adapter_list; /* ----- Low-level parallel port access ----------------------------------- */ @@ -231,9 +228,8 @@ static void i2c_parport_attach (struct parport *port) } /* Add the new adapter to the list */ - mutex_lock(&adapter_list_lock); - list_add_tail(&adapter->node, &adapter_list); - mutex_unlock(&adapter_list_lock); + adapter->next = adapter_list; + adapter_list = adapter; return; ERROR1: @@ -245,11 +241,11 @@ static void i2c_parport_attach (struct parport *port) static void i2c_parport_detach (struct parport *port) { - struct i2c_par *adapter, *_n; + struct i2c_par *adapter, *prev; /* Walk the list */ - mutex_lock(&adapter_list_lock); - list_for_each_entry_safe(adapter, _n, &adapter_list, node) { + for (prev = NULL, adapter = adapter_list; adapter; + prev = adapter, adapter = adapter->next) { if (adapter->pdev->port == port) { if (adapter->ara) { parport_disable_irq(port); @@ -263,11 +259,14 @@ static void i2c_parport_detach (struct parport *port) parport_release(adapter->pdev); parport_unregister_device(adapter->pdev); - list_del(&adapter->node); + if (prev) + prev->next = adapter->next; + else + adapter_list = adapter->next; kfree(adapter); + return; } } - mutex_unlock(&adapter_list_lock); } static struct parport_driver i2c_parport_driver = { diff --git a/trunk/drivers/i2c/busses/i2c-pnx.c b/trunk/drivers/i2c/busses/i2c-pnx.c index 04be9f82e14b..a97e3fec8148 100644 --- a/trunk/drivers/i2c/busses/i2c-pnx.c +++ b/trunk/drivers/i2c/busses/i2c-pnx.c @@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) jiffies, expires); timer->expires = jiffies + expires; - timer->data = (unsigned long)alg_data; + timer->data = (unsigned long)&alg_data; add_timer(timer); } diff --git a/trunk/drivers/infiniband/core/cma.c b/trunk/drivers/infiniband/core/cma.c index 99dde874fbbd..5ed9d25d021a 100644 --- a/trunk/drivers/infiniband/core/cma.c +++ b/trunk/drivers/infiniband/core/cma.c @@ -148,7 +148,6 @@ struct rdma_id_private { u32 qp_num; u8 srq; u8 tos; - u8 reuseaddr; }; struct cma_multicast { @@ -713,21 +712,6 @@ static inline int cma_any_addr(struct sockaddr *addr) return cma_zero_addr(addr) || cma_loopback_addr(addr); } -static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) -{ - if (src->sa_family != dst->sa_family) - return -1; - - switch (src->sa_family) { - case AF_INET: - return ((struct sockaddr_in *) src)->sin_addr.s_addr != - ((struct sockaddr_in *) dst)->sin_addr.s_addr; - default: - return ipv6_addr_cmp(&((struct 
sockaddr_in6 *) src)->sin6_addr, - &((struct sockaddr_in6 *) dst)->sin6_addr); - } -} - static inline __be16 cma_port(struct sockaddr *addr) { if (addr->sa_family == AF_INET) @@ -1580,6 +1564,50 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv) mutex_unlock(&lock); } +int rdma_listen(struct rdma_cm_id *id, int backlog) +{ + struct rdma_id_private *id_priv; + int ret; + + id_priv = container_of(id, struct rdma_id_private, id); + if (id_priv->state == CMA_IDLE) { + ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; + ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); + if (ret) + return ret; + } + + if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) + return -EINVAL; + + id_priv->backlog = backlog; + if (id->device) { + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: + ret = cma_ib_listen(id_priv); + if (ret) + goto err; + break; + case RDMA_TRANSPORT_IWARP: + ret = cma_iw_listen(id_priv, backlog); + if (ret) + goto err; + break; + default: + ret = -ENOSYS; + goto err; + } + } else + cma_listen_on_all(id_priv); + + return 0; +err: + id_priv->backlog = 0; + cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); + return ret; +} +EXPORT_SYMBOL(rdma_listen); + void rdma_set_service_type(struct rdma_cm_id *id, int tos) { struct rdma_id_private *id_priv; @@ -2062,25 +2090,6 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, } EXPORT_SYMBOL(rdma_resolve_addr); -int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) -{ - struct rdma_id_private *id_priv; - unsigned long flags; - int ret; - - id_priv = container_of(id, struct rdma_id_private, id); - spin_lock_irqsave(&id_priv->lock, flags); - if (id_priv->state == CMA_IDLE) { - id_priv->reuseaddr = reuse; - ret = 0; - } else { - ret = -EINVAL; - } - spin_unlock_irqrestore(&id_priv->lock, flags); - return ret; -} -EXPORT_SYMBOL(rdma_set_reuseaddr); - static void cma_bind_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) { @@ -2156,71 +2165,41 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) return -EADDRNOTAVAIL; } -/* - * Check that the requested port is available. This is called when trying to - * bind to a specific port, or when trying to listen on a bound port. In - * the latter case, the provided id_priv may already be on the bind_list, but - * we still need to check that it's okay to start listening. 
- */ -static int cma_check_port(struct rdma_bind_list *bind_list, - struct rdma_id_private *id_priv, uint8_t reuseaddr) -{ - struct rdma_id_private *cur_id; - struct sockaddr *addr, *cur_addr; - struct hlist_node *node; - - addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; - if (cma_any_addr(addr) && !reuseaddr) - return -EADDRNOTAVAIL; - - hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { - if (id_priv == cur_id) - continue; - - if ((cur_id->state == CMA_LISTEN) || - !reuseaddr || !cur_id->reuseaddr) { - cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; - if (cma_any_addr(cur_addr)) - return -EADDRNOTAVAIL; - - if (!cma_addr_cmp(addr, cur_addr)) - return -EADDRINUSE; - } - } - return 0; -} - static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) { + struct rdma_id_private *cur_id; + struct sockaddr_in *sin, *cur_sin; struct rdma_bind_list *bind_list; + struct hlist_node *node; unsigned short snum; - int ret; - snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)); + sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; + snum = ntohs(sin->sin_port); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; bind_list = idr_find(ps, snum); - if (!bind_list) { - ret = cma_alloc_port(ps, id_priv, snum); - } else { - ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); - if (!ret) - cma_bind_port(bind_list, id_priv); - } - return ret; -} + if (!bind_list) + return cma_alloc_port(ps, id_priv, snum); -static int cma_bind_listen(struct rdma_id_private *id_priv) -{ - struct rdma_bind_list *bind_list = id_priv->bind_list; - int ret = 0; + /* + * We don't support binding to any address if anyone is bound to + * a specific address on the same port. + */ + if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) + return -EADDRNOTAVAIL; - mutex_lock(&lock); - if (bind_list->owners.first->next) - ret = cma_check_port(bind_list, id_priv, 0); - mutex_unlock(&lock); - return ret; + hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { + if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr)) + return -EADDRNOTAVAIL; + + cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; + if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) + return -EADDRINUSE; + } + + cma_bind_port(bind_list, id_priv); + return 0; } static int cma_get_port(struct rdma_id_private *id_priv) @@ -2274,56 +2253,6 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, return 0; } -int rdma_listen(struct rdma_cm_id *id, int backlog) -{ - struct rdma_id_private *id_priv; - int ret; - - id_priv = container_of(id, struct rdma_id_private, id); - if (id_priv->state == CMA_IDLE) { - ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; - ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); - if (ret) - return ret; - } - - if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) - return -EINVAL; - - if (id_priv->reuseaddr) { - ret = cma_bind_listen(id_priv); - if (ret) - goto err; - } - - id_priv->backlog = backlog; - if (id->device) { - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: - ret = cma_ib_listen(id_priv); - if (ret) - goto err; - break; - case RDMA_TRANSPORT_IWARP: - ret = cma_iw_listen(id_priv, backlog); - if (ret) - goto err; - break; - default: - ret = -ENOSYS; - goto err; - } - } else - cma_listen_on_all(id_priv); - - return 0; -err: - id_priv->backlog = 0; - cma_comp_exch(id_priv, 
CMA_LISTEN, CMA_ADDR_BOUND); - return ret; -} -EXPORT_SYMBOL(rdma_listen); - int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) { struct rdma_id_private *id_priv; diff --git a/trunk/drivers/infiniband/core/iwcm.c b/trunk/drivers/infiniband/core/iwcm.c index a9c042345c6f..2a1e9ae134b4 100644 --- a/trunk/drivers/infiniband/core/iwcm.c +++ b/trunk/drivers/infiniband/core/iwcm.c @@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, */ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); - if (iw_event->status == 0) { + if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) { cm_id_priv->id.local_addr = iw_event->local_addr; cm_id_priv->id.remote_addr = iw_event->remote_addr; cm_id_priv->state = IW_CM_STATE_ESTABLISHED; diff --git a/trunk/drivers/infiniband/core/ucma.c b/trunk/drivers/infiniband/core/ucma.c index b3fa798525b2..ec1e9da1488b 100644 --- a/trunk/drivers/infiniband/core/ucma.c +++ b/trunk/drivers/infiniband/core/ucma.c @@ -883,13 +883,6 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname, } rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); break; - case RDMA_OPTION_ID_REUSEADDR: - if (optlen != sizeof(int)) { - ret = -EINVAL; - break; - } - ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); - break; default: ret = -ENOSYS; } diff --git a/trunk/drivers/infiniband/hw/cxgb4/cm.c b/trunk/drivers/infiniband/hw/cxgb4/cm.c index d7ee70fc9173..9d8dcfab2b38 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/cm.c +++ b/trunk/drivers/infiniband/hw/cxgb4/cm.c @@ -1198,7 +1198,9 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) } PDBG("%s ep %p status %d error %d\n", __func__, ep, rpl->status, status2errno(rpl->status)); - c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); + ep->com.wr_wait.ret = status2errno(rpl->status); + ep->com.wr_wait.done = 1; + wake_up(&ep->com.wr_wait.wait); return 0; } @@ -1232,7 +1234,9 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) struct c4iw_listen_ep *ep = lookup_stid(t, stid); PDBG("%s ep %p\n", __func__, ep); - c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); + ep->com.wr_wait.ret = status2errno(rpl->status); + ep->com.wr_wait.done = 1; + wake_up(&ep->com.wr_wait.wait); return 0; } @@ -1462,7 +1466,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) struct c4iw_qp_attributes attrs; int disconnect = 1; int release = 0; - int abort = 0; + int closing = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(hdr); @@ -1488,22 +1492,23 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) * in rdma connection migration (see c4iw_accept_cr()). 
*/ __state_set(&ep->com, CLOSING); + ep->com.wr_wait.done = 1; + ep->com.wr_wait.ret = -ECONNRESET; PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); - c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); + wake_up(&ep->com.wr_wait.wait); break; case MPA_REP_SENT: __state_set(&ep->com, CLOSING); + ep->com.wr_wait.done = 1; + ep->com.wr_wait.ret = -ECONNRESET; PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); - c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); + wake_up(&ep->com.wr_wait.wait); break; case FPDU_MODE: start_ep_timer(ep); __state_set(&ep->com, CLOSING); - attrs.next_state = C4IW_QP_STATE_CLOSING; - abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); + closing = 1; peer_close_upcall(ep); - disconnect = 1; break; case ABORTING: disconnect = 0; @@ -1531,6 +1536,11 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) BUG_ON(1); } mutex_unlock(&ep->com.mutex); + if (closing) { + attrs.next_state = C4IW_QP_STATE_CLOSING; + c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); + } if (disconnect) c4iw_ep_disconnect(ep, 0, GFP_KERNEL); if (release) @@ -1571,7 +1581,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) /* * Wake up any threads in rdma_init() or rdma_fini(). */ - c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); + ep->com.wr_wait.done = 1; + ep->com.wr_wait.ret = -ECONNRESET; + wake_up(&ep->com.wr_wait.wait); mutex_lock(&ep->com.mutex); switch (ep->com.state) { @@ -1698,14 +1710,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) ep = lookup_tid(t, tid); BUG_ON(!ep); - if (ep && ep->com.qp) { + if (ep->com.qp) { printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, ep->com.qp->wq.sq.qid); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } else - printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); + printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); return 0; } @@ -2284,8 +2296,14 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); - if (wr_waitp) - c4iw_wake_up(wr_waitp, ret ? 
-ret : 0); + if (wr_waitp) { + if (ret) + wr_waitp->ret = -ret; + else + wr_waitp->ret = 0; + wr_waitp->done = 1; + wake_up(&wr_waitp->wait); + } kfree_skb(skb); break; case 2: diff --git a/trunk/drivers/infiniband/hw/cxgb4/device.c b/trunk/drivers/infiniband/hw/cxgb4/device.c index 40a13cc633a3..e29172c2afcb 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/device.c +++ b/trunk/drivers/infiniband/hw/cxgb4/device.c @@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); -static LIST_HEAD(uld_ctx_list); +static LIST_HEAD(dev_list); static DEFINE_MUTEX(dev_mutex); static struct dentry *c4iw_debugfs_root; @@ -370,23 +370,18 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) c4iw_destroy_resource(&rdev->resource); } -struct uld_ctx { - struct list_head entry; - struct cxgb4_lld_info lldi; - struct c4iw_dev *dev; -}; - -static void c4iw_remove(struct uld_ctx *ctx) +static void c4iw_remove(struct c4iw_dev *dev) { - PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); - c4iw_unregister_device(ctx->dev); - c4iw_rdev_close(&ctx->dev->rdev); - idr_destroy(&ctx->dev->cqidr); - idr_destroy(&ctx->dev->qpidr); - idr_destroy(&ctx->dev->mmidr); - iounmap(ctx->dev->rdev.oc_mw_kva); - ib_dealloc_device(&ctx->dev->ibdev); - ctx->dev = NULL; + PDBG("%s c4iw_dev %p\n", __func__, dev); + list_del(&dev->entry); + if (dev->registered) + c4iw_unregister_device(dev); + c4iw_rdev_close(&dev->rdev); + idr_destroy(&dev->cqidr); + idr_destroy(&dev->qpidr); + idr_destroy(&dev->mmidr); + iounmap(dev->rdev.oc_mw_kva); + ib_dealloc_device(&dev->ibdev); } static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) @@ -397,7 +392,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); if (!devp) { printk(KERN_ERR MOD "Cannot allocate ib device\n"); - return ERR_PTR(-ENOMEM); + return NULL; } devp->rdev.lldi = *infop; @@ -407,23 +402,27 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, devp->rdev.lldi.vr->ocq.size); - PDBG(KERN_INFO MOD "ocq memory: " + printk(KERN_INFO MOD "ocq memory: " "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); + mutex_lock(&dev_mutex); + ret = c4iw_rdev_open(&devp->rdev); if (ret) { mutex_unlock(&dev_mutex); printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); ib_dealloc_device(&devp->ibdev); - return ERR_PTR(ret); + return NULL; } idr_init(&devp->cqidr); idr_init(&devp->qpidr); idr_init(&devp->mmidr); spin_lock_init(&devp->lock); + list_add_tail(&devp->entry, &dev_list); + mutex_unlock(&dev_mutex); if (c4iw_debugfs_root) { devp->debugfs_root = debugfs_create_dir( @@ -436,7 +435,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) { - struct uld_ctx *ctx; + struct c4iw_dev *dev; static int vers_printed; int i; @@ -444,33 +443,25 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", DRV_VERSION); - ctx = kzalloc(sizeof *ctx, GFP_KERNEL); - if (!ctx) { - ctx = ERR_PTR(-ENOMEM); + dev = c4iw_alloc(infop); + if (!dev) goto out; - } - ctx->lldi = *infop; PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", - __func__, pci_name(ctx->lldi.pdev), - ctx->lldi.nchan, ctx->lldi.nrxq, - ctx->lldi.ntxq, 
ctx->lldi.nports); - - mutex_lock(&dev_mutex); - list_add_tail(&ctx->entry, &uld_ctx_list); - mutex_unlock(&dev_mutex); + __func__, pci_name(dev->rdev.lldi.pdev), + dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq, + dev->rdev.lldi.ntxq, dev->rdev.lldi.nports); - for (i = 0; i < ctx->lldi.nrxq; i++) - PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]); + for (i = 0; i < dev->rdev.lldi.nrxq; i++) + PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]); out: - return ctx; + return dev; } static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *gl) { - struct uld_ctx *ctx = handle; - struct c4iw_dev *dev = ctx->dev; + struct c4iw_dev *dev = handle; struct sk_buff *skb; const struct cpl_act_establish *rpl; unsigned int opcode; @@ -512,49 +503,47 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) { - struct uld_ctx *ctx = handle; + struct c4iw_dev *dev = handle; PDBG("%s new_state %u\n", __func__, new_state); switch (new_state) { case CXGB4_STATE_UP: - printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); - if (!ctx->dev) { - int ret = 0; - - ctx->dev = c4iw_alloc(&ctx->lldi); - if (!IS_ERR(ctx->dev)) - ret = c4iw_register_device(ctx->dev); - if (IS_ERR(ctx->dev) || ret) + printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev)); + if (!dev->registered) { + int ret; + ret = c4iw_register_device(dev); + if (ret) printk(KERN_ERR MOD "%s: RDMA registration failed: %d\n", - pci_name(ctx->lldi.pdev), ret); + pci_name(dev->rdev.lldi.pdev), ret); } break; case CXGB4_STATE_DOWN: printk(KERN_INFO MOD "%s: Down\n", - pci_name(ctx->lldi.pdev)); - if (ctx->dev) - c4iw_remove(ctx); + pci_name(dev->rdev.lldi.pdev)); + if (dev->registered) + c4iw_unregister_device(dev); break; case CXGB4_STATE_START_RECOVERY: printk(KERN_INFO MOD "%s: Fatal Error\n", - pci_name(ctx->lldi.pdev)); - if (ctx->dev) { + pci_name(dev->rdev.lldi.pdev)); + dev->rdev.flags |= T4_FATAL_ERROR; + if (dev->registered) { struct ib_event event; - ctx->dev->rdev.flags |= T4_FATAL_ERROR; memset(&event, 0, sizeof event); event.event = IB_EVENT_DEVICE_FATAL; - event.device = &ctx->dev->ibdev; + event.device = &dev->ibdev; ib_dispatch_event(&event); - c4iw_remove(ctx); + c4iw_unregister_device(dev); } break; case CXGB4_STATE_DETACH: printk(KERN_INFO MOD "%s: Detach\n", - pci_name(ctx->lldi.pdev)); - if (ctx->dev) - c4iw_remove(ctx); + pci_name(dev->rdev.lldi.pdev)); + mutex_lock(&dev_mutex); + c4iw_remove(dev); + mutex_unlock(&dev_mutex); break; } return 0; @@ -587,13 +576,11 @@ static int __init c4iw_init_module(void) static void __exit c4iw_exit_module(void) { - struct uld_ctx *ctx, *tmp; + struct c4iw_dev *dev, *tmp; mutex_lock(&dev_mutex); - list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) { - if (ctx->dev) - c4iw_remove(ctx); - kfree(ctx); + list_for_each_entry_safe(dev, tmp, &dev_list, entry) { + c4iw_remove(dev); } mutex_unlock(&dev_mutex); cxgb4_unregister_uld(CXGB4_ULD_RDMA); diff --git a/trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 35d2a5dd9bb4..9f6166f59268 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -131,58 +131,42 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) #define C4IW_WR_TO (10*HZ) -enum { - REPLY_READY = 0, -}; - struct c4iw_wr_wait { wait_queue_head_t wait; - unsigned long status; + int done; int ret; }; static inline void c4iw_init_wr_wait(struct c4iw_wr_wait 
*wr_waitp) { wr_waitp->ret = 0; - wr_waitp->status = 0; + wr_waitp->done = 0; init_waitqueue_head(&wr_waitp->wait); } -static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) -{ - wr_waitp->ret = ret; - set_bit(REPLY_READY, &wr_waitp->status); - wake_up(&wr_waitp->wait); -} - static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp, u32 hwtid, u32 qpid, const char *func) { unsigned to = C4IW_WR_TO; - int ret; - do { - ret = wait_event_timeout(wr_waitp->wait, - test_and_clear_bit(REPLY_READY, &wr_waitp->status), to); - if (!ret) { + + wait_event_timeout(wr_waitp->wait, wr_waitp->done, to); + if (!wr_waitp->done) { printk(KERN_ERR MOD "%s - Device %s not responding - " "tid %u qpid %u\n", func, pci_name(rdev->lldi.pdev), hwtid, qpid); - if (c4iw_fatal_error(rdev)) { - wr_waitp->ret = -EIO; - break; - } to = to << 2; } - } while (!ret); + } while (!wr_waitp->done); if (wr_waitp->ret) - PDBG("%s: FW reply %d tid %u qpid %u\n", - pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); + printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n", + pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); return wr_waitp->ret; } + struct c4iw_dev { struct ib_device ibdev; struct c4iw_rdev rdev; @@ -191,7 +175,9 @@ struct c4iw_dev { struct idr qpidr; struct idr mmidr; spinlock_t lock; + struct list_head entry; struct dentry *debugfs_root; + u8 registered; }; static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) diff --git a/trunk/drivers/infiniband/hw/cxgb4/provider.c b/trunk/drivers/infiniband/hw/cxgb4/provider.c index 5b9e4220ca08..f66dd8bf5128 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/provider.c +++ b/trunk/drivers/infiniband/hw/cxgb4/provider.c @@ -516,6 +516,7 @@ int c4iw_register_device(struct c4iw_dev *dev) if (ret) goto bail2; } + dev->registered = 1; return 0; bail2: ib_unregister_device(&dev->ibdev); @@ -534,5 +535,6 @@ void c4iw_unregister_device(struct c4iw_dev *dev) c4iw_class_attributes[i]); ib_unregister_device(&dev->ibdev); kfree(dev->ibdev.iwcm); + dev->registered = 0; return; } diff --git a/trunk/drivers/infiniband/hw/cxgb4/qp.c b/trunk/drivers/infiniband/hw/cxgb4/qp.c index 3b773b05a898..70a5a3c646da 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/qp.c +++ b/trunk/drivers/infiniband/hw/cxgb4/qp.c @@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ - (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) | + t4_sq_onchip(&wq->sq) ? 
F_FW_RI_RES_WR_ONCHIP : 0 | V_FW_RI_RES_WR_IQID(scq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( V_FW_RI_RES_WR_DCAEN(0) | @@ -1210,6 +1210,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, if (ret) { if (internal) c4iw_get_ep(&qhp->ep->com); + disconnect = abort = 1; goto err; } break; diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c index be24ac726114..58c0e417bc30 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c @@ -398,6 +398,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, struct ipath_devdata *dd; unsigned long long addr; u32 bar0 = 0, bar1 = 0; + u8 rev; dd = ipath_alloc_devdata(pdev); if (IS_ERR(dd)) { @@ -539,7 +540,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, goto bail_regions; } - dd->ipath_pcirev = pdev->revision; + ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + if (ret) { + ipath_dev_err(dd, "Failed to read PCI revision ID unit " + "%u: err %d\n", dd->ipath_unit, -ret); + goto bail_regions; /* shouldn't ever happen */ + } + dd->ipath_pcirev = rev; #if defined(__powerpc__) /* There isn't a generic way to specify writethrough mappings */ diff --git a/trunk/drivers/infiniband/hw/nes/nes_cm.c b/trunk/drivers/infiniband/hw/nes/nes_cm.c index e74cdf9ef471..33c7eedaba6c 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_cm.c +++ b/trunk/drivers/infiniband/hw/nes/nes_cm.c @@ -2563,7 +2563,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) u16 last_ae; u8 original_hw_tcp_state; u8 original_ibqp_state; - int disconn_status = 0; + enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK; int issue_disconn = 0; int issue_close = 0; int issue_flush = 0; @@ -2605,7 +2605,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { issue_disconn = 1; if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) - disconn_status = -ECONNRESET; + disconn_status = IW_CM_EVENT_STATUS_RESET; } if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || @@ -2666,7 +2666,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) cm_id->provider_data = nesqp; /* Send up the close complete event */ cm_event.event = IW_CM_EVENT_CLOSE; - cm_event.status = 0; + cm_event.status = IW_CM_EVENT_STATUS_OK; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -2966,7 +2966,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) nes_add_ref(&nesqp->ibqp); cm_event.event = IW_CM_EVENT_ESTABLISHED; - cm_event.status = 0; + cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; cm_event.provider_data = (void *)nesqp; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3377,7 +3377,7 @@ static void cm_event_connected(struct nes_cm_event *event) /* notify OF layer we successfully created the requested connection */ cm_event.event = IW_CM_EVENT_CONNECT_REPLY; - cm_event.status = 0; + cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr.sin_family = AF_INET; cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; @@ -3484,7 +3484,7 @@ static void cm_event_reset(struct nes_cm_event *event) nesqp->cm_id = NULL; /* cm_id->provider_data = NULL; */ cm_event.event = IW_CM_EVENT_DISCONNECT; - cm_event.status = -ECONNRESET; + cm_event.status = IW_CM_EVENT_STATUS_RESET; 
cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event) ret = cm_id->event_handler(cm_id, &cm_event); atomic_inc(&cm_closes); cm_event.event = IW_CM_EVENT_CLOSE; - cm_event.status = 0; + cm_event.status = IW_CM_EVENT_STATUS_OK; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3534,7 +3534,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) cm_node, cm_id, jiffies); cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; - cm_event.status = 0; + cm_event.status = IW_CM_EVENT_STATUS_OK; cm_event.provider_data = (void *)cm_node; cm_event.local_addr.sin_family = AF_INET; diff --git a/trunk/drivers/infiniband/hw/nes/nes_verbs.c b/trunk/drivers/infiniband/hw/nes/nes_verbs.c index 95ca93ceedac..26d8018c0a7c 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_verbs.c +++ b/trunk/drivers/infiniband/hw/nes/nes_verbs.c @@ -1484,7 +1484,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { cm_id = nesqp->cm_id; cm_event.event = IW_CM_EVENT_CONNECT_REPLY; - cm_event.status = -ETIMEDOUT; + cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; cm_event.private_data = NULL; diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba6120.c b/trunk/drivers/infiniband/hw/qib/qib_iba6120.c index d8ca0a0b970d..7de4b7ebffc5 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba6120.c @@ -1799,7 +1799,7 @@ static int qib_6120_setup_reset(struct qib_devdata *dd) /* * Keep chip from being accessed until we are ready. Use * writeq() directly, to allow the write even though QIB_PRESENT - * isn't set. + * isn't' set. */ dd->flags &= ~(QIB_INITTED | QIB_PRESENT); dd->int_counter = 0; /* so we check interrupts work again */ diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7220.c b/trunk/drivers/infiniband/hw/qib/qib_iba7220.c index c765a2eb04cf..74fe0360bec7 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba7220.c @@ -2111,7 +2111,7 @@ static int qib_setup_7220_reset(struct qib_devdata *dd) /* * Keep chip from being accessed until we are ready. Use * writeq() directly, to allow the write even though QIB_PRESENT - * isn't set. + * isn't' set. */ dd->flags &= ~(QIB_INITTED | QIB_PRESENT); dd->int_counter = 0; /* so we check interrupts work again */ diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c index 9f53e68a096a..55de3cf3441c 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c @@ -3299,7 +3299,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd) /* * Keep chip from being accessed until we are ready. Use * writeq() directly, to allow the write even though QIB_PRESENT - * isn't set. + * isn't' set. 
*/ dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); dd->flags |= QIB_DOING_RESET; @@ -7534,8 +7534,7 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); tstart = get_jiffies_64(); while (chan_done && - !time_after64(get_jiffies_64(), - tstart + msecs_to_jiffies(500))) { + !time_after64(tstart, tstart + msecs_to_jiffies(500))) { msleep(20); for (chan = 0; chan < SERDES_CHANS; ++chan) { rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), diff --git a/trunk/drivers/infiniband/hw/qib/qib_pcie.c b/trunk/drivers/infiniband/hw/qib/qib_pcie.c index 891cc2ff5f00..48b6674cbc49 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_pcie.c +++ b/trunk/drivers/infiniband/hw/qib/qib_pcie.c @@ -526,8 +526,11 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) */ devid = parent->device; if (devid >= 0x25e2 && devid <= 0x25fa) { + u8 rev; + /* 5000 P/V/X/Z */ - if (parent->revision <= 0xb2) + pci_read_config_byte(parent, PCI_REVISION_ID, &rev); + if (rev <= 0xb2) bits = 1U << 10; else bits = 7U << 10; diff --git a/trunk/drivers/input/keyboard/atakbd.c b/trunk/drivers/input/keyboard/atakbd.c index 10bcd4ae5402..1839194ea987 100644 --- a/trunk/drivers/input/keyboard/atakbd.c +++ b/trunk/drivers/input/keyboard/atakbd.c @@ -223,9 +223,8 @@ static int __init atakbd_init(void) return -ENODEV; // need to init core driver if not already done so - error = atari_keyb_init(); - if (error) - return error; + if (atari_keyb_init()) + return -ENODEV; atakbd_dev = input_allocate_device(); if (!atakbd_dev) diff --git a/trunk/drivers/input/mouse/atarimouse.c b/trunk/drivers/input/mouse/atarimouse.c index 5c4a692bf73a..adf45b3040e9 100644 --- a/trunk/drivers/input/mouse/atarimouse.c +++ b/trunk/drivers/input/mouse/atarimouse.c @@ -77,15 +77,15 @@ static void atamouse_interrupt(char *buf) #endif /* only relative events get here */ - dx = buf[1]; - dy = buf[2]; + dx = buf[1]; + dy = -buf[2]; input_report_rel(atamouse_dev, REL_X, dx); input_report_rel(atamouse_dev, REL_Y, dy); - input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x4); + input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x1); input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); - input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x1); + input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x4); input_sync(atamouse_dev); @@ -108,7 +108,7 @@ static int atamouse_open(struct input_dev *dev) static void atamouse_close(struct input_dev *dev) { ikbd_mouse_disable(); - atari_input_mouse_interrupt_hook = NULL; + atari_mouse_interrupt_hook = NULL; } static int __init atamouse_init(void) @@ -118,9 +118,8 @@ static int __init atamouse_init(void) if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) return -ENODEV; - error = atari_keyb_init(); - if (error) - return error; + if (!atari_keyb_init()) + return -ENODEV; atamouse_dev = input_allocate_device(); if (!atamouse_dev) diff --git a/trunk/drivers/input/touchscreen/ads7846.c b/trunk/drivers/input/touchscreen/ads7846.c index 1de1c19dad30..c24946f51256 100644 --- a/trunk/drivers/input/touchscreen/ads7846.c +++ b/trunk/drivers/input/touchscreen/ads7846.c @@ -281,24 +281,17 @@ struct ser_req { u8 command; u8 ref_off; u16 scratch; + __be16 sample; struct spi_message msg; struct spi_transfer xfer[6]; - /* - * DMA (thus cache coherency maintenance) requires the - * transfer buffers to live in their own cache lines. 
- */ - __be16 sample ____cacheline_aligned; }; struct ads7845_ser_req { u8 command[3]; + u8 pwrdown[3]; + u8 sample[3]; struct spi_message msg; struct spi_transfer xfer[2]; - /* - * DMA (thus cache coherency maintenance) requires the - * transfer buffers to live in their own cache lines. - */ - u8 sample[3] ____cacheline_aligned; }; static int ads7846_read12_ser(struct device *dev, unsigned command) diff --git a/trunk/drivers/input/touchscreen/wm831x-ts.c b/trunk/drivers/input/touchscreen/wm831x-ts.c index 9175d49d2546..6ae054f8e0aa 100644 --- a/trunk/drivers/input/touchscreen/wm831x-ts.c +++ b/trunk/drivers/input/touchscreen/wm831x-ts.c @@ -68,23 +68,8 @@ struct wm831x_ts { unsigned int pd_irq; bool pressure; bool pen_down; - struct work_struct pd_data_work; }; -static void wm831x_pd_data_work(struct work_struct *work) -{ - struct wm831x_ts *wm831x_ts = - container_of(work, struct wm831x_ts, pd_data_work); - - if (wm831x_ts->pen_down) { - enable_irq(wm831x_ts->data_irq); - dev_dbg(wm831x_ts->wm831x->dev, "IRQ PD->DATA done\n"); - } else { - enable_irq(wm831x_ts->pd_irq); - dev_dbg(wm831x_ts->wm831x->dev, "IRQ DATA->PD done\n"); - } -} - static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data) { struct wm831x_ts *wm831x_ts = irq_data; @@ -125,9 +110,6 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data) } if (!wm831x_ts->pen_down) { - /* Switch from data to pen down */ - dev_dbg(wm831x->dev, "IRQ DATA->PD\n"); - disable_irq_nosync(wm831x_ts->data_irq); /* Don't need data any more */ @@ -146,10 +128,6 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data) ABS_PRESSURE, 0); input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0); - - schedule_work(&wm831x_ts->pd_data_work); - } else { - input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1); } input_sync(wm831x_ts->input_dev); @@ -163,11 +141,6 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data) struct wm831x *wm831x = wm831x_ts->wm831x; int ena = 0; - if (wm831x_ts->pen_down) - return IRQ_HANDLED; - - disable_irq_nosync(wm831x_ts->pd_irq); - /* Start collecting data */ if (wm831x_ts->pressure) ena |= WM831X_TCH_Z_ENA; @@ -176,14 +149,14 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data) WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena); + input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1); + input_sync(wm831x_ts->input_dev); + wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1, WM831X_TCHPD_EINT, WM831X_TCHPD_EINT); wm831x_ts->pen_down = true; - - /* Switch from pen down to data */ - dev_dbg(wm831x->dev, "IRQ PD->DATA\n"); - schedule_work(&wm831x_ts->pd_data_work); + enable_irq(wm831x_ts->data_irq); return IRQ_HANDLED; } @@ -209,28 +182,13 @@ static void wm831x_ts_input_close(struct input_dev *idev) struct wm831x_ts *wm831x_ts = input_get_drvdata(idev); struct wm831x *wm831x = wm831x_ts->wm831x; - /* Shut the controller down, disabling all other functionality too */ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1, - WM831X_TCH_ENA | WM831X_TCH_X_ENA | - WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, 0); - - /* Make sure any pending IRQs are done, the above will prevent - * new ones firing. 
- */ - synchronize_irq(wm831x_ts->data_irq); - synchronize_irq(wm831x_ts->pd_irq); - - /* Make sure the IRQ completion work is quiesced */ - flush_work_sync(&wm831x_ts->pd_data_work); + WM831X_TCH_ENA | WM831X_TCH_CVT_ENA | + WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | + WM831X_TCH_Z_ENA, 0); - /* If we ended up with the pen down then make sure we revert back - * to pen detection state for the next time we start up. - */ - if (wm831x_ts->pen_down) { + if (wm831x_ts->pen_down) disable_irq(wm831x_ts->data_irq); - enable_irq(wm831x_ts->pd_irq); - wm831x_ts->pen_down = false; - } } static __devinit int wm831x_ts_probe(struct platform_device *pdev) @@ -240,7 +198,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev) struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent); struct wm831x_touch_pdata *pdata = NULL; struct input_dev *input_dev; - int error, irqf; + int error; if (core_pdata) pdata = core_pdata->touch; @@ -254,7 +212,6 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev) wm831x_ts->wm831x = wm831x; wm831x_ts->input_dev = input_dev; - INIT_WORK(&wm831x_ts->pd_data_work, wm831x_pd_data_work); /* * If we have a direct IRQ use it, otherwise use the interrupt @@ -313,14 +270,9 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev) wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1, WM831X_TCH_RATE_MASK, 6); - if (pdata && pdata->data_irqf) - irqf = pdata->data_irqf; - else - irqf = IRQF_TRIGGER_HIGH; - error = request_threaded_irq(wm831x_ts->data_irq, NULL, wm831x_ts_data_irq, - irqf | IRQF_ONESHOT, + IRQF_ONESHOT, "Touchscreen data", wm831x_ts); if (error) { dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n", @@ -329,14 +281,9 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev) } disable_irq(wm831x_ts->data_irq); - if (pdata && pdata->pd_irqf) - irqf = pdata->pd_irqf; - else - irqf = IRQF_TRIGGER_HIGH; - error = request_threaded_irq(wm831x_ts->pd_irq, NULL, wm831x_ts_pen_down_irq, - irqf | IRQF_ONESHOT, + IRQF_ONESHOT, "Touchscreen pen down", wm831x_ts); if (error) { dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n", diff --git a/trunk/drivers/leds/leds-lm3530.c b/trunk/drivers/leds/leds-lm3530.c index b37e6186d0fa..e7089a1f6cb6 100644 --- a/trunk/drivers/leds/leds-lm3530.c +++ b/trunk/drivers/leds/leds-lm3530.c @@ -349,7 +349,6 @@ static const struct i2c_device_id lm3530_id[] = { {LM3530_NAME, 0}, {} }; -MODULE_DEVICE_TABLE(i2c, lm3530_id); static struct i2c_driver lm3530_i2c_driver = { .probe = lm3530_probe, diff --git a/trunk/drivers/lguest/Kconfig b/trunk/drivers/lguest/Kconfig index 34ae49dc557c..0aaa0597a622 100644 --- a/trunk/drivers/lguest/Kconfig +++ b/trunk/drivers/lguest/Kconfig @@ -5,10 +5,8 @@ config LGUEST ---help--- This is a very simple module which allows you to run multiple instances of the same Linux kernel, using the - "lguest" command found in the Documentation/virtual/lguest - directory. - + "lguest" command found in the Documentation/lguest directory. Note that "lguest" is pronounced to rhyme with "fell quest", - not "rustyvisor". See Documentation/virtual/lguest/lguest.txt. + not "rustyvisor". See Documentation/lguest/lguest.txt. If unsure, say N. If curious, say M. If masochistic, say Y. 
diff --git a/trunk/drivers/lguest/Makefile b/trunk/drivers/lguest/Makefile index 8ac947c7e7c7..7d463c26124f 100644 --- a/trunk/drivers/lguest/Makefile +++ b/trunk/drivers/lguest/Makefile @@ -18,7 +18,7 @@ Mastery: PREFIX=M Beer: @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: - @sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` + @sh ../../Documentation/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` Puppy: @clear @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" diff --git a/trunk/drivers/media/common/tuners/tda18271-common.c b/trunk/drivers/media/common/tuners/tda18271-common.c index aae40e52af5b..5466d47db899 100644 --- a/trunk/drivers/media/common/tuners/tda18271-common.c +++ b/trunk/drivers/media/common/tuners/tda18271-common.c @@ -533,7 +533,16 @@ int tda18271_calc_main_pll(struct dvb_frontend *fe, u32 freq) if (tda_fail(ret)) goto fail; - regs[R_MPD] = (0x7f & pd); + regs[R_MPD] = (0x77 & pd); + + switch (priv->mode) { + case TDA18271_ANALOG: + regs[R_MPD] &= ~0x08; + break; + case TDA18271_DIGITAL: + regs[R_MPD] |= 0x08; + break; + } div = ((d * (freq / 1000)) << 7) / 125; diff --git a/trunk/drivers/media/common/tuners/tda18271-fe.c b/trunk/drivers/media/common/tuners/tda18271-fe.c index d884f5eee73c..9ad4454a148d 100644 --- a/trunk/drivers/media/common/tuners/tda18271-fe.c +++ b/trunk/drivers/media/common/tuners/tda18271-fe.c @@ -579,8 +579,8 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq) #define RF3 2 u32 rf_default[3]; u32 rf_freq[3]; - s32 prog_cal[3]; - s32 prog_tab[3]; + u8 prog_cal[3]; + u8 prog_tab[3]; i = tda18271_lookup_rf_band(fe, &freq, NULL); @@ -602,33 +602,32 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq) return bcal; tda18271_calc_rf_cal(fe, &rf_freq[rf]); - prog_tab[rf] = (s32)regs[R_EB14]; + prog_tab[rf] = regs[R_EB14]; if (1 == bcal) - prog_cal[rf] = - (s32)tda18271_calibrate_rf(fe, rf_freq[rf]); + prog_cal[rf] = tda18271_calibrate_rf(fe, rf_freq[rf]); else prog_cal[rf] = prog_tab[rf]; switch (rf) { case RF1: map[i].rf_a1 = 0; - map[i].rf_b1 = (prog_cal[RF1] - prog_tab[RF1]); + map[i].rf_b1 = (s32)(prog_cal[RF1] - prog_tab[RF1]); map[i].rf1 = rf_freq[RF1] / 1000; break; case RF2: - dividend = (prog_cal[RF2] - prog_tab[RF2] - - prog_cal[RF1] + prog_tab[RF1]); + dividend = (s32)(prog_cal[RF2] - prog_tab[RF2]) - + (s32)(prog_cal[RF1] + prog_tab[RF1]); divisor = (s32)(rf_freq[RF2] - rf_freq[RF1]) / 1000; map[i].rf_a1 = (dividend / divisor); map[i].rf2 = rf_freq[RF2] / 1000; break; case RF3: - dividend = (prog_cal[RF3] - prog_tab[RF3] - - prog_cal[RF2] + prog_tab[RF2]); + dividend = (s32)(prog_cal[RF3] - prog_tab[RF3]) - + (s32)(prog_cal[RF2] + prog_tab[RF2]); divisor = (s32)(rf_freq[RF3] - rf_freq[RF2]) / 1000; map[i].rf_a2 = (dividend / divisor); - map[i].rf_b2 = (prog_cal[RF2] - prog_tab[RF2]); + map[i].rf_b2 = (s32)(prog_cal[RF2] - prog_tab[RF2]); map[i].rf3 = rf_freq[RF3] / 1000; break; default: diff --git a/trunk/drivers/media/common/tuners/tda18271-maps.c b/trunk/drivers/media/common/tuners/tda18271-maps.c index 3d5b6ab7e332..e7f84c705da8 100644 --- a/trunk/drivers/media/common/tuners/tda18271-maps.c +++ b/trunk/drivers/media/common/tuners/tda18271-maps.c @@ -229,7 +229,8 @@ static struct tda18271_map tda18271c2_km[] = { static struct tda18271_map 
tda18271_rf_band[] = { { .rfmax = 47900, .val = 0x00 }, { .rfmax = 61100, .val = 0x01 }, - { .rfmax = 152600, .val = 0x02 }, +/* { .rfmax = 152600, .val = 0x02 }, */ + { .rfmax = 121200, .val = 0x02 }, { .rfmax = 164700, .val = 0x03 }, { .rfmax = 203500, .val = 0x04 }, { .rfmax = 457800, .val = 0x05 }, @@ -447,7 +448,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = { { .rfmax = 150000, .val = 0xb0 }, { .rfmax = 151000, .val = 0xb1 }, { .rfmax = 152000, .val = 0xb7 }, - { .rfmax = 152600, .val = 0xbd }, + { .rfmax = 153000, .val = 0xbd }, { .rfmax = 154000, .val = 0x20 }, { .rfmax = 155000, .val = 0x22 }, { .rfmax = 156000, .val = 0x24 }, @@ -458,7 +459,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = { { .rfmax = 161000, .val = 0x2d }, { .rfmax = 163000, .val = 0x2e }, { .rfmax = 164000, .val = 0x2f }, - { .rfmax = 164700, .val = 0x30 }, + { .rfmax = 165000, .val = 0x30 }, { .rfmax = 166000, .val = 0x11 }, { .rfmax = 167000, .val = 0x12 }, { .rfmax = 168000, .val = 0x13 }, @@ -509,8 +510,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = { { .rfmax = 236000, .val = 0x1b }, { .rfmax = 237000, .val = 0x1c }, { .rfmax = 240000, .val = 0x1d }, - { .rfmax = 242000, .val = 0x1e }, - { .rfmax = 244000, .val = 0x1f }, + { .rfmax = 242000, .val = 0x1f }, { .rfmax = 247000, .val = 0x20 }, { .rfmax = 249000, .val = 0x21 }, { .rfmax = 252000, .val = 0x22 }, @@ -624,7 +624,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = { { .rfmax = 453000, .val = 0x93 }, { .rfmax = 454000, .val = 0x94 }, { .rfmax = 456000, .val = 0x96 }, - { .rfmax = 457800, .val = 0x98 }, + { .rfmax = 457000, .val = 0x98 }, { .rfmax = 461000, .val = 0x11 }, { .rfmax = 468000, .val = 0x12 }, { .rfmax = 472000, .val = 0x13 }, diff --git a/trunk/drivers/media/dvb/b2c2/flexcop-pci.c b/trunk/drivers/media/dvb/b2c2/flexcop-pci.c index 03f96d6ca894..955254090a0e 100644 --- a/trunk/drivers/media/dvb/b2c2/flexcop-pci.c +++ b/trunk/drivers/media/dvb/b2c2/flexcop-pci.c @@ -38,7 +38,7 @@ MODULE_PARM_DESC(debug, DEBSTATUS); #define DRIVER_VERSION "0.1" -#define DRIVER_NAME "flexcop-pci" +#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver" #define DRIVER_AUTHOR "Patrick Boettcher " struct flexcop_pci { diff --git a/trunk/drivers/media/dvb/dvb-usb/Kconfig b/trunk/drivers/media/dvb/dvb-usb/Kconfig index c545039287ad..fe4f894183ff 100644 --- a/trunk/drivers/media/dvb/dvb-usb/Kconfig +++ b/trunk/drivers/media/dvb/dvb-usb/Kconfig @@ -356,15 +356,13 @@ config DVB_USB_LME2510 select DVB_TDA826X if !DVB_FE_CUSTOMISE select DVB_STV0288 if !DVB_FE_CUSTOMISE select DVB_IX2505V if !DVB_FE_CUSTOMISE - select DVB_STV0299 if !DVB_FE_CUSTOMISE - select DVB_PLL if !DVB_FE_CUSTOMISE help Say Y here to support the LME DM04/QQBOX DVB-S USB2.0 . 
config DVB_USB_TECHNISAT_USB2 tristate "Technisat DVB-S/S2 USB2.0 support" depends on DVB_USB - select DVB_STV090x if !DVB_FE_CUSTOMISE - select DVB_STV6110x if !DVB_FE_CUSTOMISE + select DVB_STB0899 if !DVB_FE_CUSTOMISE + select DVB_STB6100 if !DVB_FE_CUSTOMISE help Say Y here to support the Technisat USB2 DVB-S/S2 device diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c index 65214af5cd74..97af266d7f1d 100644 --- a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c @@ -2162,7 +2162,7 @@ struct dibx000_agc_config dib7090_agc_config[2] = { .agc1_pt3 = 98, .agc1_slope1 = 0, .agc1_slope2 = 167, - .agc2_pt1 = 98, + .agc1_pt1 = 98, .agc2_pt2 = 255, .agc2_slope1 = 104, .agc2_slope2 = 0, @@ -2440,11 +2440,11 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) dib0700_set_i2c_speed(adap->dev, 340); adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]); + dib7090_slave_reset(adap->fe); + if (adap->fe == NULL) return -ENODEV; - dib7090_slave_reset(adap->fe); - return 0; } diff --git a/trunk/drivers/media/dvb/ngene/ngene-core.c b/trunk/drivers/media/dvb/ngene/ngene-core.c index 6927c726ce35..ccc2d1af49d4 100644 --- a/trunk/drivers/media/dvb/ngene/ngene-core.c +++ b/trunk/drivers/media/dvb/ngene/ngene-core.c @@ -1520,7 +1520,6 @@ static int init_channel(struct ngene_channel *chan) if (dev->ci.en && (io & NGENE_IO_TSOUT)) { dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1); set_transfer(chan, 1); - chan->dev->channel[2].DataFormatFlags = DF_SWAP32; set_transfer(&chan->dev->channel[2], 1); dvb_register_device(adapter, &chan->ci_dev, &ngene_dvbdev_ci, (void *) chan, diff --git a/trunk/drivers/media/media-entity.c b/trunk/drivers/media/media-entity.c index 056138f63c7d..23640ed44d85 100644 --- a/trunk/drivers/media/media-entity.c +++ b/trunk/drivers/media/media-entity.c @@ -378,6 +378,7 @@ EXPORT_SYMBOL_GPL(media_entity_create_link); static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) { + const u32 mask = MEDIA_LNK_FL_ENABLED; int ret; /* Notify both entities. */ @@ -394,7 +395,7 @@ static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) return ret; } - link->flags = flags; + link->flags = (link->flags & ~mask) | (flags & mask); link->reverse->flags = link->flags; return 0; @@ -416,7 +417,6 @@ static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) */ int __media_entity_setup_link(struct media_link *link, u32 flags) { - const u32 mask = MEDIA_LNK_FL_ENABLED; struct media_device *mdev; struct media_entity *source, *sink; int ret = -EBUSY; @@ -424,10 +424,6 @@ int __media_entity_setup_link(struct media_link *link, u32 flags) if (link == NULL) return -EINVAL; - /* The non-modifiable link flags must not be modified. */ - if ((link->flags & ~mask) != (flags & ~mask)) - return -EINVAL; - if (link->flags & MEDIA_LNK_FL_IMMUTABLE) return link->flags == flags ? 0 : -EINVAL; diff --git a/trunk/drivers/media/radio/radio-sf16fmr2.c b/trunk/drivers/media/radio/radio-sf16fmr2.c index 87bad7678d92..dc3f04c52d5e 100644 --- a/trunk/drivers/media/radio/radio-sf16fmr2.c +++ b/trunk/drivers/media/radio/radio-sf16fmr2.c @@ -170,7 +170,7 @@ static int fmr2_setfreq(struct fmr2 *dev) return 0; } -/* !!! not tested, in my card this doesn't work !!! */ +/* !!! not tested, in my card this does't work !!! 
*/ static int fmr2_setvolume(struct fmr2 *dev) { int vol[16] = { 0x021, 0x084, 0x090, 0x104, diff --git a/trunk/drivers/media/radio/saa7706h.c b/trunk/drivers/media/radio/saa7706h.c index b1193dfc5087..585680ffbfb6 100644 --- a/trunk/drivers/media/radio/saa7706h.c +++ b/trunk/drivers/media/radio/saa7706h.c @@ -376,7 +376,7 @@ static int __devinit saa7706h_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%02x (%s)\n", client->addr << 1, client->adapter->name); - state = kzalloc(sizeof(struct saa7706h_state), GFP_KERNEL); + state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/trunk/drivers/media/radio/tef6862.c b/trunk/drivers/media/radio/tef6862.c index 0991e1973678..7c0d77751f6e 100644 --- a/trunk/drivers/media/radio/tef6862.c +++ b/trunk/drivers/media/radio/tef6862.c @@ -176,7 +176,7 @@ static int __devinit tef6862_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%02x (%s)\n", client->addr << 1, client->adapter->name); - state = kzalloc(sizeof(struct tef6862_state), GFP_KERNEL); + state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; state->freq = TEF6862_LO_FREQ; diff --git a/trunk/drivers/media/rc/imon.c b/trunk/drivers/media/rc/imon.c index 8fc0f081b470..ebd68edf5b24 100644 --- a/trunk/drivers/media/rc/imon.c +++ b/trunk/drivers/media/rc/imon.c @@ -46,7 +46,7 @@ #define MOD_AUTHOR "Jarod Wilson " #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" #define MOD_NAME "imon" -#define MOD_VERSION "0.9.3" +#define MOD_VERSION "0.9.2" #define DISPLAY_MINOR_BASE 144 #define DEVICE_NAME "lcd%d" @@ -460,9 +460,8 @@ static int display_close(struct inode *inode, struct file *file) } /** - * Sends a packet to the device -- this function must be called with - * ictx->lock held, or its unlock/lock sequence while waiting for tx - * to complete can/will lead to a deadlock. + * Sends a packet to the device -- this function must be called + * with ictx->lock held. */ static int send_packet(struct imon_context *ictx) { @@ -992,21 +991,12 @@ static void imon_touch_display_timeout(unsigned long data) * the iMON remotes, and those used by the Windows MCE remotes (which is * really just RC-6), but only one or the other at a time, as the signals * are decoded onboard the receiver. - * - * This function gets called two different ways, one way is from - * rc_register_device, for initial protocol selection/setup, and the other is - * via a userspace-initiated protocol change request, either by direct sysfs - * prodding or by something like ir-keytable. In the rc_register_device case, - * the imon context lock is already held, but when initiated from userspace, - * it is not, so we must acquire it prior to calling send_packet, which - * requires that the lock is held. 
*/ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) { int retval; struct imon_context *ictx = rc->priv; struct device *dev = ictx->dev; - bool unlock = false; unsigned char ir_proto_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; @@ -1039,11 +1029,6 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); - if (!mutex_is_locked(&ictx->lock)) { - unlock = true; - mutex_lock(&ictx->lock); - } - retval = send_packet(ictx); if (retval) goto out; @@ -1052,9 +1037,6 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) ictx->pad_mouse = false; out: - if (unlock) - mutex_unlock(&ictx->lock); - return retval; } @@ -2152,7 +2134,6 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf) goto rdev_setup_failed; } - mutex_unlock(&ictx->lock); return ictx; rdev_setup_failed: @@ -2224,7 +2205,6 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf, goto urb_submit_failed; } - mutex_unlock(&ictx->lock); return ictx; urb_submit_failed: @@ -2319,8 +2299,6 @@ static int __devinit imon_probe(struct usb_interface *interface, usb_set_intfdata(interface, ictx); if (ifnum == 0) { - mutex_lock(&ictx->lock); - if (product == 0xffdc && ictx->rf_device) { sysfs_err = sysfs_create_group(&interface->dev.kobj, &imon_rf_attr_group); @@ -2331,14 +2309,13 @@ static int __devinit imon_probe(struct usb_interface *interface, if (ictx->display_supported) imon_init_display(ictx, interface); - - mutex_unlock(&ictx->lock); } dev_info(dev, "iMON device (%04x:%04x, intf%d) on " "usb<%d:%d> initialized\n", vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); + mutex_unlock(&ictx->lock); mutex_unlock(&driver_lock); return 0; diff --git a/trunk/drivers/media/rc/ite-cir.c b/trunk/drivers/media/rc/ite-cir.c index 43908a70bd8b..accaf6c9789a 100644 --- a/trunk/drivers/media/rc/ite-cir.c +++ b/trunk/drivers/media/rc/ite-cir.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/media/rc/mceusb.c b/trunk/drivers/media/rc/mceusb.c index 0c273ec465c9..044fb7a382d6 100644 --- a/trunk/drivers/media/rc/mceusb.c +++ b/trunk/drivers/media/rc/mceusb.c @@ -220,8 +220,6 @@ static struct usb_device_id mceusb_dev_table[] = { { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, /* Philips/Spinel plus IR transceiver for ASUS */ { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, - /* Philips IR transceiver (Dell branded) */ - { USB_DEVICE(VENDOR_PHILIPS, 0x2093) }, /* Realtek MCE IR Receiver and card reader */ { USB_DEVICE(VENDOR_REALTEK, 0x0161), .driver_info = MULTIFUNCTION }, diff --git a/trunk/drivers/media/rc/rc-main.c b/trunk/drivers/media/rc/rc-main.c index a2706648e365..f53f9c68d38d 100644 --- a/trunk/drivers/media/rc/rc-main.c +++ b/trunk/drivers/media/rc/rc-main.c @@ -707,8 +707,7 @@ static void ir_close(struct input_dev *idev) { struct rc_dev *rdev = input_get_drvdata(idev); - if (rdev) - rdev->close(rdev); + rdev->close(rdev); } /* class for /sys/class/rc */ @@ -734,7 +733,6 @@ static struct { { RC_TYPE_SONY, "sony" }, { RC_TYPE_RC5_SZ, "rc-5-sz" }, { RC_TYPE_LIRC, "lirc" }, - { RC_TYPE_OTHER, "other" }, }; #define PROTO_NONE "none" diff --git a/trunk/drivers/media/video/Kconfig b/trunk/drivers/media/video/Kconfig index 00f51dd121f3..4498b944dec8 100644 --- a/trunk/drivers/media/video/Kconfig +++ b/trunk/drivers/media/video/Kconfig @@ -875,7 +875,7 @@ config MX3_VIDEO config VIDEO_MX3 tristate "i.MX3x Camera Sensor Interface driver" 
depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA - select VIDEOBUF2_DMA_CONTIG + select VIDEOBUF_DMA_CONTIG select MX3_VIDEO ---help--- This is a v4l2 driver for the i.MX3x Camera Sensor Interface diff --git a/trunk/drivers/media/video/cx18/cx18-streams.c b/trunk/drivers/media/video/cx18/cx18-streams.c index 6fbc356113c1..c6e2ca3b1149 100644 --- a/trunk/drivers/media/video/cx18/cx18-streams.c +++ b/trunk/drivers/media/video/cx18/cx18-streams.c @@ -350,17 +350,9 @@ void cx18_streams_cleanup(struct cx18 *cx, int unregister) /* No struct video_device, but can have buffers allocated */ if (type == CX18_ENC_STREAM_TYPE_IDX) { - /* If the module params didn't inhibit IDX ... */ if (cx->stream_buffers[type] != 0) { cx->stream_buffers[type] = 0; - /* - * Before calling cx18_stream_free(), - * check if the IDX stream was actually set up. - * Needed, since the cx18_probe() error path - * exits through here as well as normal clean up - */ - if (cx->streams[type].buffers != 0) - cx18_stream_free(&cx->streams[type]); + cx18_stream_free(&cx->streams[type]); } continue; } diff --git a/trunk/drivers/media/video/cx23885/Kconfig b/trunk/drivers/media/video/cx23885/Kconfig index caab1bfb79e2..3b6e7f28568e 100644 --- a/trunk/drivers/media/video/cx23885/Kconfig +++ b/trunk/drivers/media/video/cx23885/Kconfig @@ -22,7 +22,6 @@ config VIDEO_CX23885 select DVB_CX24116 if !DVB_FE_CUSTOMISE select DVB_STV0900 if !DVB_FE_CUSTOMISE select DVB_DS3000 if !DVB_FE_CUSTOMISE - select DVB_STV0367 if !DVB_FE_CUSTOMISE select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE diff --git a/trunk/drivers/media/video/cx88/cx88-input.c b/trunk/drivers/media/video/cx88/cx88-input.c index 3f442003623d..c820e2f53527 100644 --- a/trunk/drivers/media/video/cx88/cx88-input.c +++ b/trunk/drivers/media/video/cx88/cx88-input.c @@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core) for (todo = 32; todo > 0; todo -= bits) { ev.pulse = samples & 0x80000000 ? false : true; bits = min(todo, 32U - fls(ev.pulse ? 
samples : ~samples)); - ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; + ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); ir_raw_event_store_with_filter(ir->dev, &ev); samples <<= bits; } diff --git a/trunk/drivers/media/video/imx074.c b/trunk/drivers/media/video/imx074.c index 0382ea752e6f..1a1169115716 100644 --- a/trunk/drivers/media/video/imx074.c +++ b/trunk/drivers/media/video/imx074.c @@ -298,7 +298,7 @@ static unsigned long imx074_query_bus_param(struct soc_camera_device *icd) static int imx074_set_bus_param(struct soc_camera_device *icd, unsigned long flags) { - return -EINVAL; + return -1; } static struct soc_camera_ops imx074_ops = { diff --git a/trunk/drivers/media/video/m52790.c b/trunk/drivers/media/video/m52790.c index 303ffa7df4ac..5e1c9a81984c 100644 --- a/trunk/drivers/media/video/m52790.c +++ b/trunk/drivers/media/video/m52790.c @@ -174,7 +174,7 @@ static int m52790_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kzalloc(sizeof(struct m52790_state), GFP_KERNEL); + state = kmalloc(sizeof(struct m52790_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; diff --git a/trunk/drivers/media/video/omap3isp/isp.c b/trunk/drivers/media/video/omap3isp/isp.c index 472a69359e60..503bd7922bd6 100644 --- a/trunk/drivers/media/video/omap3isp/isp.c +++ b/trunk/drivers/media/video/omap3isp/isp.c @@ -215,21 +215,20 @@ static u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel) } switch (xclksel) { - case ISP_XCLK_A: + case 0: isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, ISPTCTRL_CTRL_DIVA_MASK, divisor << ISPTCTRL_CTRL_DIVA_SHIFT); dev_dbg(isp->dev, "isp_set_xclk(): cam_xclka set to %d Hz\n", currentxclk); break; - case ISP_XCLK_B: + case 1: isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, ISPTCTRL_CTRL_DIVB_MASK, divisor << ISPTCTRL_CTRL_DIVB_SHIFT); dev_dbg(isp->dev, "isp_set_xclk(): cam_xclkb set to %d Hz\n", currentxclk); break; - case ISP_XCLK_NONE: default: omap3isp_put(isp); dev_dbg(isp->dev, "ISP_ERR: isp_set_xclk(): Invalid requested " @@ -238,13 +237,13 @@ static u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel) } /* Do we go from stable whatever to clock? */ - if (divisor >= 2 && isp->xclk_divisor[xclksel - 1] < 2) + if (divisor >= 2 && isp->xclk_divisor[xclksel] < 2) omap3isp_get(isp); /* Stopping the clock. 
*/ - else if (divisor < 2 && isp->xclk_divisor[xclksel - 1] >= 2) + else if (divisor < 2 && isp->xclk_divisor[xclksel] >= 2) omap3isp_put(isp); - isp->xclk_divisor[xclksel - 1] = divisor; + isp->xclk_divisor[xclksel] = divisor; omap3isp_put(isp); @@ -286,8 +285,7 @@ static void isp_power_settings(struct isp_device *isp, int idle) */ void omap3isp_configure_bridge(struct isp_device *isp, enum ccdc_input_entity input, - const struct isp_parallel_platform_data *pdata, - unsigned int shift) + const struct isp_parallel_platform_data *pdata) { u32 ispctrl_val; @@ -300,9 +298,9 @@ void omap3isp_configure_bridge(struct isp_device *isp, switch (input) { case CCDC_INPUT_PARALLEL: ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL; + ispctrl_val |= pdata->data_lane_shift << ISPCTRL_SHIFT_SHIFT; ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT; ispctrl_val |= pdata->bridge << ISPCTRL_PAR_BRIDGE_SHIFT; - shift += pdata->data_lane_shift * 2; break; case CCDC_INPUT_CSI2A: @@ -321,8 +319,6 @@ void omap3isp_configure_bridge(struct isp_device *isp, return; } - ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK; - ispctrl_val &= ~ISPCTRL_SYNC_DETECT_MASK; ispctrl_val |= ISPCTRL_SYNC_DETECT_VSRISE; @@ -662,8 +658,6 @@ int omap3isp_pipeline_pm_use(struct media_entity *entity, int use) /* Apply power change to connected non-nodes. */ ret = isp_pipeline_pm_power(entity, change); - if (ret < 0) - entity->use_count -= change; mutex_unlock(&entity->parent->graph_mutex); @@ -878,9 +872,6 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe) } } - if (failure < 0) - isp->needs_reset = true; - return failure; } @@ -893,8 +884,7 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe) * single-shot or continuous mode. * * Return 0 if successful, or the return value of the failed video::s_stream - * operation otherwise. The pipeline state is not updated when the operation - * fails, except when stopping the pipeline. + * operation otherwise. 
*/ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, enum isp_pipeline_stream_state state) @@ -905,9 +895,7 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, ret = isp_pipeline_disable(pipe); else ret = isp_pipeline_enable(pipe, state); - - if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED) - pipe->stream_state = state; + pipe->stream_state = state; return ret; } @@ -1493,10 +1481,6 @@ void omap3isp_put(struct isp_device *isp) if (--isp->ref_count == 0) { isp_disable_interrupts(isp); isp_save_ctx(isp); - if (isp->needs_reset) { - isp_reset(isp); - isp->needs_reset = false; - } isp_disable_clocks(isp); } mutex_unlock(&isp->isp_mutex); diff --git a/trunk/drivers/media/video/omap3isp/isp.h b/trunk/drivers/media/video/omap3isp/isp.h index 2620c405f5e4..cf5214e95a92 100644 --- a/trunk/drivers/media/video/omap3isp/isp.h +++ b/trunk/drivers/media/video/omap3isp/isp.h @@ -132,6 +132,7 @@ struct isp_reg { /** * struct isp_parallel_platform_data - Parallel interface platform data + * @width: Parallel bus width in bits (8, 10, 11 or 12) * @data_lane_shift: Data lane shifter * 0 - CAMEXT[13:0] -> CAM[13:0] * 1 - CAMEXT[13:2] -> CAM[11:0] @@ -145,6 +146,7 @@ struct isp_reg { * ISPCTRL_PAR_BRIDGE_BENDIAN - Big endian */ struct isp_parallel_platform_data { + unsigned int width; unsigned int data_lane_shift:2; unsigned int clk_pol:1; unsigned int bridge:4; @@ -260,7 +262,6 @@ struct isp_device { /* ISP Obj */ spinlock_t stat_lock; /* common lock for statistic drivers */ struct mutex isp_mutex; /* For handling ref_count field */ - bool needs_reset; int has_context; int ref_count; unsigned int autoidle; @@ -310,12 +311,11 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, enum isp_pipeline_stream_state state); void omap3isp_configure_bridge(struct isp_device *isp, enum ccdc_input_entity input, - const struct isp_parallel_platform_data *pdata, - unsigned int shift); + const struct isp_parallel_platform_data *pdata); -#define ISP_XCLK_NONE 0 -#define ISP_XCLK_A 1 -#define ISP_XCLK_B 2 +#define ISP_XCLK_NONE -1 +#define ISP_XCLK_A 0 +#define ISP_XCLK_B 1 struct isp_device *omap3isp_get(struct isp_device *isp); void omap3isp_put(struct isp_device *isp); diff --git a/trunk/drivers/media/video/omap3isp/ispccdc.c b/trunk/drivers/media/video/omap3isp/ispccdc.c index 39d501bda636..5ff9d14ce710 100644 --- a/trunk/drivers/media/video/omap3isp/ispccdc.c +++ b/trunk/drivers/media/video/omap3isp/ispccdc.c @@ -43,12 +43,6 @@ __ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh, static const unsigned int ccdc_fmts[] = { V4L2_MBUS_FMT_Y8_1X8, - V4L2_MBUS_FMT_Y10_1X10, - V4L2_MBUS_FMT_Y12_1X12, - V4L2_MBUS_FMT_SGRBG8_1X8, - V4L2_MBUS_FMT_SRGGB8_1X8, - V4L2_MBUS_FMT_SBGGR8_1X8, - V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, @@ -1116,38 +1110,21 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc) struct isp_parallel_platform_data *pdata = NULL; struct v4l2_subdev *sensor; struct v4l2_mbus_framefmt *format; - const struct isp_format_info *fmt_info; - struct v4l2_subdev_format fmt_src; - unsigned int depth_out; - unsigned int depth_in = 0; struct media_pad *pad; unsigned long flags; - unsigned int shift; u32 syn_mode; u32 ccdc_pattern; - pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]); - sensor = media_entity_to_v4l2_subdev(pad->entity); - if (ccdc->input == CCDC_INPUT_PARALLEL) + if (ccdc->input == CCDC_INPUT_PARALLEL) { + pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]); + 
sensor = media_entity_to_v4l2_subdev(pad->entity); pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv) ->bus.parallel; - - /* Compute shift value for lane shifter to configure the bridge. */ - fmt_src.pad = pad->index; - fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; - if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) { - fmt_info = omap3isp_video_format_info(fmt_src.format.code); - depth_in = fmt_info->bpp; } - fmt_info = omap3isp_video_format_info - (isp->isp_ccdc.formats[CCDC_PAD_SINK].code); - depth_out = fmt_info->bpp; - - shift = depth_in - depth_out; - omap3isp_configure_bridge(isp, ccdc->input, pdata, shift); + omap3isp_configure_bridge(isp, ccdc->input, pdata); - ccdc->syncif.datsz = depth_out; + ccdc->syncif.datsz = pdata ? pdata->width : 10; ccdc_config_sync_if(ccdc, &ccdc->syncif); /* CCDC_PAD_SINK */ @@ -1361,7 +1338,7 @@ static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc, * @ccdc: Pointer to ISP CCDC device. * @event: Pointing which event trigger handler * - * Return 1 when the event and stopping request combination is satisfied, + * Return 1 when the event and stopping request combination is satisfyied, * zero otherwise. */ static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event) @@ -1641,7 +1618,7 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer) ccdc_set_outaddr(ccdc, buffer->isp_addr); - /* We now have a buffer queued on the output, restart the pipeline + /* We now have a buffer queued on the output, restart the pipeline in * on the next CCDC interrupt if running in continuous mode (or when * starting the stream). */ diff --git a/trunk/drivers/media/video/omap3isp/isppreview.c b/trunk/drivers/media/video/omap3isp/isppreview.c index aba537af87e4..2b16988a501d 100644 --- a/trunk/drivers/media/video/omap3isp/isppreview.c +++ b/trunk/drivers/media/video/omap3isp/isppreview.c @@ -755,7 +755,7 @@ static struct preview_update update_attrs[] = { * @configs - pointer to update config structure. * @config - return pointer to appropriate structure field. * @bit - for which feature to return pointers. - * Return size of corresponding prev_params member + * Return size of coresponding prev_params member */ static u32 __preview_get_ptrs(struct prev_params *params, void **param, diff --git a/trunk/drivers/media/video/omap3isp/ispqueue.c b/trunk/drivers/media/video/omap3isp/ispqueue.c index 9c317148205f..8fddc5806b0d 100644 --- a/trunk/drivers/media/video/omap3isp/ispqueue.c +++ b/trunk/drivers/media/video/omap3isp/ispqueue.c @@ -339,7 +339,7 @@ static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf) up_read(¤t->mm->mmap_sem); if (ret != buf->npages) { - buf->npages = ret < 0 ? 0 : ret; + buf->npages = ret; isp_video_buffer_cleanup(buf); return -EFAULT; } @@ -408,8 +408,8 @@ static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf) * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address * * This function locates the VMAs for the buffer's userspace address and checks - * that their flags match. The only flag that we need to care for at the moment - * is VM_PFNMAP. + * that their flags match. The onlflag that we need to care for at the moment is + * VM_PFNMAP. * * The buffer vm_flags field is set to the first VMA flags. 
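[ Note: with the lane-shifter computation gone, ccdc_configure() above now takes the interface data size straight from the parallel platform data, falling back to 10 bits when none is provided. A minimal board-side initializer for the structure documented earlier in this patch might look like the sketch below; the 8-bit width and the variable name are purely illustrative and not part of the change.

	static struct isp_parallel_platform_data example_parallel_bus = {
		.width           = 8,	/* parallel bus width in bits (8, 10, 11 or 12) */
		.data_lane_shift = 0,	/* CAMEXT[13:0] -> CAM[13:0] */
		.clk_pol         = 0,
		.bridge          = 0,
	};
]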
* diff --git a/trunk/drivers/media/video/omap3isp/ispresizer.c b/trunk/drivers/media/video/omap3isp/ispresizer.c index 0bb0f8cd36f5..653f88ba56db 100644 --- a/trunk/drivers/media/video/omap3isp/ispresizer.c +++ b/trunk/drivers/media/video/omap3isp/ispresizer.c @@ -714,50 +714,19 @@ static void resizer_print_status(struct isp_res_device *res) * iw and ih are the input width and height after cropping. Those equations need * to be satisfied exactly for the resizer to work correctly. * - * The equations can't be easily reverted, as the >> 8 operation is not linear. - * In addition, not all input sizes can be achieved for a given output size. To - * get the highest input size lower than or equal to the requested input size, - * we need to compute the highest resizing ratio that satisfies the following - * inequality (taking the 4-tap mode width equation as an example) - * - * iw >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 - 7 - * - * (where iw is the requested input width) which can be rewritten as - * - * iw - 7 >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 - * (iw - 7) << 8 >= 32 * sph + (ow - 1) * hrsz + 16 - b - * ((iw - 7) << 8) + b >= 32 * sph + (ow - 1) * hrsz + 16 - * - * where b is the value of the 8 least significant bits of the right hand side - * expression of the last inequality. The highest resizing ratio value will be - * achieved when b is equal to its maximum value of 255. That resizing ratio - * value will still satisfy the original inequality, as b will disappear when - * the expression will be shifted right by 8. - * - * The reverted the equations thus become + * Reverting the equations, we can compute the resizing ratios with * * - 8-phase, 4-tap mode - * hrsz = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / (ow - 1) - * vrsz = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / (oh - 1) + * hrsz = ((iw - 7) * 256 - 16 - 32 * sph) / (ow - 1) + * vrsz = ((ih - 4) * 256 - 16 - 32 * spv) / (oh - 1) * - 4-phase, 7-tap mode - * hrsz = ((iw - 7) * 256 + 255 - 32 - 64 * sph) / (ow - 1) - * vrsz = ((ih - 7) * 256 + 255 - 32 - 64 * spv) / (oh - 1) + * hrsz = ((iw - 7) * 256 - 32 - 64 * sph) / (ow - 1) + * vrsz = ((ih - 7) * 256 - 32 - 64 * spv) / (oh - 1) * - * The ratios are integer values, and are rounded down to ensure that the - * cropped input size is not bigger than the uncropped input size. - * - * As the number of phases/taps, used to select the correct equations to compute - * the ratio, depends on the ratio, we start with the 4-tap mode equations to - * compute an approximation of the ratio, and switch to the 7-tap mode equations - * if the approximation is higher than the ratio threshold. - * - * As the 7-tap mode equations will return a ratio smaller than or equal to the - * 4-tap mode equations, the resulting ratio could become lower than or equal to - * the ratio threshold. This 'equations loop' isn't an issue as long as the - * correct equations are used to compute the final input size. Starting with the - * 4-tap mode equations ensure that, in case of values resulting in a 'ratio - * loop', the smallest of the ratio values will be used, never exceeding the - * requested input size. + * The ratios are integer values, and must be rounded down to ensure that the + * cropped input size is not bigger than the uncropped input size. As the ratio + * in 7-tap mode is always smaller than the ratio in 4-tap mode, we can use the + * 7-tap mode equations to compute a ratio approximation. 
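[ Worked example for the reverted ratio equations, with illustrative numbers and assuming the driver's usual MIN/MID/MAX_RESIZE_VALUE limits of 64/512/1024: for iw = 1024, ow = 640 and sph = 0,

	hrsz = ((1024 - 7) * 256 - 32 - 64 * 0) / (640 - 1) = 260320 / 639 = 407

which is below the 4-tap/7-tap threshold, so the 4-tap branch recomputes the cropped input width as

	(((640 - 1) * 407 + 32 * 0 + 16) >> 8) + 7 = (260089 >> 8) + 7 = 1015 + 7 = 1022

i.e. slightly smaller than the requested 1024 pixels, which is exactly what rounding the ratio down is meant to guarantee. ]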
* * We first clamp the output size according to the hardware capabilitie to avoid * auto-cropping the input more than required to satisfy the TRM equations. The @@ -806,8 +775,6 @@ static void resizer_calc_ratios(struct isp_res_device *res, unsigned int max_width; unsigned int max_height; unsigned int width_alignment; - unsigned int width; - unsigned int height; /* * Clamp the output height based on the hardware capabilities and @@ -819,22 +786,19 @@ static void resizer_calc_ratios(struct isp_res_device *res, max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT); output->height = clamp(output->height, min_height, max_height); - ratio->vert = ((input->height - 4) * 256 + 255 - 16 - 32 * spv) + ratio->vert = ((input->height - 7) * 256 - 32 - 64 * spv) / (output->height - 1); - if (ratio->vert > MID_RESIZE_VALUE) - ratio->vert = ((input->height - 7) * 256 + 255 - 32 - 64 * spv) - / (output->height - 1); ratio->vert = clamp_t(unsigned int, ratio->vert, MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); if (ratio->vert <= MID_RESIZE_VALUE) { upscaled_height = (output->height - 1) * ratio->vert + 32 * spv + 16; - height = (upscaled_height >> 8) + 4; + input->height = (upscaled_height >> 8) + 4; } else { upscaled_height = (output->height - 1) * ratio->vert + 64 * spv + 32; - height = (upscaled_height >> 8) + 7; + input->height = (upscaled_height >> 8) + 7; } /* @@ -890,29 +854,20 @@ static void resizer_calc_ratios(struct isp_res_device *res, max_width & ~(width_alignment - 1)); output->width = ALIGN(output->width, width_alignment); - ratio->horz = ((input->width - 7) * 256 + 255 - 16 - 32 * sph) + ratio->horz = ((input->width - 7) * 256 - 32 - 64 * sph) / (output->width - 1); - if (ratio->horz > MID_RESIZE_VALUE) - ratio->horz = ((input->width - 7) * 256 + 255 - 32 - 64 * sph) - / (output->width - 1); ratio->horz = clamp_t(unsigned int, ratio->horz, MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); if (ratio->horz <= MID_RESIZE_VALUE) { upscaled_width = (output->width - 1) * ratio->horz + 32 * sph + 16; - width = (upscaled_width >> 8) + 7; + input->width = (upscaled_width >> 8) + 7; } else { upscaled_width = (output->width - 1) * ratio->horz + 64 * sph + 32; - width = (upscaled_width >> 8) + 7; + input->width = (upscaled_width >> 8) + 7; } - - /* Center the new crop rectangle. 
*/ - input->left += (input->width - width) / 2; - input->top += (input->height - height) / 2; - input->width = width; - input->height = height; } /* diff --git a/trunk/drivers/media/video/omap3isp/ispstat.h b/trunk/drivers/media/video/omap3isp/ispstat.h index d86da94fa50d..820950c9ef46 100644 --- a/trunk/drivers/media/video/omap3isp/ispstat.h +++ b/trunk/drivers/media/video/omap3isp/ispstat.h @@ -131,9 +131,9 @@ struct ispstat { struct ispstat_generic_config { /* * Fields must be in the same order as in: - * - omap3isp_h3a_aewb_config - * - omap3isp_h3a_af_config - * - omap3isp_hist_config + * - isph3a_aewb_config + * - isph3a_af_config + * - isphist_config */ u32 buf_size; u16 config_counter; diff --git a/trunk/drivers/media/video/omap3isp/ispvideo.c b/trunk/drivers/media/video/omap3isp/ispvideo.c index 9cd8f1aa567b..208a7ec739d7 100644 --- a/trunk/drivers/media/video/omap3isp/ispvideo.c +++ b/trunk/drivers/media/video/omap3isp/ispvideo.c @@ -47,59 +47,29 @@ static struct isp_format_info formats[] = { { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, - V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, - V4L2_PIX_FMT_GREY, 8, }, - { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10, - V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8, - V4L2_PIX_FMT_Y10, 10, }, - { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10, - V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8, - V4L2_PIX_FMT_Y12, 12, }, - { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, - V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, - V4L2_PIX_FMT_SBGGR8, 8, }, - { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, - V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, - V4L2_PIX_FMT_SGBRG8, 8, }, - { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, - V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, - V4L2_PIX_FMT_SGRBG8, 8, }, - { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, - V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, - V4L2_PIX_FMT_SRGGB8, 8, }, + V4L2_MBUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 8, }, { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, - V4L2_MBUS_FMT_SGRBG10_1X10, 0, - V4L2_PIX_FMT_SGRBG10DPCM8, 8, }, + V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10DPCM8, 8, }, { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, - V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8, - V4L2_PIX_FMT_SBGGR10, 10, }, + V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10, 10, }, { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, - V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8, - V4L2_PIX_FMT_SGBRG10, 10, }, + V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10, 10, }, { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10, - V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8, - V4L2_PIX_FMT_SGRBG10, 10, }, + V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10, 10, }, { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, - V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8, - V4L2_PIX_FMT_SRGGB10, 10, }, + V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10, 10, }, { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10, - V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8, - V4L2_PIX_FMT_SBGGR12, 12, }, + V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12, 12, }, { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10, - V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8, - V4L2_PIX_FMT_SGBRG12, 12, }, + V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12, 12, }, { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10, - V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8, - 
V4L2_PIX_FMT_SGRBG12, 12, }, + V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12, 12, }, { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10, - V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8, - V4L2_PIX_FMT_SRGGB12, 12, }, + V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12, 12, }, { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16, - V4L2_MBUS_FMT_UYVY8_1X16, 0, - V4L2_PIX_FMT_UYVY, 16, }, + V4L2_MBUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 16, }, { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16, - V4L2_MBUS_FMT_YUYV8_1X16, 0, - V4L2_PIX_FMT_YUYV, 16, }, + V4L2_MBUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 16, }, }; const struct isp_format_info * @@ -115,37 +85,6 @@ omap3isp_video_format_info(enum v4l2_mbus_pixelcode code) return NULL; } -/* - * Decide whether desired output pixel code can be obtained with - * the lane shifter by shifting the input pixel code. - * @in: input pixelcode to shifter - * @out: output pixelcode from shifter - * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0] - * - * return true if the combination is possible - * return false otherwise - */ -static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in, - enum v4l2_mbus_pixelcode out, - unsigned int additional_shift) -{ - const struct isp_format_info *in_info, *out_info; - - if (in == out) - return true; - - in_info = omap3isp_video_format_info(in); - out_info = omap3isp_video_format_info(out); - - if ((in_info->flavor == 0) || (out_info->flavor == 0)) - return false; - - if (in_info->flavor != out_info->flavor) - return false; - - return in_info->bpp - out_info->bpp + additional_shift <= 6; -} - /* * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format * @video: ISP video instance @@ -296,7 +235,6 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe) return -EPIPE; while (1) { - unsigned int shifter_link; /* Retrieve the sink format */ pad = &subdev->entity.pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) @@ -325,10 +263,6 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe) return -ENOSPC; } - /* If sink pad is on CCDC, the link has the lane shifter - * in the middle of it. */ - shifter_link = subdev == &isp->isp_ccdc.subdev; - /* Retrieve the source format */ pad = media_entity_remote_source(pad); if (pad == NULL || @@ -344,24 +278,10 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe) return -EPIPE; /* Check if the two ends match */ - if (fmt_source.format.width != fmt_sink.format.width || + if (fmt_source.format.code != fmt_sink.format.code || + fmt_source.format.width != fmt_sink.format.width || fmt_source.format.height != fmt_sink.format.height) return -EPIPE; - - if (shifter_link) { - unsigned int parallel_shift = 0; - if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) { - struct isp_parallel_platform_data *pdata = - &((struct isp_v4l2_subdevs_group *) - subdev->host_priv)->bus.parallel; - parallel_shift = pdata->data_lane_shift * 2; - } - if (!isp_video_is_shiftable(fmt_source.format.code, - fmt_sink.format.code, - parallel_shift)) - return -EPIPE; - } else if (fmt_source.format.code != fmt_sink.format.code) - return -EPIPE; } return 0; diff --git a/trunk/drivers/media/video/omap3isp/ispvideo.h b/trunk/drivers/media/video/omap3isp/ispvideo.h index 911bea64e78a..524a1acd0906 100644 --- a/trunk/drivers/media/video/omap3isp/ispvideo.h +++ b/trunk/drivers/media/video/omap3isp/ispvideo.h @@ -49,8 +49,6 @@ struct v4l2_pix_format; * bits. Identical to @code if the format is 10 bits wide or less. 
* @uncompressed: V4L2 media bus format code for the corresponding uncompressed * format. Identical to @code if the format is not DPCM compressed. - * @flavor: V4L2 media bus format code for the same pixel layout but - * shifted to be 8 bits per pixel. =0 if format is not shiftable. * @pixelformat: V4L2 pixel format FCC identifier * @bpp: Bits per pixel */ @@ -58,7 +56,6 @@ struct isp_format_info { enum v4l2_mbus_pixelcode code; enum v4l2_mbus_pixelcode truncated; enum v4l2_mbus_pixelcode uncompressed; - enum v4l2_mbus_pixelcode flavor; u32 pixelformat; unsigned int bpp; }; diff --git a/trunk/drivers/media/video/s5p-fimc/fimc-capture.c b/trunk/drivers/media/video/s5p-fimc/fimc-capture.c index d142b40ea64e..95f8b4e11e46 100644 --- a/trunk/drivers/media/video/s5p-fimc/fimc-capture.c +++ b/trunk/drivers/media/video/s5p-fimc/fimc-capture.c @@ -527,7 +527,7 @@ static int fimc_cap_s_fmt_mplane(struct file *file, void *priv, if (ret) return ret; - if (vb2_is_busy(&fimc->vid_cap.vbq) || fimc_capture_active(fimc)) + if (vb2_is_streaming(&fimc->vid_cap.vbq) || fimc_capture_active(fimc)) return -EBUSY; frame = &ctx->d_frame; @@ -539,10 +539,8 @@ static int fimc_cap_s_fmt_mplane(struct file *file, void *priv, return -EINVAL; } - for (i = 0; i < frame->fmt->colplanes; i++) { - frame->payload[i] = - (pix->width * pix->height * frame->fmt->depth[i]) >> 3; - } + for (i = 0; i < frame->fmt->colplanes; i++) + frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height; /* Output DMA frame pixel size and offsets. */ frame->f_width = pix->plane_fmt[0].bytesperline * 8 diff --git a/trunk/drivers/media/video/s5p-fimc/fimc-core.c b/trunk/drivers/media/video/s5p-fimc/fimc-core.c index dc91a8511af6..6c919b38a3d8 100644 --- a/trunk/drivers/media/video/s5p-fimc/fimc-core.c +++ b/trunk/drivers/media/video/s5p-fimc/fimc-core.c @@ -361,20 +361,10 @@ static void fimc_capture_irq_handler(struct fimc_dev *fimc) { struct fimc_vid_cap *cap = &fimc->vid_cap; struct fimc_vid_buffer *v_buf; - struct timeval *tv; - struct timespec ts; if (!list_empty(&cap->active_buf_q) && test_bit(ST_CAPT_RUN, &fimc->state)) { - ktime_get_real_ts(&ts); - v_buf = active_queue_pop(cap); - - tv = &v_buf->vb.v4l2_buf.timestamp; - tv->tv_sec = ts.tv_sec; - tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; - v_buf->vb.v4l2_buf.sequence = cap->frame_count++; - vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE); } @@ -768,7 +758,7 @@ static void fimc_unlock(struct vb2_queue *vq) mutex_unlock(&ctx->fimc_dev->lock); } -static struct vb2_ops fimc_qops = { +struct vb2_ops fimc_qops = { .queue_setup = fimc_queue_setup, .buf_prepare = fimc_buf_prepare, .buf_queue = fimc_buf_queue, @@ -937,23 +927,23 @@ int fimc_vidioc_try_fmt_mplane(struct file *file, void *priv, pix->num_planes = fmt->memplanes; pix->colorspace = V4L2_COLORSPACE_JPEG; - for (i = 0; i < pix->num_planes; ++i) { - u32 bpl = pix->plane_fmt[i].bytesperline; - u32 *sizeimage = &pix->plane_fmt[i].sizeimage; + int bpl = pix->plane_fmt[i].bytesperline; + + dbg("[%d] bpl: %d, depth: %d, w: %d, h: %d", + i, bpl, fmt->depth[i], pix->width, pix->height); - if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width)) - bpl = pix->width; /* Planar */ + if (!bpl || (bpl * 8 / fmt->depth[i]) > pix->width) + bpl = (pix->width * fmt->depth[0]) >> 3; - if (fmt->colplanes == 1 && /* Packed */ - (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width)) - bpl = (pix->width * fmt->depth[0]) / 8; + if (!pix->plane_fmt[i].sizeimage) + pix->plane_fmt[i].sizeimage = pix->height * bpl; - if (i == 0) /* Same bytesperline for each 
plane. */ - mod_x = bpl; + pix->plane_fmt[i].bytesperline = bpl; - pix->plane_fmt[i].bytesperline = mod_x; - *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8; + dbg("[%d]: bpl: %d, sizeimage: %d", + i, pix->plane_fmt[i].bytesperline, + pix->plane_fmt[i].sizeimage); } return 0; @@ -975,7 +965,7 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv, vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); - if (vb2_is_busy(vq)) { + if (vb2_is_streaming(vq)) { v4l2_err(&fimc->m2m.v4l2_dev, "queue (%d) busy\n", f->type); return -EBUSY; } @@ -995,10 +985,8 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv, if (!frame->fmt) return -EINVAL; - for (i = 0; i < frame->fmt->colplanes; i++) { - frame->payload[i] = - (pix->width * pix->height * frame->fmt->depth[i]) / 8; - } + for (i = 0; i < frame->fmt->colplanes; i++) + frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height; frame->f_width = pix->plane_fmt[0].bytesperline * 8 / frame->fmt->depth[0]; @@ -1762,7 +1750,7 @@ static int __devexit fimc_remove(struct platform_device *pdev) } /* Image pixel limits, similar across several FIMC HW revisions. */ -static struct fimc_pix_limit s5p_pix_limit[4] = { +static struct fimc_pix_limit s5p_pix_limit[3] = { [0] = { .scaler_en_w = 3264, .scaler_dis_w = 8192, @@ -1787,14 +1775,6 @@ static struct fimc_pix_limit s5p_pix_limit[4] = { .out_rot_en_w = 1280, .out_rot_dis_w = 1920, }, - [3] = { - .scaler_en_w = 1920, - .scaler_dis_w = 8192, - .in_rot_en_h = 1366, - .in_rot_dis_w = 8192, - .out_rot_en_w = 1366, - .out_rot_dis_w = 1920, - }, }; static struct samsung_fimc_variant fimc0_variant_s5p = { @@ -1847,7 +1827,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = { .pix_limit = &s5p_pix_limit[2], }; -static struct samsung_fimc_variant fimc0_variant_exynos4 = { +static struct samsung_fimc_variant fimc0_variant_s5pv310 = { .pix_hoff = 1, .has_inp_rot = 1, .has_out_rot = 1, @@ -1860,7 +1840,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = { .pix_limit = &s5p_pix_limit[1], }; -static struct samsung_fimc_variant fimc2_variant_exynos4 = { +static struct samsung_fimc_variant fimc2_variant_s5pv310 = { .pix_hoff = 1, .has_cistatus2 = 1, .has_mainscaler_ext = 1, @@ -1868,7 +1848,7 @@ static struct samsung_fimc_variant fimc2_variant_exynos4 = { .min_out_pixsize = 16, .hor_offs_align = 1, .out_buf_count = 32, - .pix_limit = &s5p_pix_limit[3], + .pix_limit = &s5p_pix_limit[2], }; /* S5PC100 */ @@ -1894,12 +1874,12 @@ static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = { }; /* S5PV310, S5PC210 */ -static struct samsung_fimc_driverdata fimc_drvdata_exynos4 = { +static struct samsung_fimc_driverdata fimc_drvdata_s5pv310 = { .variant = { - [0] = &fimc0_variant_exynos4, - [1] = &fimc0_variant_exynos4, - [2] = &fimc0_variant_exynos4, - [3] = &fimc2_variant_exynos4, + [0] = &fimc0_variant_s5pv310, + [1] = &fimc0_variant_s5pv310, + [2] = &fimc0_variant_s5pv310, + [3] = &fimc2_variant_s5pv310, }, .num_entities = 4, .lclk_frequency = 166000000UL, @@ -1913,8 +1893,8 @@ static struct platform_device_id fimc_driver_ids[] = { .name = "s5pv210-fimc", .driver_data = (unsigned long)&fimc_drvdata_s5pv210, }, { - .name = "exynos4-fimc", - .driver_data = (unsigned long)&fimc_drvdata_exynos4, + .name = "s5pv310-fimc", + .driver_data = (unsigned long)&fimc_drvdata_s5pv310, }, {}, }; diff --git a/trunk/drivers/media/video/sh_mobile_ceu_camera.c b/trunk/drivers/media/video/sh_mobile_ceu_camera.c index 134e86bf6d97..3fe54bf41142 100644 --- 
a/trunk/drivers/media/video/sh_mobile_ceu_camera.c +++ b/trunk/drivers/media/video/sh_mobile_ceu_camera.c @@ -922,7 +922,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int /* Try 2560x1920, 1280x960, 640x480, 320x240 */ mf.width = 2560 >> shift; mf.height = 1920 >> shift; - ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, s_mbus_fmt, &mf); if (ret < 0) return ret; @@ -1224,7 +1224,7 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_cropcap cap; int ret; - ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, s_mbus_fmt, mf); if (ret < 0) return ret; @@ -1254,7 +1254,7 @@ static int client_s_fmt(struct soc_camera_device *icd, tmp_h = min(2 * tmp_h, max_height); mf->width = tmp_w; mf->height = tmp_h; - ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, s_mbus_fmt, mf); dev_geo(dev, "Camera scaled to %ux%u\n", mf->width, mf->height); @@ -1658,7 +1658,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, mf.code = xlate->code; mf.colorspace = pix->colorspace; - ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf); + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, try_mbus_fmt, &mf); if (ret < 0) return ret; @@ -1682,7 +1682,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, */ mf.width = 2560; mf.height = 1920; - ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, try_mbus_fmt, &mf); if (ret < 0) { /* Shouldn't actually happen... */ diff --git a/trunk/drivers/media/video/sh_mobile_csi2.c b/trunk/drivers/media/video/sh_mobile_csi2.c index 98b87481fa94..dd1b81b1442b 100644 --- a/trunk/drivers/media/video/sh_mobile_csi2.c +++ b/trunk/drivers/media/video/sh_mobile_csi2.c @@ -38,8 +38,6 @@ struct sh_csi2 { void __iomem *base; struct platform_device *pdev; struct sh_csi2_client_config *client; - unsigned long (*query_bus_param)(struct soc_camera_device *); - int (*set_bus_param)(struct soc_camera_device *, unsigned long); }; static int sh_csi2_try_fmt(struct v4l2_subdev *sd, @@ -210,7 +208,6 @@ static int sh_csi2_notify(struct notifier_block *nb, case BUS_NOTIFY_BOUND_DRIVER: snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s", dev_name(v4l2_dev->dev), ".mipi-csi"); - priv->subdev.grp_id = (long)icd; ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev); dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret); if (ret < 0) @@ -218,8 +215,6 @@ static int sh_csi2_notify(struct notifier_block *nb, priv->client = pdata->clients + i; - priv->set_bus_param = icd->ops->set_bus_param; - priv->query_bus_param = icd->ops->query_bus_param; icd->ops->set_bus_param = sh_csi2_set_bus_param; icd->ops->query_bus_param = sh_csi2_query_bus_param; @@ -231,10 +226,8 @@ static int sh_csi2_notify(struct notifier_block *nb, priv->client = NULL; /* Driver is about to be unbound */ - icd->ops->set_bus_param = priv->set_bus_param; - icd->ops->query_bus_param = priv->query_bus_param; - priv->set_bus_param = NULL; - priv->query_bus_param = NULL; + icd->ops->set_bus_param = NULL; + icd->ops->query_bus_param = NULL; v4l2_device_unregister_subdev(&priv->subdev); diff --git a/trunk/drivers/media/video/soc_camera.c b/trunk/drivers/media/video/soc_camera.c index ddb4c091dedc..46284489e4eb 100644 
--- a/trunk/drivers/media/video/soc_camera.c +++ b/trunk/drivers/media/video/soc_camera.c @@ -136,50 +136,11 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl, } EXPORT_SYMBOL(soc_camera_apply_sensor_flags); -#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ - ((x) >> 24) & 0xff - -static int soc_camera_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - struct v4l2_pix_format *pix = &f->fmt.pix; - int ret; - - dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n", - pixfmtstr(pix->pixelformat), pix->width, pix->height); - - pix->bytesperline = 0; - pix->sizeimage = 0; - - ret = ici->ops->try_fmt(icd, f); - if (ret < 0) - return ret; - - if (!pix->sizeimage) { - if (!pix->bytesperline) { - const struct soc_camera_format_xlate *xlate; - - xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); - if (!xlate) - return -EINVAL; - - ret = soc_mbus_bytes_per_line(pix->width, - xlate->host_fmt); - if (ret > 0) - pix->bytesperline = ret; - } - if (pix->bytesperline) - pix->sizeimage = pix->bytesperline * pix->height; - } - - return 0; -} - static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct soc_camera_device *icd = file->private_data; + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); WARN_ON(priv != file->private_data); @@ -188,7 +149,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, return -EINVAL; /* limit format to hardware capabilities */ - return soc_camera_try_fmt(icd, f); + return ici->ops->try_fmt(icd, f); } static int soc_camera_enum_input(struct file *file, void *priv, @@ -401,6 +362,9 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd) icd->user_formats = NULL; } +#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ + ((x) >> 24) & 0xff + /* Called with .vb_lock held, or from the first open(2), see comment there */ static int soc_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) @@ -413,7 +377,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd, pixfmtstr(pix->pixelformat), pix->width, pix->height); /* We always call try_fmt() before set_fmt() or set_crop() */ - ret = soc_camera_try_fmt(icd, f); + ret = ici->ops->try_fmt(icd, f); if (ret < 0) return ret; @@ -1032,11 +996,10 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); - struct i2c_adapter *adap = client->adapter; dev_set_drvdata(&icd->dev, NULL); v4l2_device_unregister_subdev(i2c_get_clientdata(client)); i2c_unregister_device(client); - i2c_put_adapter(adap); + i2c_put_adapter(client->adapter); } #else #define soc_camera_init_i2c(icd, icl) (-ENODEV) @@ -1108,9 +1071,6 @@ static int soc_camera_probe(struct device *dev) } } - sd = soc_camera_to_subdev(icd); - sd->grp_id = (long)icd; - /* At this point client .probe() should have run already */ ret = soc_camera_init_user_formats(icd); if (ret < 0) @@ -1132,6 +1092,7 @@ static int soc_camera_probe(struct device *dev) goto evidstart; /* Try to improve our guess of a reasonable window format */ + sd = soc_camera_to_subdev(icd); if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) { icd->user_width = mf.width; icd->user_height = mf.height; diff --git a/trunk/drivers/media/video/tda9840.c b/trunk/drivers/media/video/tda9840.c index 22fa8202d5ca..5d4cf3b3d435 100644 --- a/trunk/drivers/media/video/tda9840.c +++ 
b/trunk/drivers/media/video/tda9840.c @@ -171,7 +171,7 @@ static int tda9840_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tda9840_ops); diff --git a/trunk/drivers/media/video/tea6415c.c b/trunk/drivers/media/video/tea6415c.c index 827425c5b866..19621ed523ec 100644 --- a/trunk/drivers/media/video/tea6415c.c +++ b/trunk/drivers/media/video/tea6415c.c @@ -152,7 +152,7 @@ static int tea6415c_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tea6415c_ops); diff --git a/trunk/drivers/media/video/tea6420.c b/trunk/drivers/media/video/tea6420.c index f350b6c24500..5ea840401f21 100644 --- a/trunk/drivers/media/video/tea6420.c +++ b/trunk/drivers/media/video/tea6420.c @@ -125,7 +125,7 @@ static int tea6420_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tea6420_ops); diff --git a/trunk/drivers/media/video/upd64031a.c b/trunk/drivers/media/video/upd64031a.c index 1aab96a88203..f8138c75be8b 100644 --- a/trunk/drivers/media/video/upd64031a.c +++ b/trunk/drivers/media/video/upd64031a.c @@ -230,7 +230,7 @@ static int upd64031a_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL); + state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/trunk/drivers/media/video/upd64083.c b/trunk/drivers/media/video/upd64083.c index 9bbe61700fd5..28e0e6b6ca84 100644 --- a/trunk/drivers/media/video/upd64083.c +++ b/trunk/drivers/media/video/upd64083.c @@ -202,7 +202,7 @@ static int upd64083_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kzalloc(sizeof(struct upd64083_state), GFP_KERNEL); + state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/trunk/drivers/media/video/v4l2-dev.c b/trunk/drivers/media/video/v4l2-dev.c index 6dc7196296b3..498e6742579e 100644 --- a/trunk/drivers/media/video/v4l2-dev.c +++ b/trunk/drivers/media/video/v4l2-dev.c @@ -389,8 +389,7 @@ static int v4l2_open(struct inode *inode, struct file *filp) video_get(vdev); mutex_unlock(&videodev_lock); #if defined(CONFIG_MEDIA_CONTROLLER) - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && - vdev->vfl_type != VFL_TYPE_SUBDEV) { + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) { entity = media_entity_get(&vdev->entity); if (!entity) { ret = -EBUSY; @@ -416,8 +415,7 @@ static int v4l2_open(struct inode *inode, struct file *filp) /* decrease the refcount in case of an error */ if (ret) { #if defined(CONFIG_MEDIA_CONTROLLER) - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && - vdev->vfl_type != VFL_TYPE_SUBDEV) + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) media_entity_put(entity); 
#endif video_put(vdev); @@ -439,8 +437,7 @@ static int v4l2_release(struct inode *inode, struct file *filp) mutex_unlock(vdev->lock); } #if defined(CONFIG_MEDIA_CONTROLLER) - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && - vdev->vfl_type != VFL_TYPE_SUBDEV) + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) media_entity_put(&vdev->entity); #endif /* decrease the refcount unconditionally since the release() @@ -689,8 +686,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr, #if defined(CONFIG_MEDIA_CONTROLLER) /* Part 5: Register the entity. */ - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && - vdev->vfl_type != VFL_TYPE_SUBDEV) { + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) { vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L; vdev->entity.name = vdev->name; vdev->entity.v4l.major = VIDEO_MAJOR; @@ -737,8 +733,7 @@ void video_unregister_device(struct video_device *vdev) return; #if defined(CONFIG_MEDIA_CONTROLLER) - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && - vdev->vfl_type != VFL_TYPE_SUBDEV) + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) media_device_unregister_entity(&vdev->entity); #endif diff --git a/trunk/drivers/media/video/v4l2-device.c b/trunk/drivers/media/video/v4l2-device.c index 4aae501f02d0..5aeaf876ba9b 100644 --- a/trunk/drivers/media/video/v4l2-device.c +++ b/trunk/drivers/media/video/v4l2-device.c @@ -155,10 +155,8 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, sd->v4l2_dev = v4l2_dev; if (sd->internal_ops && sd->internal_ops->registered) { err = sd->internal_ops->registered(sd); - if (err) { - module_put(sd->owner); + if (err) return err; - } } /* This just returns 0 if either of the two args is NULL */ @@ -166,7 +164,6 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, if (err) { if (sd->internal_ops && sd->internal_ops->unregistered) sd->internal_ops->unregistered(sd); - module_put(sd->owner); return err; } diff --git a/trunk/drivers/media/video/v4l2-subdev.c b/trunk/drivers/media/video/v4l2-subdev.c index 812729ebf09e..0b8064490676 100644 --- a/trunk/drivers/media/video/v4l2-subdev.c +++ b/trunk/drivers/media/video/v4l2-subdev.c @@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) switch (cmd) { case VIDIOC_QUERYCTRL: - return v4l2_queryctrl(sd->ctrl_handler, arg); + return v4l2_subdev_queryctrl(sd, arg); case VIDIOC_QUERYMENU: - return v4l2_querymenu(sd->ctrl_handler, arg); + return v4l2_subdev_querymenu(sd, arg); case VIDIOC_G_CTRL: - return v4l2_g_ctrl(sd->ctrl_handler, arg); + return v4l2_subdev_g_ctrl(sd, arg); case VIDIOC_S_CTRL: - return v4l2_s_ctrl(sd->ctrl_handler, arg); + return v4l2_subdev_s_ctrl(sd, arg); case VIDIOC_G_EXT_CTRLS: - return v4l2_g_ext_ctrls(sd->ctrl_handler, arg); + return v4l2_subdev_g_ext_ctrls(sd, arg); case VIDIOC_S_EXT_CTRLS: - return v4l2_s_ext_ctrls(sd->ctrl_handler, arg); + return v4l2_subdev_s_ext_ctrls(sd, arg); case VIDIOC_TRY_EXT_CTRLS: - return v4l2_try_ext_ctrls(sd->ctrl_handler, arg); + return v4l2_subdev_try_ext_ctrls(sd, arg); case VIDIOC_DQEVENT: if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) diff --git a/trunk/drivers/media/video/videobuf2-core.c b/trunk/drivers/media/video/videobuf2-core.c index 6ba1461d51ef..6698c77e0f64 100644 --- a/trunk/drivers/media/video/videobuf2-core.c +++ b/trunk/drivers/media/video/videobuf2-core.c @@ -37,9 +37,6 @@ module_param(debug, int, 0644); #define call_qop(q, op, args...) \ (((q)->ops->op) ? 
((q)->ops->op(args)) : 0) -#define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ - V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR) - /** * __vb2_buf_mem_alloc() - allocate video memory for the given buffer */ @@ -54,7 +51,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb, for (plane = 0; plane < vb->num_planes; ++plane) { mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane], plane_sizes[plane]); - if (IS_ERR_OR_NULL(mem_priv)) + if (!mem_priv) goto free; /* Associate allocator private data with this plane */ @@ -287,7 +284,7 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) struct vb2_queue *q = vb->vb2_queue; int ret = 0; - /* Copy back data such as timestamp, flags, input, etc. */ + /* Copy back data such as timestamp, input, etc. */ memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); b->input = vb->v4l2_buf.input; b->reserved = vb->v4l2_buf.reserved; @@ -316,10 +313,7 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) b->m.userptr = vb->v4l2_planes[0].m.userptr; } - /* - * Clear any buffer state related flags. - */ - b->flags &= ~V4L2_BUFFER_STATE_FLAGS; + b->flags = 0; switch (vb->state) { case VB2_BUF_STATE_QUEUED: @@ -525,7 +519,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); memset(plane_sizes, 0, sizeof(plane_sizes)); memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); - q->memory = req->memory; /* * Ask the driver how many buffers and planes per buffer it requires. @@ -567,6 +560,8 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) ret = num_buffers; } + q->memory = req->memory; + /* * Return the number of successfully allocated buffers * to the userspace. 
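[ For context, vb2_reqbufs() above backs the standard VIDIOC_REQBUFS ioctl; the req->memory value stored in q->memory in this hunk comes straight from userspace. A minimal, purely illustrative caller (error handling trimmed) could look like:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Request four MMAP buffers on an already-open video node. */
	static int example_request_buffers(int fd)
	{
		struct v4l2_requestbuffers req;

		memset(&req, 0, sizeof(req));
		req.count  = 4;
		req.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		req.memory = V4L2_MEMORY_MMAP;

		/* On return, req.count holds the number actually allocated. */
		return ioctl(fd, VIDIOC_REQBUFS, &req);
	}
]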
@@ -720,8 +715,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b, vb->v4l2_buf.field = b->field; vb->v4l2_buf.timestamp = b->timestamp; - vb->v4l2_buf.input = b->input; - vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS; return 0; } diff --git a/trunk/drivers/media/video/videobuf2-dma-contig.c b/trunk/drivers/media/video/videobuf2-dma-contig.c index a790a5f8c06f..58205d596138 100644 --- a/trunk/drivers/media/video/videobuf2-dma-contig.c +++ b/trunk/drivers/media/video/videobuf2-dma-contig.c @@ -46,7 +46,7 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size) GFP_KERNEL); if (!buf->vaddr) { dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n", - size); + buf->size); kfree(buf); return ERR_PTR(-ENOMEM); } diff --git a/trunk/drivers/message/i2o/i2o_block.c b/trunk/drivers/message/i2o/i2o_block.c index 4796bbf0ae4e..643ad52e3ca2 100644 --- a/trunk/drivers/message/i2o/i2o_block.c +++ b/trunk/drivers/message/i2o/i2o_block.c @@ -1000,6 +1000,7 @@ static struct i2o_block_device *i2o_block_device_alloc(void) gd->major = I2O_MAJOR; gd->queue = queue; gd->fops = &i2o_block_fops; + gd->events = DISK_EVENT_MEDIA_CHANGE; gd->private_data = dev; dev->gd = gd; diff --git a/trunk/drivers/mfd/asic3.c b/trunk/drivers/mfd/asic3.c index 0b4d5b23bec9..d4a851c6b5bf 100644 --- a/trunk/drivers/mfd/asic3.c +++ b/trunk/drivers/mfd/asic3.c @@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) int iter, i; unsigned long flags; - data->chip->irq_ack(data); + data->chip->irq_ack(irq_data); for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { u32 status; diff --git a/trunk/drivers/mfd/omap-usb-host.c b/trunk/drivers/mfd/omap-usb-host.c index 3ab9ffa00aad..53450f433f10 100644 --- a/trunk/drivers/mfd/omap-usb-host.c +++ b/trunk/drivers/mfd/omap-usb-host.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #define USBHS_DRIVER_NAME "usbhs-omap" @@ -699,7 +700,8 @@ static int usbhs_enable(struct device *dev) dev_dbg(dev, "starting TI HSUSB Controller\n"); if (!pdata) { dev_dbg(dev, "missing platform_data\n"); - return -ENODEV; + ret = -ENODEV; + goto end_enable; } spin_lock_irqsave(&omap->lock, flags); @@ -717,14 +719,14 @@ static int usbhs_enable(struct device *dev) gpio_request(pdata->ehci_data->reset_gpio_port[0], "USB1 PHY reset"); gpio_direction_output - (pdata->ehci_data->reset_gpio_port[0], 0); + (pdata->ehci_data->reset_gpio_port[0], 1); } if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { gpio_request(pdata->ehci_data->reset_gpio_port[1], "USB2 PHY reset"); gpio_direction_output - (pdata->ehci_data->reset_gpio_port[1], 0); + (pdata->ehci_data->reset_gpio_port[1], 1); } /* Hold the PHY in RESET for enough time till DIR is high */ @@ -904,17 +906,16 @@ static int usbhs_enable(struct device *dev) if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) gpio_set_value - (pdata->ehci_data->reset_gpio_port[0], 1); + (pdata->ehci_data->reset_gpio_port[0], 0); if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) gpio_set_value - (pdata->ehci_data->reset_gpio_port[1], 1); + (pdata->ehci_data->reset_gpio_port[1], 0); } end_count: omap->count++; - spin_unlock_irqrestore(&omap->lock, flags); - return 0; + goto end_enable; err_tll: if (pdata->ehci_data->phy_reset) { @@ -930,6 +931,8 @@ static int usbhs_enable(struct device *dev) clk_disable(omap->usbhost_fs_fck); clk_disable(omap->usbhost_hs_fck); clk_disable(omap->usbhost_ick); + +end_enable: spin_unlock_irqrestore(&omap->lock, flags); return ret; } diff 
--git a/trunk/drivers/mfd/twl4030-power.c b/trunk/drivers/mfd/twl4030-power.c index 2c0d4d16491a..16422de0823a 100644 --- a/trunk/drivers/mfd/twl4030-power.c +++ b/trunk/drivers/mfd/twl4030-power.c @@ -447,13 +447,12 @@ static int __init load_twl4030_script(struct twl4030_script *tscript, if (err) goto out; } - if (tscript->flags & TWL4030_SLEEP_SCRIPT) { + if (tscript->flags & TWL4030_SLEEP_SCRIPT) if (order) pr_warning("TWL4030: Bad order of scripts (sleep "\ "script before wakeup) Leads to boot"\ "failure on some boards\n"); err = twl4030_config_sleep_sequence(address); - } out: return err; } diff --git a/trunk/drivers/mmc/core/bus.c b/trunk/drivers/mmc/core/bus.c index d6d62fd07ee9..63667a8f140c 100644 --- a/trunk/drivers/mmc/core/bus.c +++ b/trunk/drivers/mmc/core/bus.c @@ -284,7 +284,6 @@ int mmc_add_card(struct mmc_card *card) type = "SD-combo"; if (mmc_card_blockaddr(card)) type = "SDHC-combo"; - break; default: type = "?"; break; diff --git a/trunk/drivers/mmc/host/omap.c b/trunk/drivers/mmc/host/omap.c index a6c329040140..2e032f0e8cf4 100644 --- a/trunk/drivers/mmc/host/omap.c +++ b/trunk/drivers/mmc/host/omap.c @@ -832,7 +832,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) return IRQ_HANDLED; } - if (end_command && host->cmd) + if (end_command) mmc_omap_cmd_done(host, host->cmd); if (host->data != NULL) { if (transfer_error) diff --git a/trunk/drivers/mmc/host/sdhci-of-core.c b/trunk/drivers/mmc/host/sdhci-of-core.c index 60e4186a4345..f9b611fc773e 100644 --- a/trunk/drivers/mmc/host/sdhci-of-core.c +++ b/trunk/drivers/mmc/host/sdhci-of-core.c @@ -124,10 +124,8 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) #endif } -static const struct of_device_id sdhci_of_match[]; static int __devinit sdhci_of_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct sdhci_of_data *sdhci_of_data; struct sdhci_host *host; @@ -136,10 +134,9 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev) int size; int ret; - match = of_match_device(sdhci_of_match, &ofdev->dev); - if (!match) + if (!ofdev->dev.of_match) return -EINVAL; - sdhci_of_data = match->data; + sdhci_of_data = ofdev->dev.of_match->data; if (!of_device_is_available(np)) return -ENODEV; diff --git a/trunk/drivers/mmc/host/sdhci-pci.c b/trunk/drivers/mmc/host/sdhci-pci.c index f8b5f37007b2..a136be706347 100644 --- a/trunk/drivers/mmc/host/sdhci-pci.c +++ b/trunk/drivers/mmc/host/sdhci-pci.c @@ -957,7 +957,6 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( host->ioaddr = pci_ioremap_bar(pdev, bar); if (!host->ioaddr) { dev_err(&pdev->dev, "failed to remap registers\n"); - ret = -ENOMEM; goto release; } diff --git a/trunk/drivers/mmc/host/sdhci.c b/trunk/drivers/mmc/host/sdhci.c index 5d20661bc357..9e15f41f87be 100644 --- a/trunk/drivers/mmc/host/sdhci.c +++ b/trunk/drivers/mmc/host/sdhci.c @@ -1334,13 +1334,6 @@ static void sdhci_tasklet_finish(unsigned long param) host = (struct sdhci_host*)param; - /* - * If this tasklet gets rescheduled while running, it will - * be run again afterwards but without any active request. - */ - if (!host->mrq) - return; - spin_lock_irqsave(&host->lock, flags); del_timer(&host->timer); @@ -1352,7 +1345,7 @@ static void sdhci_tasklet_finish(unsigned long param) * upon error conditions. 
*/ if (!(host->flags & SDHCI_DEVICE_DEAD) && - ((mrq->cmd && mrq->cmd->error) || + (mrq->cmd->error || (mrq->data && (mrq->data->error || (mrq->data->stop && mrq->data->stop->error))) || (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { diff --git a/trunk/drivers/mmc/host/tmio_mmc_pio.c b/trunk/drivers/mmc/host/tmio_mmc_pio.c index 710339a85c84..62d37de6de76 100644 --- a/trunk/drivers/mmc/host/tmio_mmc_pio.c +++ b/trunk/drivers/mmc/host/tmio_mmc_pio.c @@ -728,15 +728,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) tmio_mmc_set_clock(host, ios->clock); /* Power sequence - OFF -> UP -> ON */ - if (ios->power_mode == MMC_POWER_UP) { - /* power up SD bus */ - if (host->set_pwr) - host->set_pwr(host->pdev, 1); - } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { + if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { /* power down SD bus */ if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) host->set_pwr(host->pdev, 0); tmio_mmc_clk_stop(host); + } else if (ios->power_mode == MMC_POWER_UP) { + /* power up SD bus */ + if (host->set_pwr) + host->set_pwr(host->pdev, 1); } else { /* start bus clock */ tmio_mmc_clk_start(host); diff --git a/trunk/drivers/mtd/maps/Kconfig b/trunk/drivers/mtd/maps/Kconfig index 5069111c81cc..44b1f46458ca 100644 --- a/trunk/drivers/mtd/maps/Kconfig +++ b/trunk/drivers/mtd/maps/Kconfig @@ -260,13 +260,6 @@ config MTD_BCM963XX Support for parsing CFE image tag and creating MTD partitions on Broadcom BCM63xx boards. -config MTD_LANTIQ - tristate "Lantiq SoC NOR support" - depends on LANTIQ - select MTD_PARTITIONS - help - Support for NOR flash attached to the Lantiq SoC's External Bus Unit. - config MTD_DILNETPC tristate "CFI Flash device mapped on DIL/Net PC" depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN diff --git a/trunk/drivers/mtd/maps/Makefile b/trunk/drivers/mtd/maps/Makefile index 6adf4c9b9057..08533bd5cba7 100644 --- a/trunk/drivers/mtd/maps/Makefile +++ b/trunk/drivers/mtd/maps/Makefile @@ -60,4 +60,3 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o -obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o diff --git a/trunk/drivers/mtd/maps/lantiq-flash.c b/trunk/drivers/mtd/maps/lantiq-flash.c deleted file mode 100644 index a90cabd7b84d..000000000000 --- a/trunk/drivers/mtd/maps/lantiq-flash.c +++ /dev/null @@ -1,251 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE - * Copyright (C) 2010 John Crispin - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -/* - * The NOR flash is connected to the same external bus unit (EBU) as PCI. - * To make PCI work we need to enable the endianness swapping for the address - * written to the EBU. This endianness swapping works for PCI correctly but - * fails for attached NOR devices. To workaround this we need to use a complex - * map. The workaround involves swapping all addresses whilst probing the chip. - * Once probing is complete we stop swapping the addresses but swizzle the - * unlock addresses to ensure that access to the NOR device works correctly. 
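[ For the curious: in the driver removed below, the probe-time swizzle is simply "adr ^= 2", so 16-bit reads of offsets 0x0, 0x2, 0x4, 0x6 actually hit 0x2, 0x0, 0x6, 0x4, exchanging the two halfwords of every 32-bit word to undo the EBU endianness swap while CFI probing runs. Once probing is done, only the CFI unlock addresses are adjusted (addr_unlock1/2 ^= 1, presumably in device-word units, which matches the byte-address flip used during probing). ]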
- */ - -enum { - LTQ_NOR_PROBING, - LTQ_NOR_NORMAL -}; - -struct ltq_mtd { - struct resource *res; - struct mtd_info *mtd; - struct map_info *map; -}; - -static char ltq_map_name[] = "ltq_nor"; - -static map_word -ltq_read16(struct map_info *map, unsigned long adr) -{ - unsigned long flags; - map_word temp; - - if (map->map_priv_1 == LTQ_NOR_PROBING) - adr ^= 2; - spin_lock_irqsave(&ebu_lock, flags); - temp.x[0] = *(u16 *)(map->virt + adr); - spin_unlock_irqrestore(&ebu_lock, flags); - return temp; -} - -static void -ltq_write16(struct map_info *map, map_word d, unsigned long adr) -{ - unsigned long flags; - - if (map->map_priv_1 == LTQ_NOR_PROBING) - adr ^= 2; - spin_lock_irqsave(&ebu_lock, flags); - *(u16 *)(map->virt + adr) = d.x[0]; - spin_unlock_irqrestore(&ebu_lock, flags); -} - -/* - * The following 2 functions copy data between iomem and a cached memory - * section. As memcpy() makes use of pre-fetching we cannot use it here. - * The normal alternative of using memcpy_{to,from}io also makes use of - * memcpy() on MIPS so it is not applicable either. We are therefore stuck - * with having to use our own loop. - */ -static void -ltq_copy_from(struct map_info *map, void *to, - unsigned long from, ssize_t len) -{ - unsigned char *f = (unsigned char *)map->virt + from; - unsigned char *t = (unsigned char *)to; - unsigned long flags; - - spin_lock_irqsave(&ebu_lock, flags); - while (len--) - *t++ = *f++; - spin_unlock_irqrestore(&ebu_lock, flags); -} - -static void -ltq_copy_to(struct map_info *map, unsigned long to, - const void *from, ssize_t len) -{ - unsigned char *f = (unsigned char *)from; - unsigned char *t = (unsigned char *)map->virt + to; - unsigned long flags; - - spin_lock_irqsave(&ebu_lock, flags); - while (len--) - *t++ = *f++; - spin_unlock_irqrestore(&ebu_lock, flags); -} - -static const char const *part_probe_types[] = { "cmdlinepart", NULL }; - -static int __init -ltq_mtd_probe(struct platform_device *pdev) -{ - struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); - struct ltq_mtd *ltq_mtd; - struct mtd_partition *parts; - struct resource *res; - int nr_parts = 0; - struct cfi_private *cfi; - int err; - - ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL); - platform_set_drvdata(pdev, ltq_mtd); - - ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!ltq_mtd->res) { - dev_err(&pdev->dev, "failed to get memory resource"); - err = -ENOENT; - goto err_out; - } - - res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start, - resource_size(ltq_mtd->res), dev_name(&pdev->dev)); - if (!ltq_mtd->res) { - dev_err(&pdev->dev, "failed to request mem resource"); - err = -EBUSY; - goto err_out; - } - - ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL); - ltq_mtd->map->phys = res->start; - ltq_mtd->map->size = resource_size(res); - ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev, - ltq_mtd->map->phys, ltq_mtd->map->size); - if (!ltq_mtd->map->virt) { - dev_err(&pdev->dev, "failed to ioremap!\n"); - err = -ENOMEM; - goto err_free; - } - - ltq_mtd->map->name = ltq_map_name; - ltq_mtd->map->bankwidth = 2; - ltq_mtd->map->read = ltq_read16; - ltq_mtd->map->write = ltq_write16; - ltq_mtd->map->copy_from = ltq_copy_from; - ltq_mtd->map->copy_to = ltq_copy_to; - - ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING; - ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map); - ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL; - - if (!ltq_mtd->mtd) { - dev_err(&pdev->dev, "probing failed\n"); - err = -ENXIO; - goto err_unmap; - } - - 
ltq_mtd->mtd->owner = THIS_MODULE; - - cfi = ltq_mtd->map->fldrv_priv; - cfi->addr_unlock1 ^= 1; - cfi->addr_unlock2 ^= 1; - - nr_parts = parse_mtd_partitions(ltq_mtd->mtd, - part_probe_types, &parts, 0); - if (nr_parts > 0) { - dev_info(&pdev->dev, - "using %d partitions from cmdline", nr_parts); - } else { - nr_parts = ltq_mtd_data->nr_parts; - parts = ltq_mtd_data->parts; - } - - err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts); - if (err) { - dev_err(&pdev->dev, "failed to add partitions\n"); - goto err_destroy; - } - - return 0; - -err_destroy: - map_destroy(ltq_mtd->mtd); -err_unmap: - iounmap(ltq_mtd->map->virt); -err_free: - kfree(ltq_mtd->map); -err_out: - kfree(ltq_mtd); - return err; -} - -static int __devexit -ltq_mtd_remove(struct platform_device *pdev) -{ - struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); - - if (ltq_mtd) { - if (ltq_mtd->mtd) { - del_mtd_partitions(ltq_mtd->mtd); - map_destroy(ltq_mtd->mtd); - } - if (ltq_mtd->map->virt) - iounmap(ltq_mtd->map->virt); - kfree(ltq_mtd->map); - kfree(ltq_mtd); - } - return 0; -} - -static struct platform_driver ltq_mtd_driver = { - .remove = __devexit_p(ltq_mtd_remove), - .driver = { - .name = "ltq_nor", - .owner = THIS_MODULE, - }, -}; - -static int __init -init_ltq_mtd(void) -{ - int ret = platform_driver_probe(<q_mtd_driver, ltq_mtd_probe); - - if (ret) - pr_err("ltq_nor: error registering platform driver"); - return ret; -} - -static void __exit -exit_ltq_mtd(void) -{ - platform_driver_unregister(<q_mtd_driver); -} - -module_init(init_ltq_mtd); -module_exit(exit_ltq_mtd); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("John Crispin "); -MODULE_DESCRIPTION("Lantiq SoC NOR"); diff --git a/trunk/drivers/mtd/maps/physmap_of.c b/trunk/drivers/mtd/maps/physmap_of.c index c1d33464aee8..bd483f0c57e1 100644 --- a/trunk/drivers/mtd/maps/physmap_of.c +++ b/trunk/drivers/mtd/maps/physmap_of.c @@ -214,13 +214,11 @@ static void __devinit of_free_probes(const char **probes) } #endif -static struct of_device_id of_flash_match[]; static int __devinit of_flash_probe(struct platform_device *dev) { #ifdef CONFIG_MTD_PARTITIONS const char **part_probe_types; #endif - const struct of_device_id *match; struct device_node *dp = dev->dev.of_node; struct resource res; struct of_flash *info; @@ -234,10 +232,9 @@ static int __devinit of_flash_probe(struct platform_device *dev) struct mtd_info **mtd_list = NULL; resource_size_t res_size; - match = of_match_device(of_flash_match, &dev->dev); - if (!match) + if (!dev->dev.of_match) return -EINVAL; - probe_type = match->data; + probe_type = dev->dev.of_match->data; reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); diff --git a/trunk/drivers/mtd/nand/au1550nd.c b/trunk/drivers/mtd/nand/au1550nd.c index 5d513b54a7d7..3ffe05db4923 100644 --- a/trunk/drivers/mtd/nand/au1550nd.c +++ b/trunk/drivers/mtd/nand/au1550nd.c @@ -10,7 +10,6 @@ */ #include -#include #include #include #include @@ -471,7 +470,7 @@ static int __init au1xxx_nand_init(void) #ifdef CONFIG_MIPS_PB1550 /* set gpio206 high */ - gpio_direction_input(206); + au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR); boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); diff --git a/trunk/drivers/mtd/nand/diskonchip.c b/trunk/drivers/mtd/nand/diskonchip.c index 657b9f4b6f9b..96c0b34ba8db 100644 --- a/trunk/drivers/mtd/nand/diskonchip.c +++ b/trunk/drivers/mtd/nand/diskonchip.c @@ -400,7 +400,7 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) 
doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); - /* We can't use dev_ready here, but at least we wait for the + /* We can't' use dev_ready here, but at least we wait for the * command to complete */ udelay(50); diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig index 19f04a34783a..dc280bc8eba2 100644 --- a/trunk/drivers/net/Kconfig +++ b/trunk/drivers/net/Kconfig @@ -2017,13 +2017,6 @@ config FTMAC100 from Faraday. It is used on Faraday A320, Andes AG101 and some other ARM/NDS32 SoC's. -config LANTIQ_ETOP - tristate "Lantiq SoC ETOP driver" - depends on SOC_TYPE_XWAY - help - Support for the MII0 inside the Lantiq SoC - - source "drivers/net/fs_enet/Kconfig" source "drivers/net/octeon/Kconfig" @@ -2543,7 +2536,7 @@ config S6GMAC source "drivers/net/stmmac/Kconfig" config PCH_GBE - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" + tristate "PCH Gigabit Ethernet" depends on PCI select MII ---help--- @@ -2555,12 +2548,6 @@ config PCH_GBE to Gigabit Ethernet. This driver enables Gigabit Ethernet function. - This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7223. - ML7223 IOH is for MP(Media Phone) use. - ML7223 is companion chip for Intel Atom E6xx series. - ML7223 is completely compatible for Intel EG20T PCH. - endif # NETDEV_1000 # diff --git a/trunk/drivers/net/Makefile b/trunk/drivers/net/Makefile index 209fbb70619b..01b604ad155e 100644 --- a/trunk/drivers/net/Makefile +++ b/trunk/drivers/net/Makefile @@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o -obj-$(CONFIG_NE_H8300) += ne-h8300.o +obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o obj-$(CONFIG_AX88796) += ax88796.o obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o obj-$(CONFIG_FTMAC100) += ftmac100.o @@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o obj-$(CONFIG_LP486E) += lp486e.o obj-$(CONFIG_ETH16I) += eth16i.o -obj-$(CONFIG_ZORRO8390) += zorro8390.o +obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o obj-$(CONFIG_HPLANCE) += hplance.o 7990.o obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_EQUALIZER) += eql.o @@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o obj-$(CONFIG_DECLANCE) += declance.o obj-$(CONFIG_ATARILANCE) += atarilance.o obj-$(CONFIG_A2065) += a2065.o -obj-$(CONFIG_HYDRA) += hydra.o +obj-$(CONFIG_HYDRA) += hydra.o 8390.o obj-$(CONFIG_ARIADNE) += ariadne.o obj-$(CONFIG_CS89x0) += cs89x0.o obj-$(CONFIG_MACSONIC) += macsonic.o @@ -259,7 +259,6 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/ obj-$(CONFIG_ENC28J60) += enc28j60.o obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_GRETH) += greth.o -obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o diff --git a/trunk/drivers/net/amd8111e.c b/trunk/drivers/net/amd8111e.c index 241b185e6569..88495c48a81d 100644 --- a/trunk/drivers/net/amd8111e.c +++ b/trunk/drivers/net/amd8111e.c @@ -106,7 +106,7 @@ MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. 
Driver Version "M MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); module_param_array(speed_duplex, int, NULL, 0); -MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); +MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); module_param_array(coalesce, bool, NULL, 0); MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable"); module_param_array(dynamic_ipg, bool, NULL, 0); diff --git a/trunk/drivers/net/arm/etherh.c b/trunk/drivers/net/arm/etherh.c index fbfb5b47c506..4af235d41fda 100644 --- a/trunk/drivers/net/arm/etherh.c +++ b/trunk/drivers/net/arm/etherh.c @@ -527,7 +527,7 @@ static void __init etherh_banner(void) * Read the ethernet address string from the on board rom. * This is an ascii string... */ -static int __devinit etherh_addr(char *addr, struct expansion_card *ec) +static int __init etherh_addr(char *addr, struct expansion_card *ec) { struct in_chunk_dir cd; char *s; @@ -655,7 +655,7 @@ static const struct net_device_ops etherh_netdev_ops = { static u32 etherh_regoffsets[16]; static u32 etherm_regoffsets[16]; -static int __devinit +static int __init etherh_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct etherh_data *data = id->data; diff --git a/trunk/drivers/net/atarilance.c b/trunk/drivers/net/atarilance.c index 1264d781b554..ce0091eb06f5 100644 --- a/trunk/drivers/net/atarilance.c +++ b/trunk/drivers/net/atarilance.c @@ -554,7 +554,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, memaddr == (unsigned short *)0xffe00000) { /* PAMs card and Riebl on ST use level 5 autovector */ if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, - "PAM,Riebl-ST Ethernet", dev)) { + "PAM/Riebl-ST Ethernet", dev)) { printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); return 0; } diff --git a/trunk/drivers/net/atl1c/atl1c.h b/trunk/drivers/net/atl1c/atl1c.h index 925929d764ca..7cb375e0e29c 100644 --- a/trunk/drivers/net/atl1c/atl1c.h +++ b/trunk/drivers/net/atl1c/atl1c.h @@ -566,9 +566,9 @@ struct atl1c_adapter { #define __AT_TESTING 0x0001 #define __AT_RESETTING 0x0002 #define __AT_DOWN 0x0003 - unsigned long work_event; -#define ATL1C_WORK_EVENT_RESET 0 -#define ATL1C_WORK_EVENT_LINK_CHANGE 1 + u8 work_event; +#define ATL1C_WORK_EVENT_RESET 0x01 +#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02 u32 msg_enable; bool have_msi; diff --git a/trunk/drivers/net/atl1c/atl1c_main.c b/trunk/drivers/net/atl1c/atl1c_main.c index a6e1c36e48e6..7d9d5067a65c 100644 --- a/trunk/drivers/net/atl1c/atl1c_main.c +++ b/trunk/drivers/net/atl1c/atl1c_main.c @@ -325,7 +325,7 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter) } } - set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event); + adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE; schedule_work(&adapter->common_task); } @@ -337,16 +337,20 @@ static void atl1c_common_task(struct work_struct *work) adapter = container_of(work, struct atl1c_adapter, common_task); netdev = adapter->netdev; - if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { + if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { + adapter->work_event &= ~ATL1C_WORK_EVENT_RESET; netif_device_detach(netdev); atl1c_down(adapter); atl1c_up(adapter); netif_device_attach(netdev); + return; } - if 
(test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, - &adapter->work_event)) + if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) { + adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE; atl1c_check_link_status(adapter); + } + return; } @@ -365,7 +369,7 @@ static void atl1c_tx_timeout(struct net_device *netdev) struct atl1c_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ - set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); + adapter->work_event |= ATL1C_WORK_EVENT_RESET; schedule_work(&adapter->common_task); } diff --git a/trunk/drivers/net/benet/be.h b/trunk/drivers/net/benet/be.h index 2353eca32593..66823eded7a3 100644 --- a/trunk/drivers/net/benet/be.h +++ b/trunk/drivers/net/benet/be.h @@ -213,7 +213,7 @@ struct be_rx_stats { struct be_rx_compl_info { u32 rss_hash; - u16 vlan_tag; + u16 vid; u16 pkt_size; u16 rxq_idx; u16 mac_id; diff --git a/trunk/drivers/net/benet/be_cmds.c b/trunk/drivers/net/benet/be_cmds.c index 9dc9394fd4ca..1e2d825bb94a 100644 --- a/trunk/drivers/net/benet/be_cmds.c +++ b/trunk/drivers/net/benet/be_cmds.c @@ -132,7 +132,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, struct be_async_event_grp5_pvid_state *evt) { if (evt->enabled) - adapter->pvid = le16_to_cpu(evt->tag); + adapter->pvid = evt->tag; else adapter->pvid = 0; } diff --git a/trunk/drivers/net/benet/be_main.c b/trunk/drivers/net/benet/be_main.c index 9187fb4e08f1..7cb5a114c733 100644 --- a/trunk/drivers/net/benet/be_main.c +++ b/trunk/drivers/net/benet/be_main.c @@ -1018,8 +1018,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, kfree_skb(skb); return; } - vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, - rxcp->vlan_tag); + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid); } else { netif_receive_skb(skb); } @@ -1077,8 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, if (likely(!rxcp->vlanf)) napi_gro_frags(&eq_obj->napi); else - vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, - rxcp->vlan_tag); + vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid); } static void be_parse_rx_compl_v1(struct be_adapter *adapter, @@ -1104,8 +1102,7 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter, rxcp->pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, - compl); + rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); } static void be_parse_rx_compl_v0(struct be_adapter *adapter, @@ -1131,8 +1128,7 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter, rxcp->pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, - compl); + rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); } static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) @@ -1159,11 +1155,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) rxcp->vlanf = 0; if (!lancer_chip(adapter)) - rxcp->vlan_tag = swab16(rxcp->vlan_tag); + rxcp->vid = swab16(rxcp->vid); - if (((adapter->pvid & VLAN_VID_MASK) == - (rxcp->vlan_tag & VLAN_VID_MASK)) && - !adapter->vlan_tag[rxcp->vlan_tag]) + if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) rxcp->vlanf = 0; /* As the compl has been parsed, reset it; we wont 
touch it again */ @@ -1879,7 +1873,6 @@ static void be_worker(struct work_struct *work) be_detect_dump_ue(adapter); reschedule: - adapter->work_counter++; schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } diff --git a/trunk/drivers/net/bnx2.c b/trunk/drivers/net/bnx2.c index d8383a9af9ad..8e6d618b5305 100644 --- a/trunk/drivers/net/bnx2.c +++ b/trunk/drivers/net/bnx2.c @@ -8413,8 +8413,6 @@ bnx2_remove_one(struct pci_dev *pdev) unregister_netdev(dev); - del_timer_sync(&bp->timer); - if (bp->mips_firmware) release_firmware(bp->mips_firmware); if (bp->rv2p_firmware) diff --git a/trunk/drivers/net/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/bnx2x/bnx2x_cmn.c index 16581df5ee4e..e83ac6dd6fc0 100644 --- a/trunk/drivers/net/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/bnx2x/bnx2x_cmn.c @@ -2019,23 +2019,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, u32 *parsing_data, u32 xmit_type) { - *parsing_data |= - ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; + *parsing_data |= ((tcp_hdrlen(skb)/4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; - if (xmit_type & XMIT_CSUM_TCP) { - *parsing_data |= ((tcp_hdrlen(skb) / 4) << - ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & - ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; + *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) << + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; - return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; - } else - /* We support checksum offload for TCP and UDP only. - * No need to pass the UDP header length - it's a constant. 
- */ - return skb_transport_header(skb) + - sizeof(struct udphdr) - skb->data; + return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; } /** @@ -2051,7 +2043,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, struct eth_tx_parse_bd_e1x *pbd, u32 xmit_type) { - u8 hlen = (skb_network_header(skb) - skb->data) >> 1; + u8 hlen = (skb_network_header(skb) - skb->data) / 2; /* for now NS flag is not used in Linux */ pbd->global_data = @@ -2059,15 +2051,9 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); pbd->ip_hlen_w = (skb_transport_header(skb) - - skb_network_header(skb)) >> 1; + skb_network_header(skb)) / 2; - hlen += pbd->ip_hlen_w; - - /* We support checksum offload for TCP and UDP only */ - if (xmit_type & XMIT_CSUM_TCP) - hlen += tcp_hdrlen(skb) / 2; - else - hlen += sizeof(struct udphdr) / 2; + hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2; pbd->total_hlen_w = cpu_to_le16(hlen); hlen = hlen*2; diff --git a/trunk/drivers/net/bonding/bond_3ad.c b/trunk/drivers/net/bonding/bond_3ad.c index 31912f17653f..494bf960442d 100644 --- a/trunk/drivers/net/bonding/bond_3ad.c +++ b/trunk/drivers/net/bonding/bond_3ad.c @@ -1482,11 +1482,8 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best, static int agg_device_up(const struct aggregator *agg) { - struct port *port = agg->lag_ports; - if (!port) - return 0; - return (netif_running(port->slave->dev) && - netif_carrier_ok(port->slave->dev)); + return (netif_running(agg->slave->dev) && + netif_carrier_ok(agg->slave->dev)); } /** diff --git a/trunk/drivers/net/bonding/bond_3ad.h b/trunk/drivers/net/bonding/bond_3ad.h index 01b8a6af275b..b28baff70864 100644 --- a/trunk/drivers/net/bonding/bond_3ad.h +++ b/trunk/drivers/net/bonding/bond_3ad.h @@ -39,7 +39,7 @@ typedef struct mac_addr { u8 mac_addr_value[ETH_ALEN]; -} __packed mac_addr_t; +} mac_addr_t; enum { BOND_AD_STABLE = 0, @@ -134,12 +134,12 @@ typedef struct lacpdu { u8 tlv_type_terminator; // = terminator u8 terminator_length; // = 0 u8 reserved_50[50]; // = 0 -} __packed lacpdu_t; +} lacpdu_t; typedef struct lacpdu_header { struct ethhdr hdr; struct lacpdu lacpdu; -} __packed lacpdu_header_t; +} lacpdu_header_t; // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) typedef struct bond_marker { @@ -155,12 +155,12 @@ typedef struct bond_marker { u8 tlv_type_terminator; // = 0x00 u8 terminator_length; // = 0x00 u8 reserved_90[90]; // = 0 -} __packed bond_marker_t; +} bond_marker_t; typedef struct bond_marker_header { struct ethhdr hdr; struct bond_marker marker; -} __packed bond_marker_header_t; +} bond_marker_header_t; #pragma pack() diff --git a/trunk/drivers/net/can/mscan/mpc5xxx_can.c b/trunk/drivers/net/can/mscan/mpc5xxx_can.c index 5fedc3375562..bd1d811c204f 100644 --- a/trunk/drivers/net/can/mscan/mpc5xxx_can.c +++ b/trunk/drivers/net/can/mscan/mpc5xxx_can.c @@ -247,10 +247,8 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, } #endif /* CONFIG_PPC_MPC512x */ -static struct of_device_id mpc5xxx_can_table[]; static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct mpc5xxx_can_data *data; struct device_node *np = ofdev->dev.of_node; struct net_device *dev; @@ -260,10 +258,9 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) int irq, mscan_clksrc = 0; int err = -ENOMEM; - match = of_match_device(mpc5xxx_can_table, &ofdev->dev); - if (!match) 
+ if (!ofdev->dev.of_match) return -EINVAL; - data = match->data; + data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; base = of_iomap(np, 0); if (!base) { diff --git a/trunk/drivers/net/can/sja1000/sja1000.c b/trunk/drivers/net/can/sja1000/sja1000.c index f501bba1fc6f..a358ea9445a2 100644 --- a/trunk/drivers/net/can/sja1000/sja1000.c +++ b/trunk/drivers/net/can/sja1000/sja1000.c @@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev) | (priv->read_reg(priv, REG_ID2) >> 5); } - cf->can_dlc = get_can_dlc(fi & 0x0F); if (fi & FI_RTR) { id |= CAN_RTR_FLAG; } else { + cf->can_dlc = get_can_dlc(fi & 0x0F); for (i = 0; i < cf->can_dlc; i++) cf->data[i] = priv->read_reg(priv, dreg++); } diff --git a/trunk/drivers/net/can/slcan.c b/trunk/drivers/net/can/slcan.c index 1b49df6b2470..b423965a78d1 100644 --- a/trunk/drivers/net/can/slcan.c +++ b/trunk/drivers/net/can/slcan.c @@ -583,9 +583,7 @@ static int slcan_open(struct tty_struct *tty) /* Done. We have linked the TTY line to a channel. */ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ - - /* TTY layer expects 0 on success */ - return 0; + return sl->dev->base_addr; err_free_chan: sl->tty = NULL; diff --git a/trunk/drivers/net/ehea/ehea_ethtool.c b/trunk/drivers/net/ehea/ehea_ethtool.c index f3bbdcef338c..3e2e734fecb7 100644 --- a/trunk/drivers/net/ehea/ehea_ethtool.c +++ b/trunk/drivers/net/ehea/ehea_ethtool.c @@ -55,20 +55,15 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->duplex = -1; } - if (cmd->speed == SPEED_10000) { - cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - cmd->port = PORT_FIBRE; - } else { - cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full - | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full - | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg - | SUPPORTED_TP); - cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg - | ADVERTISED_TP); - cmd->port = PORT_TP; - } + cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full + | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half + | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half + | SUPPORTED_Autoneg | SUPPORTED_FIBRE); + + cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg + | ADVERTISED_FIBRE); + cmd->port = PORT_FIBRE; cmd->autoneg = port->autoneg == 1 ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; diff --git a/trunk/drivers/net/ehea/ehea_main.c b/trunk/drivers/net/ehea/ehea_main.c index cf79cf759e13..f75d3144b8a5 100644 --- a/trunk/drivers/net/ehea/ehea_main.c +++ b/trunk/drivers/net/ehea/ehea_main.c @@ -2688,6 +2688,9 @@ static int ehea_open(struct net_device *dev) netif_start_queue(dev); } + init_waitqueue_head(&port->swqe_avail_wq); + init_waitqueue_head(&port->restart_wq); + mutex_unlock(&port->port_lock); return ret; @@ -3037,14 +3040,11 @@ static void ehea_rereg_mrs(void) if (dev->flags & IFF_UP) { mutex_lock(&port->port_lock); + port_napi_enable(port); ret = ehea_restart_qps(dev); - if (!ret) { - check_sqs(port); - port_napi_enable(port); + check_sqs(port); + if (!ret) netif_wake_queue(dev); - } else { - netdev_err(dev, "Unable to restart QPS\n"); - } mutex_unlock(&port->port_lock); } } @@ -3273,9 +3273,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, INIT_WORK(&port->reset_task, ehea_reset_port); - init_waitqueue_head(&port->swqe_avail_wq); - init_waitqueue_head(&port->restart_wq); - ret = register_netdev(dev); if (ret) { pr_err("register_netdev failed. ret=%d\n", ret); diff --git a/trunk/drivers/net/fs_enet/fs_enet-main.c b/trunk/drivers/net/fs_enet/fs_enet-main.c index 5131e61c358c..24cb953900dd 100644 --- a/trunk/drivers/net/fs_enet/fs_enet-main.c +++ b/trunk/drivers/net/fs_enet/fs_enet-main.c @@ -998,10 +998,8 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; -static struct of_device_id fs_enet_match[]; static int __devinit fs_enet_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct net_device *ndev; struct fs_enet_private *fep; struct fs_platform_info *fpi; @@ -1009,15 +1007,14 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) const u8 *mac_addr; int privsize, len, ret = -ENODEV; - match = of_match_device(fs_enet_match, &ofdev->dev); - if (!match) + if (!ofdev->dev.of_match) return -EINVAL; fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); if (!fpi) return -ENOMEM; - if (!IS_FEC(match)) { + if (!IS_FEC(ofdev->dev.of_match)) { data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) goto out_free_fpi; @@ -1052,7 +1049,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) fep->dev = &ofdev->dev; fep->ndev = ndev; fep->fpi = fpi; - fep->ops = match->data; + fep->ops = ofdev->dev.of_match->data; ret = fep->ops->setup_data(ndev); if (ret) diff --git a/trunk/drivers/net/fs_enet/mac-fec.c b/trunk/drivers/net/fs_enet/mac-fec.c index b9fbc83d64a7..61035fc5599b 100644 --- a/trunk/drivers/net/fs_enet/mac-fec.c +++ b/trunk/drivers/net/fs_enet/mac-fec.c @@ -226,8 +226,8 @@ static void set_multicast_finish(struct net_device *dev) } FC(fecp, r_cntrl, FEC_RCNTRL_PROM); - FW(fecp, grp_hash_table_high, fep->fec.hthi); - FW(fecp, grp_hash_table_low, fep->fec.htlo); + FW(fecp, hash_table_high, fep->fec.hthi); + FW(fecp, hash_table_low, fep->fec.htlo); } static void set_multicast_list(struct net_device *dev) @@ -273,8 +273,8 @@ static void restart(struct net_device *dev) /* * Reset all multicast. */ - FW(fecp, grp_hash_table_high, fep->fec.hthi); - FW(fecp, grp_hash_table_low, fep->fec.htlo); + FW(fecp, hash_table_high, fep->fec.hthi); + FW(fecp, hash_table_low, fep->fec.htlo); /* * Set maximum receive buffer size. 
diff --git a/trunk/drivers/net/fs_enet/mii-fec.c b/trunk/drivers/net/fs_enet/mii-fec.c index 6a2e150e75bb..7e840d373ab3 100644 --- a/trunk/drivers/net/fs_enet/mii-fec.c +++ b/trunk/drivers/net/fs_enet/mii-fec.c @@ -101,20 +101,17 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus) return 0; } -static struct of_device_id fs_enet_mdio_fec_match[]; static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct resource res; struct mii_bus *new_bus; struct fec_info *fec; int (*get_bus_freq)(struct device_node *); int ret = -ENOMEM, clock, speed; - match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); - if (!match) + if (!ofdev->dev.of_match) return -EINVAL; - get_bus_freq = match->data; + get_bus_freq = ofdev->dev.of_match->data; new_bus = mdiobus_alloc(); if (!new_bus) diff --git a/trunk/drivers/net/ftmac100.c b/trunk/drivers/net/ftmac100.c index 9bd7746cbfcf..a31661948c42 100644 --- a/trunk/drivers/net/ftmac100.c +++ b/trunk/drivers/net/ftmac100.c @@ -139,11 +139,11 @@ static int ftmac100_reset(struct ftmac100 *priv) * that hardware reset completed (what the f*ck). * We still need to wait for a while. */ - udelay(500); + usleep_range(500, 1000); return 0; } - udelay(1000); + usleep_range(1000, 10000); } netdev_err(netdev, "software reset failed\n"); @@ -772,7 +772,7 @@ static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg) if ((phycr & FTMAC100_PHYCR_MIIRD) == 0) return phycr & FTMAC100_PHYCR_MIIRDATA; - udelay(100); + usleep_range(100, 1000); } netdev_err(netdev, "mdio read timed out\n"); @@ -801,7 +801,7 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg, if ((phycr & FTMAC100_PHYCR_MIIWR) == 0) return; - udelay(100); + usleep_range(100, 1000); } netdev_err(netdev, "mdio write timed out\n"); diff --git a/trunk/drivers/net/hydra.c b/trunk/drivers/net/hydra.c index 1cd481c04202..c5ef62ceb840 100644 --- a/trunk/drivers/net/hydra.c +++ b/trunk/drivers/net/hydra.c @@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = { .ndo_open = hydra_open, .ndo_stop = hydra_close, - .ndo_start_xmit = __ei_start_xmit, - .ndo_tx_timeout = __ei_tx_timeout, - .ndo_get_stats = __ei_get_stats, - .ndo_set_multicast_list = __ei_set_multicast_list, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = __ei_poll, + .ndo_poll_controller = ei_poll, #endif }; @@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z) 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }; - dev = ____alloc_ei_netdev(0); + dev = alloc_ei_netdev(); if (!dev) return -ENOMEM; diff --git a/trunk/drivers/net/lantiq_etop.c b/trunk/drivers/net/lantiq_etop.c deleted file mode 100644 index 45f252b7da30..000000000000 --- a/trunk/drivers/net/lantiq_etop.c +++ /dev/null @@ -1,805 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) 2011 John Crispin - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#define LTQ_ETOP_MDIO 0x11804 -#define MDIO_REQUEST 0x80000000 -#define MDIO_READ 0x40000000 -#define MDIO_ADDR_MASK 0x1f -#define MDIO_ADDR_OFFSET 0x15 -#define MDIO_REG_MASK 0x1f -#define MDIO_REG_OFFSET 0x10 -#define MDIO_VAL_MASK 0xffff - -#define PPE32_CGEN 0x800 -#define LQ_PPE32_ENET_MAC_CFG 0x1840 - -#define LTQ_ETOP_ENETS0 0x11850 -#define LTQ_ETOP_MAC_DA0 0x1186C -#define LTQ_ETOP_MAC_DA1 0x11870 -#define LTQ_ETOP_CFG 0x16020 -#define LTQ_ETOP_IGPLEN 0x16080 - -#define MAX_DMA_CHAN 0x8 -#define MAX_DMA_CRC_LEN 0x4 -#define MAX_DMA_DATA_LEN 0x600 - -#define ETOP_FTCU BIT(28) -#define ETOP_MII_MASK 0xf -#define ETOP_MII_NORMAL 0xd -#define ETOP_MII_REVERSE 0xe -#define ETOP_PLEN_UNDER 0x40 -#define ETOP_CGEN 0x800 - -/* use 2 static channels for TX/RX */ -#define LTQ_ETOP_TX_CHANNEL 1 -#define LTQ_ETOP_RX_CHANNEL 6 -#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL) -#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL) - -#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x)) -#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y)) -#define ltq_etop_w32_mask(x, y, z) \ - ltq_w32_mask(x, y, ltq_etop_membase + (z)) - -#define DRV_VERSION "1.0" - -static void __iomem *ltq_etop_membase; - -struct ltq_etop_chan { - int idx; - int tx_free; - struct net_device *netdev; - struct napi_struct napi; - struct ltq_dma_channel dma; - struct sk_buff *skb[LTQ_DESC_NUM]; -}; - -struct ltq_etop_priv { - struct net_device *netdev; - struct ltq_eth_data *pldata; - struct resource *res; - - struct mii_bus *mii_bus; - struct phy_device *phydev; - - struct ltq_etop_chan ch[MAX_DMA_CHAN]; - int tx_free[MAX_DMA_CHAN >> 1]; - - spinlock_t lock; -}; - -static int -ltq_etop_alloc_skb(struct ltq_etop_chan *ch) -{ - ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN); - if (!ch->skb[ch->dma.desc]) - return -ENOMEM; - ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, - ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, - DMA_FROM_DEVICE); - ch->dma.desc_base[ch->dma.desc].addr = - CPHYSADDR(ch->skb[ch->dma.desc]->data); - ch->dma.desc_base[ch->dma.desc].ctl = - LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | - MAX_DMA_DATA_LEN; - skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN); - return 0; -} - -static void -ltq_etop_hw_receive(struct ltq_etop_chan *ch) -{ - struct ltq_etop_priv *priv = netdev_priv(ch->netdev); - struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; - struct sk_buff *skb = ch->skb[ch->dma.desc]; - int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN; - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - if (ltq_etop_alloc_skb(ch)) { - netdev_err(ch->netdev, - "failed to allocate new rx buffer, stopping DMA\n"); - ltq_dma_close(&ch->dma); - } - ch->dma.desc++; - ch->dma.desc %= LTQ_DESC_NUM; - spin_unlock_irqrestore(&priv->lock, flags); - - skb_put(skb, len); - skb->dev = ch->netdev; - skb->protocol = eth_type_trans(skb, ch->netdev); - netif_receive_skb(skb); -} - -static int -ltq_etop_poll_rx(struct napi_struct *napi, 
int budget) -{ - struct ltq_etop_chan *ch = container_of(napi, - struct ltq_etop_chan, napi); - int rx = 0; - int complete = 0; - - while ((rx < budget) && !complete) { - struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; - - if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { - ltq_etop_hw_receive(ch); - rx++; - } else { - complete = 1; - } - } - if (complete || !rx) { - napi_complete(&ch->napi); - ltq_dma_ack_irq(&ch->dma); - } - return rx; -} - -static int -ltq_etop_poll_tx(struct napi_struct *napi, int budget) -{ - struct ltq_etop_chan *ch = - container_of(napi, struct ltq_etop_chan, napi); - struct ltq_etop_priv *priv = netdev_priv(ch->netdev); - struct netdev_queue *txq = - netdev_get_tx_queue(ch->netdev, ch->idx >> 1); - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - while ((ch->dma.desc_base[ch->tx_free].ctl & - (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { - dev_kfree_skb_any(ch->skb[ch->tx_free]); - ch->skb[ch->tx_free] = NULL; - memset(&ch->dma.desc_base[ch->tx_free], 0, - sizeof(struct ltq_dma_desc)); - ch->tx_free++; - ch->tx_free %= LTQ_DESC_NUM; - } - spin_unlock_irqrestore(&priv->lock, flags); - - if (netif_tx_queue_stopped(txq)) - netif_tx_start_queue(txq); - napi_complete(&ch->napi); - ltq_dma_ack_irq(&ch->dma); - return 1; -} - -static irqreturn_t -ltq_etop_dma_irq(int irq, void *_priv) -{ - struct ltq_etop_priv *priv = _priv; - int ch = irq - LTQ_DMA_CH0_INT; - - napi_schedule(&priv->ch[ch].napi); - return IRQ_HANDLED; -} - -static void -ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - ltq_dma_free(&ch->dma); - if (ch->dma.irq) - free_irq(ch->dma.irq, priv); - if (IS_RX(ch->idx)) { - int desc; - for (desc = 0; desc < LTQ_DESC_NUM; desc++) - dev_kfree_skb_any(ch->skb[ch->dma.desc]); - } -} - -static void -ltq_etop_hw_exit(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - int i; - - ltq_pmu_disable(PMU_PPE); - for (i = 0; i < MAX_DMA_CHAN; i++) - if (IS_TX(i) || IS_RX(i)) - ltq_etop_free_channel(dev, &priv->ch[i]); -} - -static int -ltq_etop_hw_init(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - int i; - - ltq_pmu_enable(PMU_PPE); - - switch (priv->pldata->mii_mode) { - case PHY_INTERFACE_MODE_RMII: - ltq_etop_w32_mask(ETOP_MII_MASK, - ETOP_MII_REVERSE, LTQ_ETOP_CFG); - break; - - case PHY_INTERFACE_MODE_MII: - ltq_etop_w32_mask(ETOP_MII_MASK, - ETOP_MII_NORMAL, LTQ_ETOP_CFG); - break; - - default: - netdev_err(dev, "unknown mii mode %d\n", - priv->pldata->mii_mode); - return -ENOTSUPP; - } - - /* enable crc generation */ - ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); - - ltq_dma_init_port(DMA_PORT_ETOP); - - for (i = 0; i < MAX_DMA_CHAN; i++) { - int irq = LTQ_DMA_CH0_INT + i; - struct ltq_etop_chan *ch = &priv->ch[i]; - - ch->idx = ch->dma.nr = i; - - if (IS_TX(i)) { - ltq_dma_alloc_tx(&ch->dma); - request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, - "etop_tx", priv); - } else if (IS_RX(i)) { - ltq_dma_alloc_rx(&ch->dma); - for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; - ch->dma.desc++) - if (ltq_etop_alloc_skb(ch)) - return -ENOMEM; - ch->dma.desc = 0; - request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, - "etop_rx", priv); - } - ch->dma.irq = irq; - } - return 0; -} - -static void -ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) -{ - strcpy(info->driver, "Lantiq ETOP"); - strcpy(info->bus_info, "internal"); - strcpy(info->version, DRV_VERSION); -} - -static int 
-ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_ethtool_gset(priv->phydev, cmd); -} - -static int -ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_ethtool_sset(priv->phydev, cmd); -} - -static int -ltq_etop_nway_reset(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_start_aneg(priv->phydev); -} - -static const struct ethtool_ops ltq_etop_ethtool_ops = { - .get_drvinfo = ltq_etop_get_drvinfo, - .get_settings = ltq_etop_get_settings, - .set_settings = ltq_etop_set_settings, - .nway_reset = ltq_etop_nway_reset, -}; - -static int -ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data) -{ - u32 val = MDIO_REQUEST | - ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | - ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) | - phy_data; - - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) - ; - ltq_etop_w32(val, LTQ_ETOP_MDIO); - return 0; -} - -static int -ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg) -{ - u32 val = MDIO_REQUEST | MDIO_READ | - ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | - ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET); - - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) - ; - ltq_etop_w32(val, LTQ_ETOP_MDIO); - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) - ; - val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK; - return val; -} - -static void -ltq_etop_mdio_link(struct net_device *dev) -{ - /* nothing to do */ -} - -static int -ltq_etop_mdio_probe(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - struct phy_device *phydev = NULL; - int phy_addr; - - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { - if (priv->mii_bus->phy_map[phy_addr]) { - phydev = priv->mii_bus->phy_map[phy_addr]; - break; - } - } - - if (!phydev) { - netdev_err(dev, "no PHY found\n"); - return -ENODEV; - } - - phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link, - 0, priv->pldata->mii_mode); - - if (IS_ERR(phydev)) { - netdev_err(dev, "Could not attach to PHY\n"); - return PTR_ERR(phydev); - } - - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; - priv->phydev = phydev; - pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", - dev->name, phydev->drv->name, - dev_name(&phydev->dev), phydev->irq); - - return 0; -} - -static int -ltq_etop_mdio_init(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - int i; - int err; - - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - netdev_err(dev, "failed to allocate mii bus\n"); - err = -ENOMEM; - goto err_out; - } - - priv->mii_bus->priv = dev; - priv->mii_bus->read = ltq_etop_mdio_rd; - priv->mii_bus->write = ltq_etop_mdio_wr; - priv->mii_bus->name = "ltq_mii"; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0); - priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!priv->mii_bus->irq) { - err = -ENOMEM; - goto err_out_free_mdiobus; - } - - for (i = 0; i < PHY_MAX_ADDR; ++i) - priv->mii_bus->irq[i] = PHY_POLL; - - if (mdiobus_register(priv->mii_bus)) { - err = -ENXIO; - goto err_out_free_mdio_irq; - } - - if (ltq_etop_mdio_probe(dev)) { - err = -ENXIO; - goto err_out_unregister_bus; - } - return 0;
- -err_out_unregister_bus: - mdiobus_unregister(priv->mii_bus); -err_out_free_mdio_irq: - kfree(priv->mii_bus->irq); -err_out_free_mdiobus: - mdiobus_free(priv->mii_bus); -err_out: - return err; -} - -static void -ltq_etop_mdio_cleanup(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - phy_disconnect(priv->phydev); - mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); - mdiobus_free(priv->mii_bus); -} - -static int -ltq_etop_open(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - int i; - - for (i = 0; i < MAX_DMA_CHAN; i++) { - struct ltq_etop_chan *ch = &priv->ch[i]; - - if (!IS_TX(i) && (!IS_RX(i))) - continue; - ltq_dma_open(&ch->dma); - napi_enable(&ch->napi); - } - phy_start(priv->phydev); - netif_tx_start_all_queues(dev); - return 0; -} - -static int -ltq_etop_stop(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - int i; - - netif_tx_stop_all_queues(dev); - phy_stop(priv->phydev); - for (i = 0; i < MAX_DMA_CHAN; i++) { - struct ltq_etop_chan *ch = &priv->ch[i]; - - if (!IS_RX(i) && !IS_TX(i)) - continue; - napi_disable(&ch->napi); - ltq_dma_close(&ch->dma); - } - return 0; -} - -static int -ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) -{ - int queue = skb_get_queue_mapping(skb); - struct netdev_queue *txq = netdev_get_tx_queue(dev, queue); - struct ltq_etop_priv *priv = netdev_priv(dev); - struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1]; - struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; - int len; - unsigned long flags; - u32 byte_offset; - - len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; - - if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { - dev_kfree_skb_any(skb); - netdev_err(dev, "tx ring full\n"); - netif_tx_stop_queue(txq); - return NETDEV_TX_BUSY; - } - - /* dma needs to start on a 16 byte aligned address */ - byte_offset = CPHYSADDR(skb->data) % 16; - ch->skb[ch->dma.desc] = skb; - - dev->trans_start = jiffies; - - spin_lock_irqsave(&priv->lock, flags); - desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, - DMA_TO_DEVICE)) - byte_offset; - wmb(); - desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | - LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); - ch->dma.desc++; - ch->dma.desc %= LTQ_DESC_NUM; - spin_unlock_irqrestore(&priv->lock, flags); - - if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN) - netif_tx_stop_queue(txq); - - return NETDEV_TX_OK; -} - -static int -ltq_etop_change_mtu(struct net_device *dev, int new_mtu) -{ - int ret = eth_change_mtu(dev, new_mtu); - - if (!ret) { - struct ltq_etop_priv *priv = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, - LTQ_ETOP_IGPLEN); - spin_unlock_irqrestore(&priv->lock, flags); - } - return ret; -} - -static int -ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - /* TODO: mii-toll reports "No MII transceiver present!." 
?!*/ - return phy_mii_ioctl(priv->phydev, rq, cmd); -} - -static int -ltq_etop_set_mac_address(struct net_device *dev, void *p) -{ - int ret = eth_mac_addr(dev, p); - - if (!ret) { - struct ltq_etop_priv *priv = netdev_priv(dev); - unsigned long flags; - - /* store the mac for the unicast filter */ - spin_lock_irqsave(&priv->lock, flags); - ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0); - ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16, - LTQ_ETOP_MAC_DA1); - spin_unlock_irqrestore(&priv->lock, flags); - } - return ret; -} - -static void -ltq_etop_set_multicast_list(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - unsigned long flags; - - /* ensure that the unicast filter is not enabled in promiscious mode */ - spin_lock_irqsave(&priv->lock, flags); - if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) - ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0); - else - ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0); - spin_unlock_irqrestore(&priv->lock, flags); -} - -static u16 -ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) -{ - /* we are currently only using the first queue */ - return 0; -} - -static int -ltq_etop_init(struct net_device *dev) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - struct sockaddr mac; - int err; - - ether_setup(dev); - dev->watchdog_timeo = 10 * HZ; - err = ltq_etop_hw_init(dev); - if (err) - goto err_hw; - ltq_etop_change_mtu(dev, 1500); - - memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr)); - if (!is_valid_ether_addr(mac.sa_data)) { - pr_warn("etop: invalid MAC, using random\n"); - random_ether_addr(mac.sa_data); - } - - err = ltq_etop_set_mac_address(dev, &mac); - if (err) - goto err_netdev; - ltq_etop_set_multicast_list(dev); - err = ltq_etop_mdio_init(dev); - if (err) - goto err_netdev; - return 0; - -err_netdev: - unregister_netdev(dev); - free_netdev(dev); -err_hw: - ltq_etop_hw_exit(dev); - return err; -} - -static void -ltq_etop_tx_timeout(struct net_device *dev) -{ - int err; - - ltq_etop_hw_exit(dev); - err = ltq_etop_hw_init(dev); - if (err) - goto err_hw; - dev->trans_start = jiffies; - netif_wake_queue(dev); - return; - -err_hw: - ltq_etop_hw_exit(dev); - netdev_err(dev, "failed to restart etop after TX timeout\n"); -} - -static const struct net_device_ops ltq_eth_netdev_ops = { - .ndo_open = ltq_etop_open, - .ndo_stop = ltq_etop_stop, - .ndo_start_xmit = ltq_etop_tx, - .ndo_change_mtu = ltq_etop_change_mtu, - .ndo_do_ioctl = ltq_etop_ioctl, - .ndo_set_mac_address = ltq_etop_set_mac_address, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_multicast_list = ltq_etop_set_multicast_list, - .ndo_select_queue = ltq_etop_select_queue, - .ndo_init = ltq_etop_init, - .ndo_tx_timeout = ltq_etop_tx_timeout, -}; - -static int __init -ltq_etop_probe(struct platform_device *pdev) -{ - struct net_device *dev; - struct ltq_etop_priv *priv; - struct resource *res; - int err; - int i; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "failed to get etop resource\n"); - err = -ENOENT; - goto err_out; - } - - res = devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, "failed to request etop resource\n"); - err = -EBUSY; - goto err_out; - } - - ltq_etop_membase = devm_ioremap_nocache(&pdev->dev, - res->start, resource_size(res)); - if (!ltq_etop_membase) { - dev_err(&pdev->dev, "failed to remap etop engine %d\n", - pdev->id); - err = -ENOMEM; - goto err_out; - } - - 
dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4); - strcpy(dev->name, "eth%d"); - dev->netdev_ops = &ltq_eth_netdev_ops; - dev->ethtool_ops = &ltq_etop_ethtool_ops; - priv = netdev_priv(dev); - priv->res = res; - priv->pldata = dev_get_platdata(&pdev->dev); - priv->netdev = dev; - spin_lock_init(&priv->lock); - - for (i = 0; i < MAX_DMA_CHAN; i++) { - if (IS_TX(i)) - netif_napi_add(dev, &priv->ch[i].napi, - ltq_etop_poll_tx, 8); - else if (IS_RX(i)) - netif_napi_add(dev, &priv->ch[i].napi, - ltq_etop_poll_rx, 32); - priv->ch[i].netdev = dev; - } - - err = register_netdev(dev); - if (err) - goto err_free; - - platform_set_drvdata(pdev, dev); - return 0; - -err_free: - kfree(dev); -err_out: - return err; -} - -static int __devexit -ltq_etop_remove(struct platform_device *pdev) -{ - struct net_device *dev = platform_get_drvdata(pdev); - - if (dev) { - netif_tx_stop_all_queues(dev); - ltq_etop_hw_exit(dev); - ltq_etop_mdio_cleanup(dev); - unregister_netdev(dev); - } - return 0; -} - -static struct platform_driver ltq_mii_driver = { - .remove = __devexit_p(ltq_etop_remove), - .driver = { - .name = "ltq_etop", - .owner = THIS_MODULE, - }, -}; - -int __init -init_ltq_etop(void) -{ - int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe); - - if (ret) - pr_err("ltq_etop: Error registering platfom driver!"); - return ret; -} - -static void __exit -exit_ltq_etop(void) -{ - platform_driver_unregister(&ltq_mii_driver); -} - -module_init(init_ltq_etop); -module_exit(exit_ltq_etop); - -MODULE_AUTHOR("John Crispin "); -MODULE_DESCRIPTION("Lantiq SoC ETOP"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/mii.c b/trunk/drivers/net/mii.c index d4fc00b1ff93..0a6c6a2e7550 100644 --- a/trunk/drivers/net/mii.c +++ b/trunk/drivers/net/mii.c @@ -49,10 +49,6 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr) result |= ADVERTISED_100baseT_Half; if (advert & ADVERTISE_100FULL) result |= ADVERTISED_100baseT_Full; - if (advert & ADVERTISE_PAUSE_CAP) - result |= ADVERTISED_Pause; - if (advert & ADVERTISE_PAUSE_ASYM) - result |= ADVERTISED_Asym_Pause; return result; } diff --git a/trunk/drivers/net/ne-h8300.c b/trunk/drivers/net/ne-h8300.c index 7298a34bc795..30be8c634ebd 100644 --- a/trunk/drivers/net/ne-h8300.c +++ b/trunk/drivers/net/ne-h8300.c @@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev) #ifndef MODULE struct net_device * __init ne_probe(int unit) { - struct net_device *dev = ____alloc_ei_netdev(0); + struct net_device *dev = alloc_ei_netdev(); int err; if (!dev) @@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = { .ndo_open = ne_open, .ndo_stop = ne_close, - .ndo_start_xmit = __ei_start_xmit, - .ndo_tx_timeout = __ei_tx_timeout, - .ndo_get_stats = __ei_get_stats, - .ndo_set_multicast_list = __ei_set_multicast_list, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = __ei_poll, + .ndo_poll_controller = ei_poll, #endif }; @@ -637,7 +637,7 @@ int init_module(void) int err; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = ____alloc_ei_netdev(0); + struct net_device *dev = alloc_ei_netdev(); if (!dev) break; if (io[this_dev]) { diff --git a/trunk/drivers/net/netconsole.c b/trunk/drivers/net/netconsole.c
index eb41e44921e6..dfb67eb2a94b 100644 --- a/trunk/drivers/net/netconsole.c +++ b/trunk/drivers/net/netconsole.c @@ -671,7 +671,6 @@ static int netconsole_netdev_event(struct notifier_block *this, goto done; spin_lock_irqsave(&target_list_lock, flags); -restart: list_for_each_entry(nt, &target_list, list) { netconsole_target_get(nt); if (nt->np.dev == dev) { @@ -684,16 +683,9 @@ static int netconsole_netdev_event(struct notifier_block *this, * rtnl_lock already held */ if (nt->np.dev) { - spin_unlock_irqrestore( - &target_list_lock, - flags); __netpoll_cleanup(&nt->np); - spin_lock_irqsave(&target_list_lock, - flags); dev_put(nt->np.dev); nt->np.dev = NULL; - netconsole_target_put(nt); - goto restart; } /* Fall through */ case NETDEV_GOING_DOWN: diff --git a/trunk/drivers/net/pch_gbe/pch_gbe_main.c b/trunk/drivers/net/pch_gbe/pch_gbe_main.c index 56d049a472da..2ef2f9cdefa6 100644 --- a/trunk/drivers/net/pch_gbe/pch_gbe_main.c +++ b/trunk/drivers/net/pch_gbe/pch_gbe_main.c @@ -34,10 +34,6 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 -/* Macros for ML7223 */ -#define PCI_VENDOR_ID_ROHM 0x10db -#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 - #define PCH_GBE_TX_WEIGHT 64 #define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_BUFFER_WRITE 16 @@ -47,7 +43,8 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ PCH_GBE_CHIP_TYPE_INTERNAL | \ - PCH_GBE_RGMII_MODE_RGMII \ + PCH_GBE_RGMII_MODE_RGMII | \ + PCH_GBE_CRS_SEL \ ) /* Ethertype field values */ @@ -1497,11 +1494,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, /* Write meta date of skb */ skb_put(skb, length); skb->protocol = eth_type_trans(skb, netdev); - if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) - skb->ip_summed = CHECKSUM_NONE; - else + if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) == + PCH_GBE_RXD_ACC_STAT_TCPIPOK) { skb->ip_summed = CHECKSUM_UNNECESSARY; - + } else { + skb->ip_summed = CHECKSUM_NONE; + } napi_gro_receive(&adapter->napi, skb); (*work_done)++; pr_debug("Receive skb->ip_summed: %d length: %d\n", @@ -2422,13 +2420,6 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = (0xFFFF00) }, - {.vendor = PCI_VENDOR_ID_ROHM, - .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = (PCI_CLASS_NETWORK_ETHERNET << 8), - .class_mask = (0xFFFF00) - }, /* required last entry */ {0} }; diff --git a/trunk/drivers/net/r8169.c b/trunk/drivers/net/r8169.c index 397c36810a15..493b0de3848b 100644 --- a/trunk/drivers/net/r8169.c +++ b/trunk/drivers/net/r8169.c @@ -170,16 +170,6 @@ static const struct { }; #undef _R -static const struct rtl_firmware_info { - int mac_version; - const char *fw_name; -} rtl_firmware_infos[] = { - { .mac_version = RTL_GIGA_MAC_VER_25, .fw_name = FIRMWARE_8168D_1 }, - { .mac_version = RTL_GIGA_MAC_VER_26, .fw_name = FIRMWARE_8168D_2 }, - { .mac_version = RTL_GIGA_MAC_VER_29, .fw_name = FIRMWARE_8105E_1 }, - { .mac_version = RTL_GIGA_MAC_VER_30, .fw_name = FIRMWARE_8105E_1 } -}; - enum cfg_version { RTL_CFG_0 = 0x00, RTL_CFG_1, @@ -575,7 +565,6 @@ struct rtl8169_private { u32 saved_wolopts; const struct firmware *fw; -#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN); }; MODULE_AUTHOR("Realtek and the Linux r8169 crew "); @@ -1800,26 +1789,25 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw) static void rtl_release_firmware(struct rtl8169_private *tp) { - if 
(!IS_ERR_OR_NULL(tp->fw)) - release_firmware(tp->fw); - tp->fw = RTL_FIRMWARE_UNKNOWN; + release_firmware(tp->fw); + tp->fw = NULL; } -static void rtl_apply_firmware(struct rtl8169_private *tp) +static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name) { - const struct firmware *fw = tp->fw; + const struct firmware **fw = &tp->fw; + int rc = !*fw; - /* TODO: release firmware once rtl_phy_write_fw signals failures. */ - if (!IS_ERR_OR_NULL(fw)) - rtl_phy_write_fw(tp, fw); -} + if (rc) { + rc = request_firmware(fw, fw_name, &tp->pci_dev->dev); + if (rc < 0) + goto out; + } -static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) -{ - if (rtl_readphy(tp, reg) != val) - netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); - else - rtl_apply_firmware(tp); + /* TODO: release firmware once rtl_phy_write_fw signals failures. */ + rtl_phy_write_fw(tp, *fw); +out: + return rc; } static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) @@ -2258,8 +2246,10 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x001b); - - rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); + if ((rtl_readphy(tp, 0x06) != 0xbf00) || + (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) { + netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); + } rtl_writephy(tp, 0x1f, 0x0000); } @@ -2361,8 +2351,10 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0005); rtl_writephy(tp, 0x05, 0x001b); - - rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); + if ((rtl_readphy(tp, 0x06) != 0xb300) || + (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) { + netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); + } rtl_writephy(tp, 0x1f, 0x0000); } @@ -2482,7 +2474,8 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x18, 0x0310); msleep(100); - rtl_apply_firmware(tp); + if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0) + netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); } @@ -3244,8 +3237,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->timer.data = (unsigned long) dev; tp->timer.function = rtl8169_phy_timer; - tp->fw = RTL_FIRMWARE_UNKNOWN; - rc = register_netdev(dev); if (rc < 0) goto err_out_msi_4; @@ -3297,10 +3288,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&tp->task); - unregister_netdev(dev); - rtl_release_firmware(tp); + unregister_netdev(dev); + if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); @@ -3312,37 +3303,6 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -static void rtl_request_firmware(struct rtl8169_private *tp) -{ - int i; - - /* Return early if the firmware is already loaded / cached. 
*/ - if (!IS_ERR(tp->fw)) - goto out; - - for (i = 0; i < ARRAY_SIZE(rtl_firmware_infos); i++) { - const struct rtl_firmware_info *info = rtl_firmware_infos + i; - - if (info->mac_version == tp->mac_version) { - const char *name = info->fw_name; - int rc; - - rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev); - if (rc < 0) { - netif_warn(tp, ifup, tp->dev, "unable to load " - "firmware patch %s (%d)\n", name, rc); - goto out_disable_request_firmware; - } - goto out; - } - } - -out_disable_request_firmware: - tp->fw = NULL; -out: - return; -} - static int rtl8169_open(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -3374,13 +3334,11 @@ static int rtl8169_open(struct net_device *dev) smp_mb(); - rtl_request_firmware(tp); - retval = request_irq(dev->irq, rtl8169_interrupt, (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, dev->name, dev); if (retval < 0) - goto err_release_fw_2; + goto err_release_ring_2; napi_enable(&tp->napi); @@ -3401,8 +3359,7 @@ static int rtl8169_open(struct net_device *dev) out: return retval; -err_release_fw_2: - rtl_release_firmware(tp); +err_release_ring_2: rtl8169_rx_clear(tp); err_free_rx_1: dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, diff --git a/trunk/drivers/net/sfc/mcdi.c b/trunk/drivers/net/sfc/mcdi.c index 3dd45ed61f0a..d98479030ef2 100644 --- a/trunk/drivers/net/sfc/mcdi.c +++ b/trunk/drivers/net/sfc/mcdi.c @@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) return &nic_data->mcdi; } -static inline void -efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) -{ - struct siena_nic_data *nic_data = efx->nic_data; - value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); -} - -static inline void -efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) -{ - struct siena_nic_data *nic_data = efx->nic_data; - __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); -} - void efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; @@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned pdu = MCDI_PDU(efx); - unsigned doorbell = MCDI_DOORBELL(efx); + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); unsigned int i; efx_dword_t hdr; u32 xflags, seqno; @@ -106,28 +92,30 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, MCDI_HEADER_SEQ, seqno, MCDI_HEADER_XFLAGS, xflags); - efx_mcdi_writed(efx, &hdr, pdu); + efx_writed(efx, &hdr, pdu); - for (i = 0; i < inlen; i += 4) - efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), - pdu + 4 + i); + for (i = 0; i < inlen; i += 4) { + _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); + /* use wmb() within loop to inhibit write combining */ + wmb(); + } /* ring the doorbell with a distinctive value */ - EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); - efx_mcdi_writed(efx, &hdr, doorbell); + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); + wmb(); } static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned int pdu = MCDI_PDU(efx); + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); int i; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(outlen & 3 || outlen >= 0x100); for (i = 0; i < outlen; i += 4) - efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 
4 + i); + *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); } static int efx_mcdi_poll(struct efx_nic *efx) @@ -135,7 +123,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int time, finish; unsigned int respseq, respcmd, error; - unsigned int pdu = MCDI_PDU(efx); + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); unsigned int rc, spins; efx_dword_t reg; @@ -161,7 +149,8 @@ static int efx_mcdi_poll(struct efx_nic *efx) time = get_seconds(); - efx_mcdi_readd(efx, &reg, pdu); + rmb(); + efx_readd(efx, &reg, pdu); /* All 1's indicates that shared memory is in reset (and is * not a valid header). Wait for it to come out reset before @@ -188,7 +177,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) respseq, mcdi->seqno); rc = EIO; } else if (error) { - efx_mcdi_readd(efx, &reg, pdu + 4); + efx_readd(efx, &reg, pdu + 4); switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { #define TRANSLATE_ERROR(name) \ case MC_CMD_ERR_ ## name: \ @@ -222,21 +211,21 @@ static int efx_mcdi_poll(struct efx_nic *efx) /* Test and clear MC-rebooted flag for this port/function */ int efx_mcdi_poll_reboot(struct efx_nic *efx) { - unsigned int addr = MCDI_REBOOT_FLAG(efx); + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); efx_dword_t reg; uint32_t value; if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) return false; - efx_mcdi_readd(efx, &reg, addr); + efx_readd(efx, &reg, addr); value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); if (value == 0) return 0; EFX_ZERO_DWORD(reg); - efx_mcdi_writed(efx, &reg, addr); + efx_writed(efx, &reg, addr); if (value == MC_STATUS_DWORD_ASSERT) return -EINTR; diff --git a/trunk/drivers/net/sfc/nic.c b/trunk/drivers/net/sfc/nic.c index 9b29a8d7c449..10f1cb79c147 100644 --- a/trunk/drivers/net/sfc/nic.c +++ b/trunk/drivers/net/sfc/nic.c @@ -1937,13 +1937,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) size = min_t(size_t, table->step, 16); - if (table->offset >= efx->type->mem_map_size) { - /* No longer mapped; return dummy data */ - memcpy(buf, "\xde\xc0\xad\xde", 4); - buf += table->rows * size; - continue; - } - for (i = 0; i < table->rows; i++) { switch (table->step) { case 4: /* 32-bit register or SRAM */ diff --git a/trunk/drivers/net/sfc/nic.h b/trunk/drivers/net/sfc/nic.h index d91701abd331..a42db6e35be3 100644 --- a/trunk/drivers/net/sfc/nic.h +++ b/trunk/drivers/net/sfc/nic.h @@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) /** * struct siena_nic_data - Siena NIC state * @mcdi: Management-Controller-to-Driver Interface - * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
* @wol_filter_id: Wake-on-LAN packet filter id */ struct siena_nic_data { struct efx_mcdi_iface mcdi; - void __iomem *mcdi_smem; int wol_filter_id; }; diff --git a/trunk/drivers/net/sfc/siena.c b/trunk/drivers/net/sfc/siena.c index 837869b71db9..e4dd8986b1fe 100644 --- a/trunk/drivers/net/sfc/siena.c +++ b/trunk/drivers/net/sfc/siena.c @@ -220,26 +220,12 @@ static int siena_probe_nic(struct efx_nic *efx) efx_reado(efx, &reg, FR_AZ_CS_DEBUG); efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; - /* Initialise MCDI */ - nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + - FR_CZ_MC_TREG_SMEM, - FR_CZ_MC_TREG_SMEM_STEP * - FR_CZ_MC_TREG_SMEM_ROWS); - if (!nic_data->mcdi_smem) { - netif_err(efx, probe, efx->net_dev, - "could not map MCDI at %llx+%x\n", - (unsigned long long)efx->membase_phys + - FR_CZ_MC_TREG_SMEM, - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); - rc = -ENOMEM; - goto fail1; - } efx_mcdi_init(efx); /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) - goto fail2; + goto fail1; /* Let the BMC know that the driver is now in charge of link and * filter settings. We must do this before we reset the NIC */ @@ -294,7 +280,6 @@ static int siena_probe_nic(struct efx_nic *efx) fail3: efx_mcdi_drv_attach(efx, false, NULL); fail2: - iounmap(nic_data->mcdi_smem); fail1: kfree(efx->nic_data); return rc; @@ -374,8 +359,6 @@ static int siena_init_nic(struct efx_nic *efx) static void siena_remove_nic(struct efx_nic *efx) { - struct siena_nic_data *nic_data = efx->nic_data; - efx_nic_free_buffer(efx, &efx->irq_status); siena_reset_hw(efx, RESET_TYPE_ALL); @@ -385,8 +368,7 @@ static void siena_remove_nic(struct efx_nic *efx) efx_mcdi_drv_attach(efx, false, NULL); /* Tear down the private nic state */ - iounmap(nic_data->mcdi_smem); - kfree(nic_data); + kfree(efx->nic_data); efx->nic_data = NULL; } @@ -624,7 +606,8 @@ struct efx_nic_type siena_a0_nic_type = { .default_mac_ops = &efx_mcdi_mac_operations, .revision = EFX_REV_SIENA_A0, - .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ + .mem_map_size = (FR_CZ_MC_TREG_SMEM + + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, diff --git a/trunk/drivers/net/slip.c b/trunk/drivers/net/slip.c index 8ec1a9a0bb9a..86cbb9ea2f26 100644 --- a/trunk/drivers/net/slip.c +++ b/trunk/drivers/net/slip.c @@ -853,9 +853,7 @@ static int slip_open(struct tty_struct *tty) /* Done. We have linked the TTY line to a channel.
*/ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ - - /* TTY layer expects 0 on success */ - return 0; + return sl->dev->base_addr; err_free_bufs: sl_free_bufs(sl); diff --git a/trunk/drivers/net/sunhme.c b/trunk/drivers/net/sunhme.c index bff2f7999ff0..eb4f59fb01e9 100644 --- a/trunk/drivers/net/sunhme.c +++ b/trunk/drivers/net/sunhme.c @@ -3237,18 +3237,15 @@ static void happy_meal_pci_exit(void) #endif #ifdef CONFIG_SBUS -static const struct of_device_id hme_sbus_match[]; static int __devinit hme_sbus_probe(struct platform_device *op) { - const struct of_device_id *match; struct device_node *dp = op->dev.of_node; const char *model = of_get_property(dp, "model", NULL); int is_qfe; - match = of_match_device(hme_sbus_match, &op->dev); - if (!match) + if (!op->dev.of_match) return -EINVAL; - is_qfe = (match->data != NULL); + is_qfe = (op->dev.of_match->data != NULL); if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) is_qfe = 1; diff --git a/trunk/drivers/net/tg3.c b/trunk/drivers/net/tg3.c index 7a5daefb6f33..b8c5f35577e4 100644 --- a/trunk/drivers/net/tg3.c +++ b/trunk/drivers/net/tg3.c @@ -12327,10 +12327,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) if (val & VCPU_CFGSHDW_ASPM_DBNC) tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; if ((val & VCPU_CFGSHDW_WOL_ENABLE) && - (val & VCPU_CFGSHDW_WOL_MAGPKT)) { + (val & VCPU_CFGSHDW_WOL_MAGPKT)) tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; - device_set_wakeup_enable(&tp->pdev->dev, true); - } goto done; } @@ -12463,10 +12461,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && - (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { + (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; - device_set_wakeup_enable(&tp->pdev->dev, true); - } if (cfg2 & (1 << 17)) tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; diff --git a/trunk/drivers/net/usb/cdc_ether.c b/trunk/drivers/net/usb/cdc_ether.c index c924ea2bce07..341f7056a800 100644 --- a/trunk/drivers/net/usb/cdc_ether.c +++ b/trunk/drivers/net/usb/cdc_ether.c @@ -460,7 +460,7 @@ static const struct driver_info cdc_info = { .manage_power = cdc_manage_power, }; -static const struct driver_info wwan_info = { +static const struct driver_info mbm_info = { .description = "Mobile Broadband Network Device", .flags = FLAG_WWAN, .bind = usbnet_cdc_bind, @@ -471,7 +471,6 @@ static const struct driver_info wwan_info = { /*-------------------------------------------------------------------------*/ -#define HUAWEI_VENDOR_ID 0x12D1 static const struct usb_device_id products [] = { /* @@ -567,7 +566,7 @@ static const struct usb_device_id products [] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = (unsigned long)&wwan_info, + .driver_info = 0, }, /* @@ -588,17 +587,8 @@ static const struct usb_device_id products [] = { }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), - .driver_info = (unsigned long)&wwan_info, + .driver_info = (unsigned long)&mbm_info, -}, { - /* Various Huawei modems with a network port like the UMG1831 */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR - | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_COMM, - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, - .bInterfaceProtocol = 255, - .driver_info = (unsigned long)&wwan_info, }, { }, // END }; diff --git a/trunk/drivers/net/usb/cdc_ncm.c 
b/trunk/drivers/net/usb/cdc_ncm.c index 1033ef6476a4..967371f04454 100644 --- a/trunk/drivers/net/usb/cdc_ncm.c +++ b/trunk/drivers/net/usb/cdc_ncm.c @@ -54,13 +54,13 @@ #include #include -#define DRIVER_VERSION "23-Apr-2011" +#define DRIVER_VERSION "7-Feb-2011" /* CDC NCM subclass 3.2.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 /* Maximum NTB length */ -#define CDC_NCM_NTB_MAX_SIZE_TX (16384 + 4) /* bytes, must be short terminated */ +#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */ #define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */ /* Minimum value for MaxDatagramSize, ch. 6.2.9 */ diff --git a/trunk/drivers/net/usb/ipheth.c b/trunk/drivers/net/usb/ipheth.c index 81126ff85e05..7d42f9a2c068 100644 --- a/trunk/drivers/net/usb/ipheth.c +++ b/trunk/drivers/net/usb/ipheth.c @@ -65,7 +65,6 @@ #define IPHETH_USBINTF_PROTO 1 #define IPHETH_BUF_SIZE 1516 -#define IPHETH_IP_ALIGN 2 /* padding at front of URB */ #define IPHETH_TX_TIMEOUT (5 * HZ) #define IPHETH_INTFNUM 2 @@ -203,21 +202,18 @@ static void ipheth_rcvbulk_callback(struct urb *urb) return; } - if (urb->actual_length <= IPHETH_IP_ALIGN) { - dev->net->stats.rx_length_errors++; - return; - } - len = urb->actual_length - IPHETH_IP_ALIGN; - buf = urb->transfer_buffer + IPHETH_IP_ALIGN; + len = urb->actual_length; + buf = urb->transfer_buffer; - skb = dev_alloc_skb(len); + skb = dev_alloc_skb(NET_IP_ALIGN + len); if (!skb) { err("%s: dev_alloc_skb: -ENOMEM", __func__); dev->net->stats.rx_dropped++; return; } - memcpy(skb_put(skb, len), buf, len); + skb_reserve(skb, NET_IP_ALIGN); + memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); skb->dev = dev->net; skb->protocol = eth_type_trans(skb, dev->net); diff --git a/trunk/drivers/net/usb/smsc95xx.c b/trunk/drivers/net/usb/smsc95xx.c index 48d4efdb4959..47a6c870b51f 100644 --- a/trunk/drivers/net/usb/smsc95xx.c +++ b/trunk/drivers/net/usb/smsc95xx.c @@ -730,7 +730,7 @@ static int smsc95xx_phy_initialize(struct usbnet *dev) msleep(10); bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); timeout++; - } while ((bmcr & BMCR_RESET) && (timeout < 100)); + } while ((bmcr & MII_BMCR) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on PHY Reset"); diff --git a/trunk/drivers/net/usb/usbnet.c b/trunk/drivers/net/usb/usbnet.c index 9ab439d144ed..069c1cf0fdf7 100644 --- a/trunk/drivers/net/usb/usbnet.c +++ b/trunk/drivers/net/usb/usbnet.c @@ -645,7 +645,6 @@ int usbnet_stop (struct net_device *net) struct driver_info *info = dev->driver_info; int retval; - clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue (net); netif_info(dev, ifdown, dev->net, @@ -737,7 +736,6 @@ int usbnet_open (struct net_device *net) } } - set_bit(EVENT_DEV_OPEN, &dev->flags); netif_start_queue (net); netif_info(dev, ifup, dev->net, "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n", @@ -1261,9 +1259,6 @@ void usbnet_disconnect (struct usb_interface *intf) if (dev->driver_info->unbind) dev->driver_info->unbind (dev, intf); - usb_kill_urb(dev->interrupt); - usb_free_urb(dev->interrupt); - free_netdev(net); usb_put_dev (xdev); } @@ -1503,10 +1498,6 @@ int usbnet_resume (struct usb_interface *intf) int retval; if (!--dev->suspend_count) { - /* resume interrupt URBs */ - if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags)) - usb_submit_urb(dev->interrupt, GFP_NOIO); - spin_lock_irq(&dev->txq.lock); while ((res = usb_get_from_anchor(&dev->deferred))) { @@ -1525,12 +1516,9 @@ int usbnet_resume (struct usb_interface *intf) smp_mb(); clear_bit(EVENT_DEV_ASLEEP, 
&dev->flags); spin_unlock_irq(&dev->txq.lock); - - if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { - if (!(dev->txq.qlen >= TX_QLEN(dev))) - netif_start_queue(dev->net); - tasklet_schedule (&dev->bh); - } + if (!(dev->txq.qlen >= TX_QLEN(dev))) + netif_start_queue(dev->net); + tasklet_schedule (&dev->bh); } return 0; } diff --git a/trunk/drivers/net/veth.c b/trunk/drivers/net/veth.c index 3b99f64104fd..2de9b90c5f8f 100644 --- a/trunk/drivers/net/veth.c +++ b/trunk/drivers/net/veth.c @@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (tb[IFLA_ADDRESS] == NULL) random_ether_addr(dev->dev_addr); - if (tb[IFLA_IFNAME]) - nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); - else - snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); - - if (strchr(dev->name, '%')) { - err = dev_alloc_name(dev, dev->name); - if (err < 0) - goto err_alloc_name; - } - err = register_netdevice(dev); if (err < 0) goto err_register_dev; @@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, err_register_dev: /* nothing to do */ -err_alloc_name: err_configure_peer: unregister_netdevice(peer); return err; diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c index c16ed961153a..0d47c3a05307 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c @@ -178,7 +178,6 @@ static void vmxnet3_process_events(struct vmxnet3_adapter *adapter) { int i; - unsigned long flags; u32 events = le32_to_cpu(adapter->shared->ecr); if (!events) return; @@ -191,10 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) /* Check if there is an error on xmit/recv queues */ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { - spin_lock_irqsave(&adapter->cmd_lock, flags); + spin_lock(&adapter->cmd_lock); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS); - spin_unlock_irqrestore(&adapter->cmd_lock, flags); + spin_unlock(&adapter->cmd_lock); for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tqd_start[i].status.stopped) @@ -2734,14 +2733,13 @@ static void vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) { u32 cfg; - unsigned long flags; /* intr settings */ - spin_lock_irqsave(&adapter->cmd_lock, flags); + spin_lock(&adapter->cmd_lock); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR); cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); - spin_unlock_irqrestore(&adapter->cmd_lock, flags); + spin_unlock(&adapter->cmd_lock); adapter->intr.type = cfg & 0x3; adapter->intr.mask_mode = (cfg >> 2) & 0x3; diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c b/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c index 976467253d20..51f2ef142a5b 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/trunk/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -311,9 +311,6 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) /* toggle the LRO feature*/ netdev->features ^= NETIF_F_LRO; - /* Update private LRO flag */ - adapter->lro = lro_requested; - /* update harware LRO capability accordingly */ if (lro_requested) adapter->shared->devRead.misc.uptFeatures |= diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index 1482fa650833..17d04ff8d678 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -2141,8 +2141,6 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) static void 
ath9k_flush(struct ieee80211_hw *hw, bool drop) { struct ath_softc *sc = hw->priv; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); int timeout = 200; /* ms */ int i, j; @@ -2151,12 +2149,6 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) cancel_delayed_work_sync(&sc->tx_complete_work); - if (sc->sc_flags & SC_OP_INVALID) { - ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); - mutex_unlock(&sc->mutex); - return; - } - if (drop) timeout = 1; diff --git a/trunk/drivers/net/wireless/ath/ath9k/recv.c b/trunk/drivers/net/wireless/ath/ath9k/recv.c index b29c80def35e..dcd19bc337d1 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/recv.c +++ b/trunk/drivers/net/wireless/ath/ath9k/recv.c @@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc) "confusing the DMA engine when we start RX up\n"); ATH_DBG_WARN_ON_ONCE(!stopped); } - return stopped && !reset; + return stopped || reset; } void ath_flushrecv(struct ath_softc *sc) diff --git a/trunk/drivers/net/wireless/b43/main.c b/trunk/drivers/net/wireless/b43/main.c index 5af40d9170a0..d59b0168c14a 100644 --- a/trunk/drivers/net/wireless/b43/main.c +++ b/trunk/drivers/net/wireless/b43/main.c @@ -72,7 +72,6 @@ MODULE_FIRMWARE("b43/ucode11.fw"); MODULE_FIRMWARE("b43/ucode13.fw"); MODULE_FIRMWARE("b43/ucode14.fw"); MODULE_FIRMWARE("b43/ucode15.fw"); -MODULE_FIRMWARE("b43/ucode16_mimo.fw"); MODULE_FIRMWARE("b43/ucode5.fw"); MODULE_FIRMWARE("b43/ucode9.fw"); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-tx.c index 79ac081832fb..5c40502f869a 100644 --- a/trunk/drivers/net/wireless/iwlegacy/iwl-4965-tx.c +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-4965-tx.c @@ -316,18 +316,12 @@ int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) hdr_len = ieee80211_hdrlen(fc); - /* For management frames use broadcast id to do not break aggregation */ - if (!ieee80211_is_data(fc)) - sta_id = ctx->bcast_sta_id; - else { - /* Find index into station table for destination station */ - sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta); - - if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", - hdr->addr1); - goto drop_unlock; - } + /* Find index into station table for destination station */ + sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop_unlock; } IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); @@ -1133,16 +1127,12 @@ int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { tx_info = &txq->txb[txq->q.read_ptr]; - - if (WARN_ON_ONCE(tx_info->skb == NULL)) - continue; + iwl4965_tx_status(priv, tx_info, + txq_id >= IWL4965_FIRST_AMPDU_QUEUE); hdr = (struct ieee80211_hdr *)tx_info->skb->data; - if (ieee80211_is_data_qos(hdr->frame_control)) + if (hdr && ieee80211_is_data_qos(hdr->frame_control)) nfreed++; - - iwl4965_tx_status(priv, tx_info, - txq_id >= IWL4965_FIRST_AMPDU_QUEUE); tx_info->skb = NULL; priv->cfg->ops->lib->txq_free_tfd(priv, txq); diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-core.c b/trunk/drivers/net/wireless/iwlegacy/iwl-core.c index 42db0fc8b921..c1511b14b239 100644 --- a/trunk/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-core.c @@ -2155,13 +2155,6 @@ int iwl_legacy_mac_config(struct 
ieee80211_hw *hw, u32 changed) goto set_ch_out; } - if (priv->iw_mode == NL80211_IFTYPE_ADHOC && - !iwl_legacy_is_channel_ibss(ch_info)) { - IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); - ret = -EINVAL; - goto set_ch_out; - } - spin_lock_irqsave(&priv->lock, flags); for_each_context(priv, ctx) { diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-dev.h b/trunk/drivers/net/wireless/iwlegacy/iwl-dev.h index f43ac1eb9014..9ee849d669f3 100644 --- a/trunk/drivers/net/wireless/iwlegacy/iwl-dev.h +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-dev.h @@ -1411,12 +1411,6 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch) return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; } -static inline int -iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch) -{ - return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; -} - static inline void __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) { diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl-led.c b/trunk/drivers/net/wireless/iwlegacy/iwl-led.c index bda0d61b2c0d..15eb8b707157 100644 --- a/trunk/drivers/net/wireless/iwlegacy/iwl-led.c +++ b/trunk/drivers/net/wireless/iwlegacy/iwl-led.c @@ -48,21 +48,8 @@ module_param(led_mode, int, S_IRUGO); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); -/* Throughput OFF time(ms) ON time (ms) - * >300 25 25 - * >200 to 300 40 40 - * >100 to 200 55 55 - * >70 to 100 65 65 - * >50 to 70 75 75 - * >20 to 50 85 85 - * >10 to 20 95 95 - * >5 to 10 110 110 - * >1 to 5 130 130 - * >0 to 1 167 167 - * <=0 SOLID ON - */ static const struct ieee80211_tpt_blink iwl_blink[] = { - { .throughput = 0, .blink_time = 334 }, + { .throughput = 0 * 1024 - 1, .blink_time = 334 }, { .throughput = 1 * 1024 - 1, .blink_time = 260 }, { .throughput = 5 * 1024 - 1, .blink_time = 220 }, { .throughput = 10 * 1024 - 1, .blink_time = 190 }, @@ -114,11 +101,6 @@ static int iwl_legacy_led_cmd(struct iwl_priv *priv, if (priv->blink_on == on && priv->blink_off == off) return 0; - if (off == 0) { - /* led is SOLID_ON */ - on = IWL_LED_SOLID; - } - IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", priv->cfg->base_params->led_compensation); led_cmd.on = iwl_legacy_blink_compensation(priv, on, diff --git a/trunk/drivers/net/wireless/iwlegacy/iwl4965-base.c b/trunk/drivers/net/wireless/iwlegacy/iwl4965-base.c index a62fe24ee594..d484c3678163 100644 --- a/trunk/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ b/trunk/drivers/net/wireless/iwlegacy/iwl4965-base.c @@ -2984,15 +2984,15 @@ static void iwl4965_bg_txpower_work(struct work_struct *work) struct iwl_priv *priv = container_of(work, struct iwl_priv, txpower_work); - mutex_lock(&priv->mutex); - /* If a scan happened to start before we got here * then just return; the statistics notification will * kick off another scheduled work to compensate for * any temperature delta we missed here. */ if (test_bit(STATUS_EXIT_PENDING, &priv->status) || test_bit(STATUS_SCANNING, &priv->status)) - goto out; + return; + + mutex_lock(&priv->mutex); /* Regardless of if we are associated, we must reconfigure the * TX power since frames can be sent on non-radar channels while @@ -3002,7 +3002,7 @@ static void iwl4965_bg_txpower_work(struct work_struct *work) /* Update last_temperature to keep is_calib_needed from running * when it isn't needed... 
*/ priv->last_temperature = priv->temperature; -out: + mutex_unlock(&priv->mutex); } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index fbbde0712fa5..dfdbea6e8f99 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -335,6 +335,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) struct ieee80211_channel *channel = conf->channel; const struct iwl_channel_info *ch_info; int ret = 0; + bool ht_changed[NUM_IWL_RXON_CTX] = {}; IWL_DEBUG_MAC80211(priv, "changed %#x", changed); @@ -382,8 +383,10 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) for_each_context(priv, ctx) { /* Configure HT40 channels */ - if (ctx->ht.enabled != conf_is_ht(conf)) + if (ctx->ht.enabled != conf_is_ht(conf)) { ctx->ht.enabled = conf_is_ht(conf); + ht_changed[ctx->ctxid] = true; + } if (ctx->ht.enabled) { if (conf_is_ht40_minus(conf)) { @@ -452,6 +455,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) continue; iwlagn_commit_rxon(priv, ctx); + if (ht_changed[ctx->ctxid]) + iwlagn_update_qos(priv, ctx); } out: mutex_unlock(&priv->mutex); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 0712b67283a4..a709d05c5868 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -568,17 +568,12 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) hdr_len = ieee80211_hdrlen(fc); - /* For management frames use broadcast id to do not break aggregation */ - if (!ieee80211_is_data(fc)) - sta_id = ctx->bcast_sta_id; - else { - /* Find index into station table for destination station */ - sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", - hdr->addr1); - goto drop_unlock; - } + /* Find index into station table for destination station */ + sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop_unlock; } IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); @@ -1229,16 +1224,12 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { tx_info = &txq->txb[txq->q.read_ptr]; - - if (WARN_ON_ONCE(tx_info->skb == NULL)) - continue; + iwlagn_tx_status(priv, tx_info, + txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); hdr = (struct ieee80211_hdr *)tx_info->skb->data; - if (ieee80211_is_data_qos(hdr->frame_control)) + if (hdr && ieee80211_is_data_qos(hdr->frame_control)) nfreed++; - - iwlagn_tx_status(priv, tx_info, - txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); tx_info->skb = NULL; if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) diff --git a/trunk/drivers/net/wireless/libertas/cmd.c b/trunk/drivers/net/wireless/libertas/cmd.c index f3ac62431a30..7e8a658b7670 100644 --- a/trunk/drivers/net/wireless/libertas/cmd.c +++ b/trunk/drivers/net/wireless/libertas/cmd.c @@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv) cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); - spin_lock_irqsave(&priv->driver_lock, flags); list_del(&cmdnode->list); + spin_lock_irqsave(&priv->driver_lock, flags); lbs_complete_command(priv, 
cmdnode, 0); spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv) (priv->psstate == PS_STATE_PRE_SLEEP)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); - spin_lock_irqsave(&priv->driver_lock, flags); list_del(&cmdnode->list); + spin_lock_irqsave(&priv->driver_lock, flags); lbs_complete_command(priv, cmdnode, 0); spin_unlock_irqrestore(&priv->driver_lock, flags); priv->needtowakeup = 1; @@ -1366,9 +1366,7 @@ int lbs_execute_next_command(struct lbs_private *priv) "EXEC_NEXT_CMD: sending EXIT_PS\n"); } } - spin_lock_irqsave(&priv->driver_lock, flags); list_del(&cmdnode->list); - spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", le16_to_cpu(cmd->command)); lbs_submit_command(priv, cmdnode); diff --git a/trunk/drivers/net/zorro8390.c b/trunk/drivers/net/zorro8390.c index 8c7c522a056a..b78a38d9172a 100644 --- a/trunk/drivers/net/zorro8390.c +++ b/trunk/drivers/net/zorro8390.c @@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, board = z->resource.start; ioaddr = board+cards[i].offset; - dev = ____alloc_ei_netdev(0); + dev = alloc_ei_netdev(); if (!dev) return -ENOMEM; if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { @@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, static const struct net_device_ops zorro8390_netdev_ops = { .ndo_open = zorro8390_open, .ndo_stop = zorro8390_close, - .ndo_start_xmit = __ei_start_xmit, - .ndo_tx_timeout = __ei_tx_timeout, - .ndo_get_stats = __ei_get_stats, - .ndo_set_multicast_list = __ei_set_multicast_list, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_multicast_list = ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = __ei_poll, + .ndo_poll_controller = ei_poll, #endif }; diff --git a/trunk/drivers/pci/intel-iommu.c b/trunk/drivers/pci/intel-iommu.c index 6af6b628175b..d552d2c77844 100644 --- a/trunk/drivers/pci/intel-iommu.c +++ b/trunk/drivers/pci/intel-iommu.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include "pci.h" diff --git a/trunk/drivers/pci/iov.c b/trunk/drivers/pci/iov.c index 42fae4776515..553d8ee55c1c 100644 --- a/trunk/drivers/pci/iov.c +++ b/trunk/drivers/pci/iov.c @@ -13,7 +13,6 @@ #include #include #include -#include #include "pci.h" #define VIRTFN_ID_LEN 16 diff --git a/trunk/drivers/pci/pci.h b/trunk/drivers/pci/pci.h index 4020025f854e..a6ec200fe5ee 100644 --- a/trunk/drivers/pci/pci.h +++ b/trunk/drivers/pci/pci.h @@ -250,6 +250,15 @@ struct pci_sriov { u8 __iomem *mstate; /* VF Migration State Array */ }; +/* Address Translation Service */ +struct pci_ats { + int pos; /* capability position */ + int stu; /* Smallest Translation Unit */ + int qdep; /* Invalidate Queue Depth */ + int ref_cnt; /* Physical Function reference count */ + unsigned int is_enabled:1; /* Enable bit is set */ +}; + #ifdef CONFIG_PCI_IOV extern int pci_iov_init(struct pci_dev *dev); extern void pci_iov_release(struct pci_dev *dev); @@ -260,6 +269,19 @@ extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, extern void pci_restore_iov_state(struct pci_dev *dev); extern int pci_iov_bus_range(struct pci_bus *bus); +extern int pci_enable_ats(struct pci_dev *dev, int ps); +extern void 
pci_disable_ats(struct pci_dev *dev); +extern int pci_ats_queue_depth(struct pci_dev *dev); +/** + * pci_ats_enabled - query the ATS status + * @dev: the PCI device + * + * Returns 1 if ATS capability is enabled, or 0 if not. + */ +static inline int pci_ats_enabled(struct pci_dev *dev) +{ + return dev->ats && dev->ats->is_enabled; +} #else static inline int pci_iov_init(struct pci_dev *dev) { @@ -282,6 +304,21 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) return 0; } +static inline int pci_enable_ats(struct pci_dev *dev, int ps) +{ + return -ENODEV; +} +static inline void pci_disable_ats(struct pci_dev *dev) +{ +} +static inline int pci_ats_queue_depth(struct pci_dev *dev) +{ + return -ENODEV; +} +static inline int pci_ats_enabled(struct pci_dev *dev) +{ + return 0; +} #endif /* CONFIG_PCI_IOV */ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, diff --git a/trunk/drivers/pci/setup-bus.c b/trunk/drivers/pci/setup-bus.c index a806cb321d2e..ebf51ad1b714 100644 --- a/trunk/drivers/pci/setup-bus.c +++ b/trunk/drivers/pci/setup-bus.c @@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, } size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), 4096); - size1 = (!add_head || (add_head && !add_size)) ? size0 : + size1 = !add_size? size0: calculate_iosize(size, min_size+add_size, size1, resource_size(b_res), 4096); if (!size0 && !size1) { @@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, align += aligns[order]; } size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); - size1 = (!add_head || (add_head && !add_size)) ? size0 : + size1 = !add_size ? size : calculate_memsize(size, min_size+add_size, 0, resource_size(b_res), min_align); if (!size0 && !size1) { diff --git a/trunk/drivers/pcmcia/pcmcia_resource.c b/trunk/drivers/pcmcia/pcmcia_resource.c index e8c19def1b0f..fe77e8223841 100644 --- a/trunk/drivers/pcmcia/pcmcia_resource.c +++ b/trunk/drivers/pcmcia/pcmcia_resource.c @@ -173,7 +173,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev, c = p_dev->function_config; if (!(c->state & CONFIG_LOCKED)) { - dev_dbg(&p_dev->dev, "Configuration isn't locked\n"); + dev_dbg(&p_dev->dev, "Configuration isn't't locked\n"); mutex_unlock(&s->ops_mutex); return -EACCES; } diff --git a/trunk/drivers/platform/x86/eeepc-laptop.c b/trunk/drivers/platform/x86/eeepc-laptop.c index 2c1abf63957f..5f2dd386152b 100644 --- a/trunk/drivers/platform/x86/eeepc-laptop.c +++ b/trunk/drivers/platform/x86/eeepc-laptop.c @@ -585,9 +585,8 @@ static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc) return true; } -static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) +static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) { - struct pci_dev *port; struct pci_dev *dev; struct pci_bus *bus; bool blocked = eeepc_wlan_rfkill_blocked(eeepc); @@ -600,16 +599,9 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) mutex_lock(&eeepc->hotplug_lock); if (eeepc->hotplug_slot) { - port = acpi_get_pci_dev(handle); - if (!port) { - pr_warning("Unable to find port\n"); - goto out_unlock; - } - - bus = port->subordinate; - + bus = pci_find_bus(0, 1); if (!bus) { - pr_warning("Unable to find PCI bus?\n"); + pr_warning("Unable to find PCI bus 1?\n"); goto out_unlock; } @@ -617,7 +609,6 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) pr_err("Unable to read PCI config space?\n"); 
goto out_unlock; } - absent = (l == 0xffffffff); if (blocked != absent) { @@ -656,17 +647,6 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) mutex_unlock(&eeepc->hotplug_lock); } -static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node) -{ - acpi_status status = AE_OK; - acpi_handle handle; - - status = acpi_get_handle(NULL, node, &handle); - - if (ACPI_SUCCESS(status)) - eeepc_rfkill_hotplug(eeepc, handle); -} - static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) { struct eeepc_laptop *eeepc = data; @@ -674,7 +654,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) if (event != ACPI_NOTIFY_BUS_CHECK) return; - eeepc_rfkill_hotplug(eeepc, handle); + eeepc_rfkill_hotplug(eeepc); } static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc, @@ -692,11 +672,6 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc, eeepc); if (ACPI_FAILURE(status)) pr_warning("Failed to register notify on %s\n", node); - /* - * Refresh pci hotplug in case the rfkill state was - * changed during setup. - */ - eeepc_rfkill_hotplug(eeepc, handle); } else return -ENODEV; @@ -718,12 +693,6 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc, if (ACPI_FAILURE(status)) pr_err("Error removing rfkill notify handler %s\n", node); - /* - * Refresh pci hotplug in case the rfkill - * state was changed after - * eeepc_unregister_rfkill_notifier() - */ - eeepc_rfkill_hotplug(eeepc, handle); } } @@ -847,7 +816,11 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc) rfkill_destroy(eeepc->wlan_rfkill); eeepc->wlan_rfkill = NULL; } - + /* + * Refresh pci hotplug in case the rfkill state was changed after + * eeepc_unregister_rfkill_notifier() + */ + eeepc_rfkill_hotplug(eeepc); if (eeepc->hotplug_slot) pci_hp_deregister(eeepc->hotplug_slot); @@ -916,6 +889,11 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc) eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5"); eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6"); eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7"); + /* + * Refresh pci hotplug in case the rfkill state was changed during + * setup. 
+	 */
+	eeepc_rfkill_hotplug(eeepc);
 
 exit:
 	if (result && result != -ENODEV)
@@ -950,11 +928,8 @@ static int eeepc_hotk_restore(struct device *device)
 	struct eeepc_laptop *eeepc = dev_get_drvdata(device);
 
 	/* Refresh both wlan rfkill state and pci hotplug */
-	if (eeepc->wlan_rfkill) {
-		eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5");
-		eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6");
-		eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7");
-	}
+	if (eeepc->wlan_rfkill)
+		eeepc_rfkill_hotplug(eeepc);
 
 	if (eeepc->bluetooth_rfkill)
 		rfkill_set_sw_state(eeepc->bluetooth_rfkill,
diff --git a/trunk/drivers/platform/x86/sony-laptop.c b/trunk/drivers/platform/x86/sony-laptop.c
index 6fe8cd6e23b5..8f709aec4da0 100644
--- a/trunk/drivers/platform/x86/sony-laptop.c
+++ b/trunk/drivers/platform/x86/sony-laptop.c
@@ -934,14 +934,6 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
 /*
  * Backlight device
  */
-struct sony_backlight_props {
-	struct backlight_device *dev;
-	int handle;
-	u8 offset;
-	u8 maxlvl;
-};
-struct sony_backlight_props sony_bl_props;
-
 static int sony_backlight_update_status(struct backlight_device *bd)
 {
 	return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
@@ -962,26 +954,21 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
 {
 	int result;
 	int *handle = (int *)bl_get_data(bd);
-	struct sony_backlight_props *sdev =
-		(struct sony_backlight_props *)bl_get_data(bd);
 
-	sony_call_snc_handle(sdev->handle, 0x0200, &result);
+	sony_call_snc_handle(*handle, 0x0200, &result);
 
-	return (result & 0xff) - sdev->offset;
+	return result & 0xff;
 }
 
 static int sony_nc_update_status_ng(struct backlight_device *bd)
 {
 	int value, result;
 	int *handle = (int *)bl_get_data(bd);
-	struct sony_backlight_props *sdev =
-		(struct sony_backlight_props *)bl_get_data(bd);
 
-	value = bd->props.brightness + sdev->offset;
-	if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
-		return -EIO;
+	value = bd->props.brightness;
+	sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result);
 
-	return value;
+	return sony_nc_get_brightness_ng(bd);
 }
 
 static const struct backlight_ops sony_backlight_ops = {
@@ -994,6 +981,8 @@ static const struct backlight_ops sony_backlight_ng_ops = {
 	.update_status = sony_nc_update_status_ng,
 	.get_brightness = sony_nc_get_brightness_ng,
 };
+static int backlight_ng_handle;
+static struct backlight_device *sony_backlight_device;
 
 /*
  * New SNC-only Vaios event mapping to driver known keys
@@ -1560,75 +1549,6 @@ static void sony_nc_kbd_backlight_resume(void)
 			&ignore);
 }
 
-static void sony_nc_backlight_ng_read_limits(int handle,
-		struct sony_backlight_props *props)
-{
-	int offset;
-	acpi_status status;
-	u8 brlvl, i;
-	u8 min = 0xff, max = 0x00;
-	struct acpi_object_list params;
-	union acpi_object in_obj;
-	union acpi_object *lvl_enum;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-	props->handle = handle;
-	props->offset = 0;
-	props->maxlvl = 0xff;
-
-	offset = sony_find_snc_handle(handle);
-	if (offset < 0)
-		return;
-
-	/* try to read the boundaries from ACPI tables, if we fail the above
-	 * defaults should be reasonable
-	 */
-	params.count = 1;
-	params.pointer = &in_obj;
-	in_obj.type = ACPI_TYPE_INTEGER;
-	in_obj.integer.value = offset;
-	status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
-			&buffer);
-	if (ACPI_FAILURE(status))
-		return;
-
-	lvl_enum = (union acpi_object *) buffer.pointer;
-	if (!lvl_enum) {
-		pr_err("No SN06 return object.");
-		return;
-	}
-	if (lvl_enum->type != ACPI_TYPE_BUFFER) {
-
pr_err("Invalid SN06 return object 0x%.2x\n", - lvl_enum->type); - goto out_invalid; - } - - /* the buffer lists brightness levels available, brightness levels are - * from 0 to 8 in the array, other values are used by ALS control. - */ - for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) { - - brlvl = *(lvl_enum->buffer.pointer + i); - dprintk("Brightness level: %d\n", brlvl); - - if (!brlvl) - break; - - if (brlvl > max) - max = brlvl; - if (brlvl < min) - min = brlvl; - } - props->offset = min; - props->maxlvl = max; - dprintk("Brightness levels: min=%d max=%d\n", props->offset, - props->maxlvl); - -out_invalid: - kfree(buffer.pointer); - return; -} - static void sony_nc_backlight_setup(void) { acpi_handle unused; @@ -1637,14 +1557,14 @@ static void sony_nc_backlight_setup(void) struct backlight_properties props; if (sony_find_snc_handle(0x12f) != -1) { + backlight_ng_handle = 0x12f; ops = &sony_backlight_ng_ops; - sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props); - max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; + max_brightness = 0xff; } else if (sony_find_snc_handle(0x137) != -1) { + backlight_ng_handle = 0x137; ops = &sony_backlight_ng_ops; - sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props); - max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; + max_brightness = 0xff; } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", &unused))) { @@ -1657,22 +1577,22 @@ static void sony_nc_backlight_setup(void) memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_brightness; - sony_bl_props.dev = backlight_device_register("sony", NULL, - &sony_bl_props, - ops, &props); + sony_backlight_device = backlight_device_register("sony", NULL, + &backlight_ng_handle, + ops, &props); - if (IS_ERR(sony_bl_props.dev)) { - pr_warn(DRV_PFX "unable to register backlight device\n"); - sony_bl_props.dev = NULL; + if (IS_ERR(sony_backlight_device)) { + pr_warning(DRV_PFX "unable to register backlight device\n"); + sony_backlight_device = NULL; } else - sony_bl_props.dev->props.brightness = - ops->get_brightness(sony_bl_props.dev); + sony_backlight_device->props.brightness = + ops->get_brightness(sony_backlight_device); } static void sony_nc_backlight_cleanup(void) { - if (sony_bl_props.dev) - backlight_device_unregister(sony_bl_props.dev); + if (sony_backlight_device) + backlight_device_unregister(sony_backlight_device); } static int sony_nc_add(struct acpi_device *device) @@ -2670,7 +2590,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, mutex_lock(&spic_dev.lock); switch (cmd) { case SONYPI_IOCGBRT: - if (sony_bl_props.dev == NULL) { + if (sony_backlight_device == NULL) { ret = -EIO; break; } @@ -2683,7 +2603,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, ret = -EFAULT; break; case SONYPI_IOCSBRT: - if (sony_bl_props.dev == NULL) { + if (sony_backlight_device == NULL) { ret = -EIO; break; } @@ -2697,8 +2617,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, break; } /* sync the backlight device status */ - sony_bl_props.dev->props.brightness = - sony_backlight_get_brightness(sony_bl_props.dev); + sony_backlight_device->props.brightness = + sony_backlight_get_brightness(sony_backlight_device); break; case SONYPI_IOCGBAT1CAP: if (ec_read16(SONYPI_BAT1_FULL, &val16)) { diff --git a/trunk/drivers/platform/x86/thinkpad_acpi.c b/trunk/drivers/platform/x86/thinkpad_acpi.c index 562fcf0dd2b5..efb3b6b9bcdb 100644 --- 
a/trunk/drivers/platform/x86/thinkpad_acpi.c +++ b/trunk/drivers/platform/x86/thinkpad_acpi.c @@ -128,8 +128,7 @@ enum { }; /* ACPI HIDs */ -#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068" -#define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068" +#define TPACPI_ACPI_HKEY_HID "IBM0068" #define TPACPI_ACPI_EC_HID "PNP0C09" /* Input IDs */ @@ -3880,8 +3879,7 @@ static int hotkey_write(char *buf) } static const struct acpi_device_id ibm_htk_device_ids[] = { - {TPACPI_ACPI_IBM_HKEY_HID, 0}, - {TPACPI_ACPI_LENOVO_HKEY_HID, 0}, + {TPACPI_ACPI_HKEY_HID, 0}, {"", 0}, }; diff --git a/trunk/drivers/rapidio/switches/idt_gen2.c b/trunk/drivers/rapidio/switches/idt_gen2.c index 043ee3136e40..ac2701b22e71 100644 --- a/trunk/drivers/rapidio/switches/idt_gen2.c +++ b/trunk/drivers/rapidio/switches/idt_gen2.c @@ -95,9 +95,6 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, else table++; - if (route_port == RIO_INVALID_ROUTE) - route_port = IDT_DEFAULT_ROUTE; - rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); @@ -414,12 +411,6 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_handle = idtg2_em_handler; rdev->rswitch->sw_sysfs = idtg2_sysfs; - if (do_enum) { - /* Ensure that default routing is disabled on startup */ - rio_write_config_32(rdev, - RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); - } - return 0; } diff --git a/trunk/drivers/rapidio/switches/idtcps.c b/trunk/drivers/rapidio/switches/idtcps.c index d06ee2d44b44..3a971077e7bf 100644 --- a/trunk/drivers/rapidio/switches/idtcps.c +++ b/trunk/drivers/rapidio/switches/idtcps.c @@ -26,9 +26,6 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, { u32 result; - if (route_port == RIO_INVALID_ROUTE) - route_port = CPS_DEFAULT_ROUTE; - if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); @@ -133,9 +130,6 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); - /* Ensure that default routing is disabled on startup */ - rio_write_config_32(rdev, - RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); } return 0; diff --git a/trunk/drivers/rapidio/switches/tsi57x.c b/trunk/drivers/rapidio/switches/tsi57x.c index db8b8028988d..1a62934bfebc 100644 --- a/trunk/drivers/rapidio/switches/tsi57x.c +++ b/trunk/drivers/rapidio/switches/tsi57x.c @@ -303,12 +303,6 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_init = tsi57x_em_init; rdev->rswitch->em_handle = tsi57x_em_handler; - if (do_enum) { - /* Ensure that default routing is disabled on startup */ - rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, - RIO_INVALID_ROUTE); - } - return 0; } diff --git a/trunk/drivers/rtc/class.c b/trunk/drivers/rtc/class.c index 39013867cbd6..4194e59e14cd 100644 --- a/trunk/drivers/rtc/class.c +++ b/trunk/drivers/rtc/class.c @@ -41,26 +41,21 @@ static void rtc_device_release(struct device *dev) * system's wall clock; restore it on resume(). 
*/ -static struct timespec delta; static time_t oldtime; +static struct timespec oldts; static int rtc_suspend(struct device *dev, pm_message_t mesg) { struct rtc_device *rtc = to_rtc_device(dev); struct rtc_time tm; - struct timespec ts = current_kernel_time(); if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) return 0; rtc_read_time(rtc, &tm); + ktime_get_ts(&oldts); rtc_tm_to_time(&tm, &oldtime); - /* RTC precision is 1 second; adjust delta for avg 1/2 sec err */ - set_normalized_timespec(&delta, - ts.tv_sec - oldtime, - ts.tv_nsec - (NSEC_PER_SEC >> 1)); - return 0; } @@ -70,10 +65,12 @@ static int rtc_resume(struct device *dev) struct rtc_time tm; time_t newtime; struct timespec time; + struct timespec newts; if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) return 0; + ktime_get_ts(&newts); rtc_read_time(rtc, &tm); if (rtc_valid_tm(&tm) != 0) { pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev)); @@ -85,15 +82,13 @@ static int rtc_resume(struct device *dev) pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); return 0; } + /* calculate the RTC time delta */ + set_normalized_timespec(&time, newtime - oldtime, 0); - /* restore wall clock using delta against this RTC; - * adjust again for avg 1/2 second RTC sampling error - */ - set_normalized_timespec(&time, - newtime + delta.tv_sec, - (NSEC_PER_SEC >> 1) + delta.tv_nsec); - do_settimeofday(&time); + /* subtract kernel time between rtc_suspend to rtc_resume */ + time = timespec_sub(time, timespec_sub(newts, oldts)); + timekeeping_inject_sleeptime(&time); return 0; } diff --git a/trunk/drivers/rtc/rtc-davinci.c b/trunk/drivers/rtc/rtc-davinci.c index 755e1fe914af..8d46838dff8a 100644 --- a/trunk/drivers/rtc/rtc-davinci.c +++ b/trunk/drivers/rtc/rtc-davinci.c @@ -524,8 +524,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) goto fail2; } - platform_set_drvdata(pdev, davinci_rtc); - davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &davinci_rtc_ops, THIS_MODULE); if (IS_ERR(davinci_rtc->rtc)) { @@ -555,6 +553,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); + platform_set_drvdata(pdev, davinci_rtc); + device_init_wakeup(&pdev->dev, 0); return 0; @@ -562,7 +562,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) fail4: rtc_device_unregister(davinci_rtc->rtc); fail3: - platform_set_drvdata(pdev, NULL); iounmap(davinci_rtc->base); fail2: release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); diff --git a/trunk/drivers/rtc/rtc-ds1286.c b/trunk/drivers/rtc/rtc-ds1286.c index 47e681df31e2..60ce69600828 100644 --- a/trunk/drivers/rtc/rtc-ds1286.c +++ b/trunk/drivers/rtc/rtc-ds1286.c @@ -355,7 +355,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev) goto out; } spin_lock_init(&priv->lock); - platform_set_drvdata(pdev, priv); rtc = rtc_device_register("ds1286", &pdev->dev, &ds1286_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -363,6 +362,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev) goto out; } priv->rtc = rtc; + platform_set_drvdata(pdev, priv); return 0; out: diff --git a/trunk/drivers/rtc/rtc-ep93xx.c b/trunk/drivers/rtc/rtc-ep93xx.c index 335551d333b2..11ae64dcbf3c 100644 --- a/trunk/drivers/rtc/rtc-ep93xx.c +++ b/trunk/drivers/rtc/rtc-ep93xx.c @@ -151,7 +151,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return -ENXIO; pdev->dev.platform_data = ep93xx_rtc; - platform_set_drvdata(pdev, rtc); rtc = 
rtc_device_register(pdev->name, &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); @@ -160,6 +159,8 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) goto exit; } + platform_set_drvdata(pdev, rtc); + err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); if (err) goto fail; @@ -167,9 +168,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return 0; fail: + platform_set_drvdata(pdev, NULL); rtc_device_unregister(rtc); exit: - platform_set_drvdata(pdev, NULL); pdev->dev.platform_data = NULL; return err; } diff --git a/trunk/drivers/rtc/rtc-m41t80.c b/trunk/drivers/rtc/rtc-m41t80.c index eda128fc1d38..69fe664a2228 100644 --- a/trunk/drivers/rtc/rtc-m41t80.c +++ b/trunk/drivers/rtc/rtc-m41t80.c @@ -783,9 +783,6 @@ static int m41t80_probe(struct i2c_client *client, goto exit; } - clientdata->features = id->driver_data; - i2c_set_clientdata(client, clientdata); - rtc = rtc_device_register(client->name, &client->dev, &m41t80_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -795,6 +792,8 @@ static int m41t80_probe(struct i2c_client *client, } clientdata->rtc = rtc; + clientdata->features = id->driver_data; + i2c_set_clientdata(client, clientdata); /* Make sure HT (Halt Update) bit is cleared */ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); diff --git a/trunk/drivers/rtc/rtc-max8925.c b/trunk/drivers/rtc/rtc-max8925.c index 3bc046f427e0..174036dda786 100644 --- a/trunk/drivers/rtc/rtc-max8925.c +++ b/trunk/drivers/rtc/rtc-max8925.c @@ -257,10 +257,6 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) goto out_irq; } - dev_set_drvdata(&pdev->dev, info); - /* XXX - isn't this redundant? */ - platform_set_drvdata(pdev, info); - info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, &max8925_rtc_ops, THIS_MODULE); ret = PTR_ERR(info->rtc_dev); @@ -269,9 +265,11 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) goto out_rtc; } + dev_set_drvdata(&pdev->dev, info); + platform_set_drvdata(pdev, info); + return 0; out_rtc: - platform_set_drvdata(pdev, NULL); free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); out_irq: kfree(info); diff --git a/trunk/drivers/rtc/rtc-max8998.c b/trunk/drivers/rtc/rtc-max8998.c index 2e48aa604273..3f7bc6b9fefa 100644 --- a/trunk/drivers/rtc/rtc-max8998.c +++ b/trunk/drivers/rtc/rtc-max8998.c @@ -265,8 +265,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) info->rtc = max8998->rtc; info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; - platform_set_drvdata(pdev, info); - info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, &max8998_rtc_ops, THIS_MODULE); @@ -276,6 +274,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) goto out_rtc; } + platform_set_drvdata(pdev, info); + ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, "rtc-alarm0", info); @@ -293,7 +293,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) return 0; out_rtc: - platform_set_drvdata(pdev, NULL); kfree(info); return ret; } diff --git a/trunk/drivers/rtc/rtc-mc13xxx.c b/trunk/drivers/rtc/rtc-mc13xxx.c index a1a278bc340d..c5ac03793e79 100644 --- a/trunk/drivers/rtc/rtc-mc13xxx.c +++ b/trunk/drivers/rtc/rtc-mc13xxx.c @@ -349,15 +349,11 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev) if (ret) goto err_alarm_irq_request; - mc13xxx_unlock(mc13xxx); - priv->rtc = rtc_device_register(pdev->name, &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); if (IS_ERR(priv->rtc)) { ret = PTR_ERR(priv->rtc); - 
mc13xxx_lock(mc13xxx); - mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); err_alarm_irq_request: @@ -369,12 +365,12 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev) mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); err_reset_irq_request: - mc13xxx_unlock(mc13xxx); - platform_set_drvdata(pdev, NULL); kfree(priv); } + mc13xxx_unlock(mc13xxx); + return ret; } diff --git a/trunk/drivers/rtc/rtc-msm6242.c b/trunk/drivers/rtc/rtc-msm6242.c index fcb113c11122..67820626e18f 100644 --- a/trunk/drivers/rtc/rtc-msm6242.c +++ b/trunk/drivers/rtc/rtc-msm6242.c @@ -214,7 +214,6 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) error = -ENOMEM; goto out_free_priv; } - platform_set_drvdata(dev, priv); rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, THIS_MODULE); @@ -224,10 +223,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) } priv->rtc = rtc; + platform_set_drvdata(dev, priv); return 0; out_unmap: - platform_set_drvdata(dev, NULL); iounmap(priv->regs); out_free_priv: kfree(priv); diff --git a/trunk/drivers/rtc/rtc-mxc.c b/trunk/drivers/rtc/rtc-mxc.c index d814417bee8c..826ab64a8fa9 100644 --- a/trunk/drivers/rtc/rtc-mxc.c +++ b/trunk/drivers/rtc/rtc-mxc.c @@ -418,6 +418,14 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) goto exit_put_clk; } + rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) { + ret = PTR_ERR(rtc); + goto exit_put_clk; + } + + pdata->rtc = rtc; platform_set_drvdata(pdev, pdata); /* Configure and enable the RTC */ @@ -430,19 +438,8 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) pdata->irq = -1; } - rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { - ret = PTR_ERR(rtc); - goto exit_clr_drvdata; - } - - pdata->rtc = rtc; - return 0; -exit_clr_drvdata: - platform_set_drvdata(pdev, NULL); exit_put_clk: clk_disable(pdata->clk); clk_put(pdata->clk); diff --git a/trunk/drivers/rtc/rtc-pcap.c b/trunk/drivers/rtc/rtc-pcap.c index cd4f198cc2ef..a633abc42896 100644 --- a/trunk/drivers/rtc/rtc-pcap.c +++ b/trunk/drivers/rtc/rtc-pcap.c @@ -151,8 +151,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); - platform_set_drvdata(pdev, pcap_rtc); - pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, &pcap_rtc_ops, THIS_MODULE); if (IS_ERR(pcap_rtc->rtc)) { @@ -160,6 +158,7 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) goto fail_rtc; } + platform_set_drvdata(pdev, pcap_rtc); timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); @@ -178,7 +177,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) fail_timer: rtc_device_unregister(pcap_rtc->rtc); fail_rtc: - platform_set_drvdata(pdev, NULL); kfree(pcap_rtc); return err; } diff --git a/trunk/drivers/rtc/rtc-rp5c01.c b/trunk/drivers/rtc/rtc-rp5c01.c index 359da6d020b9..694da39b6dd2 100644 --- a/trunk/drivers/rtc/rtc-rp5c01.c +++ b/trunk/drivers/rtc/rtc-rp5c01.c @@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) spin_lock_init(&priv->lock); - platform_set_drvdata(dev, priv); - rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { error = PTR_ERR(rtc); goto out_unmap; } + priv->rtc = rtc; + platform_set_drvdata(dev, priv); error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); if 
(error) @@ -268,7 +268,6 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) out_unregister: rtc_device_unregister(rtc); out_unmap: - platform_set_drvdata(dev, NULL); iounmap(priv->regs); out_free_priv: kfree(priv); diff --git a/trunk/drivers/rtc/rtc-s3c.c b/trunk/drivers/rtc/rtc-s3c.c index 16512ecae31a..b3466c491cd3 100644 --- a/trunk/drivers/rtc/rtc-s3c.c +++ b/trunk/drivers/rtc/rtc-s3c.c @@ -46,7 +46,6 @@ static struct clk *rtc_clk; static void __iomem *s3c_rtc_base; static int s3c_rtc_alarmno = NO_IRQ; static int s3c_rtc_tickno = NO_IRQ; -static bool wake_en; static enum s3c_cpu_type s3c_rtc_cpu_type; static DEFINE_SPINLOCK(s3c_rtc_pie_lock); @@ -563,12 +562,8 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) } s3c_rtc_enable(pdev, 0); - if (device_may_wakeup(&pdev->dev) && !wake_en) { - if (enable_irq_wake(s3c_rtc_alarmno) == 0) - wake_en = true; - else - dev_err(&pdev->dev, "enable_irq_wake failed\n"); - } + if (device_may_wakeup(&pdev->dev)) + enable_irq_wake(s3c_rtc_alarmno); return 0; } @@ -584,10 +579,8 @@ static int s3c_rtc_resume(struct platform_device *pdev) writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); } - if (device_may_wakeup(&pdev->dev) && wake_en) { + if (device_may_wakeup(&pdev->dev)) disable_irq_wake(s3c_rtc_alarmno); - wake_en = false; - } return 0; } diff --git a/trunk/drivers/s390/block/dasd.c b/trunk/drivers/s390/block/dasd.c index 86b6f1cc1b10..475e603fc584 100644 --- a/trunk/drivers/s390/block/dasd.c +++ b/trunk/drivers/s390/block/dasd.c @@ -1742,20 +1742,11 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) static inline int _dasd_term_running_cqr(struct dasd_device *device) { struct dasd_ccw_req *cqr; - int rc; if (list_empty(&device->ccw_queue)) return 0; cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); - rc = device->discipline->term_IO(cqr); - if (!rc) - /* - * CQR terminated because a more important request is pending. - * Undo decreasing of retry counter because this is - * not an error case. 
- */ - cqr->retries++; - return rc; + return device->discipline->term_IO(cqr); } int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) diff --git a/trunk/drivers/s390/block/dasd_diag.c b/trunk/drivers/s390/block/dasd_diag.c index 85dddb1e4126..29143eda9dd9 100644 --- a/trunk/drivers/s390/block/dasd_diag.c +++ b/trunk/drivers/s390/block/dasd_diag.c @@ -239,6 +239,7 @@ static void dasd_ext_handler(unsigned int ext_int_code, addr_t ip; int rc; + kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; switch (ext_int_code >> 24) { case DASD_DIAG_CODE_31BIT: ip = (addr_t) param32; @@ -249,7 +250,6 @@ static void dasd_ext_handler(unsigned int ext_int_code, default: return; } - kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; if (!ip) { /* no intparm: unsolicited interrupt */ DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited " "interrupt"); diff --git a/trunk/drivers/s390/char/sclp_cmd.c b/trunk/drivers/s390/char/sclp_cmd.c index be55fb2b1b1c..4b60ede07f0e 100644 --- a/trunk/drivers/s390/char/sclp_cmd.c +++ b/trunk/drivers/s390/char/sclp_cmd.c @@ -518,8 +518,6 @@ static void __init insert_increment(u16 rn, int standby, int assigned) return; new_incr->rn = rn; new_incr->standby = standby; - if (!standby) - new_incr->usecount = 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { diff --git a/trunk/drivers/s390/char/tape_block.c b/trunk/drivers/s390/char/tape_block.c index 1b3924c2fffd..83cea9a55e2f 100644 --- a/trunk/drivers/s390/char/tape_block.c +++ b/trunk/drivers/s390/char/tape_block.c @@ -236,6 +236,7 @@ tapeblock_setup_device(struct tape_device * device) disk->major = tapeblock_major; disk->first_minor = device->first_minor; disk->fops = &tapeblock_fops; + disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = tape_get_device(device); disk->queue = blkdat->request_queue; set_capacity(disk, 0); diff --git a/trunk/drivers/s390/kvm/kvm_virtio.c b/trunk/drivers/s390/kvm/kvm_virtio.c index 607998f0b7d8..414427d64a8f 100644 --- a/trunk/drivers/s390/kvm/kvm_virtio.c +++ b/trunk/drivers/s390/kvm/kvm_virtio.c @@ -381,10 +381,10 @@ static void kvm_extint_handler(unsigned int ext_int_code, u16 subcode; u32 param; + kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; subcode = ext_int_code >> 16; if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) return; - kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; /* The LSB might be overloaded, we have to mask it */ vq = (struct virtqueue *)(param64 & ~1UL); diff --git a/trunk/drivers/scsi/device_handler/scsi_dh.c b/trunk/drivers/scsi/device_handler/scsi_dh.c index 0119b8147797..564e6ecd17c2 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh.c @@ -394,14 +394,12 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) unsigned long flags; struct scsi_device *sdev; struct scsi_device_handler *scsi_dh = NULL; - struct device *dev = NULL; spin_lock_irqsave(q->queue_lock, flags); sdev = q->queuedata; if (sdev && sdev->scsi_dh_data) scsi_dh = sdev->scsi_dh_data->scsi_dh; - dev = get_device(&sdev->sdev_gendev); - if (!scsi_dh || !dev || + if (!scsi_dh || !get_device(&sdev->sdev_gendev) || sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) err = SCSI_DH_NOSYS; @@ -412,13 +410,12 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) if (err) { if (fn) fn(data, err); - goto out; + return err; } if (scsi_dh->activate) err = scsi_dh->activate(sdev, fn, data); -out: - put_device(dev); + put_device(&sdev->sdev_gendev); return 
err; } EXPORT_SYMBOL_GPL(scsi_dh_activate); diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c index d72f1f2b1392..1c6d2b405eef 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -688,13 +688,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, goto out; } - /* Check for overflow and wraparound */ - if (karg.data_sge_offset * 4 > ioc->request_sz || - karg.data_sge_offset > (UINT_MAX / 4)) { - ret = -EINVAL; - goto out; - } - /* copy in request message frame from user */ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, @@ -1970,7 +1963,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) Mpi2DiagBufferPostReply_t *mpi_reply; int rc, i; u8 buffer_type; - unsigned long timeleft, request_size, copy_size; + unsigned long timeleft; u16 smid; u16 ioc_status; u8 issue_reset = 0; @@ -2006,8 +1999,6 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) return -ENOMEM; } - request_size = ioc->diag_buffer_sz[buffer_type]; - if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { printk(MPT2SAS_ERR_FMT "%s: either the starting_offset " "or bytes_to_read are not 4 byte aligned\n", ioc->name, @@ -2015,23 +2006,13 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) return -EINVAL; } - if (karg.starting_offset > request_size) - return -EINVAL; - diag_data = (void *)(request_data + karg.starting_offset); dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), " "offset(%d), sz(%d)\n", ioc->name, __func__, diag_data, karg.starting_offset, karg.bytes_to_read)); - /* Truncate data on requests that are too large */ - if ((diag_data + karg.bytes_to_read < diag_data) || - (diag_data + karg.bytes_to_read > request_data + request_size)) - copy_size = request_size - karg.starting_offset; - else - copy_size = karg.bytes_to_read; - if (copy_to_user((void __user *)uarg->diagnostic_data, - diag_data, copy_size)) { + diag_data, karg.bytes_to_read)) { printk(MPT2SAS_ERR_FMT "%s: Unable to write " "mpt_diag_read_buffer_t data @ %p\n", ioc->name, __func__, diag_data); diff --git a/trunk/drivers/scsi/pmcraid.c b/trunk/drivers/scsi/pmcraid.c index 7f636b118287..96d5ad0c1e42 100644 --- a/trunk/drivers/scsi/pmcraid.c +++ b/trunk/drivers/scsi/pmcraid.c @@ -3814,9 +3814,6 @@ static long pmcraid_ioctl_passthrough( rc = -EFAULT; goto out_free_buffer; } - } else if (request_size < 0) { - rc = -EINVAL; - goto out_free_buffer; } /* check if we have any additional command parameters */ diff --git a/trunk/drivers/scsi/qlogicpti.c b/trunk/drivers/scsi/qlogicpti.c index 9689d41c7888..e2d45c91b8e8 100644 --- a/trunk/drivers/scsi/qlogicpti.c +++ b/trunk/drivers/scsi/qlogicpti.c @@ -1292,10 +1292,8 @@ static struct scsi_host_template qpti_template = { .use_clustering = ENABLE_CLUSTERING, }; -static const struct of_device_id qpti_match[]; static int __devinit qpti_sbus_probe(struct platform_device *op) { - const struct of_device_id *match; struct scsi_host_template *tpnt; struct device_node *dp = op->dev.of_node; struct Scsi_Host *host; @@ -1303,10 +1301,9 @@ static int __devinit qpti_sbus_probe(struct platform_device *op) static int nqptis; const char *fcode; - match = of_match_device(qpti_match, &op->dev); - if (!match) + if (!op->dev.of_match) return -EINVAL; - tpnt = match->data; + tpnt = op->dev.of_match->data; /* Sometimes Antares cards come up not completely * setup, and we get a report of a zero IRQ. 
diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index ec1803a48723..e9901b8f8443 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -74,6 +74,8 @@ struct kmem_cache *scsi_sdb_cache; */ #define SCSI_QUEUE_DELAY 3 +static void scsi_run_queue(struct request_queue *q); + /* * Function: scsi_unprep_request() * @@ -159,7 +161,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) blk_requeue_request(q, cmd->request); spin_unlock_irqrestore(q->queue_lock, flags); - kblockd_schedule_work(q, &device->requeue_work); + scsi_run_queue(q); return 0; } @@ -398,15 +400,10 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost) static void scsi_run_queue(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; - struct Scsi_Host *shost; + struct Scsi_Host *shost = sdev->host; LIST_HEAD(starved_list); unsigned long flags; - /* if the device is dead, sdev will be NULL, so no queue to run */ - if (!sdev) - return; - - shost = sdev->host; if (scsi_target(sdev)->single_lun) scsi_single_lun_run(sdev); @@ -436,11 +433,7 @@ static void scsi_run_queue(struct request_queue *q) continue; } - spin_unlock(shost->host_lock); - spin_lock(sdev->request_queue->queue_lock); - __blk_run_queue(sdev->request_queue); - spin_unlock(sdev->request_queue->queue_lock); - spin_lock(shost->host_lock); + blk_run_queue_async(sdev->request_queue); } /* put any unprocessed entries back */ list_splice(&starved_list, &shost->starved_list); @@ -449,16 +442,6 @@ static void scsi_run_queue(struct request_queue *q) blk_run_queue(q); } -void scsi_requeue_run_queue(struct work_struct *work) -{ - struct scsi_device *sdev; - struct request_queue *q; - - sdev = container_of(work, struct scsi_device, requeue_work); - q = sdev->request_queue; - scsi_run_queue(q); -} - /* * Function: scsi_requeue_command() * diff --git a/trunk/drivers/scsi/scsi_scan.c b/trunk/drivers/scsi/scsi_scan.c index 58584dc0724a..087821fac8fe 100644 --- a/trunk/drivers/scsi/scsi_scan.c +++ b/trunk/drivers/scsi/scsi_scan.c @@ -242,7 +242,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, int display_failure_msg = 1, ret; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); extern void scsi_evt_thread(struct work_struct *work); - extern void scsi_requeue_run_queue(struct work_struct *work); sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, GFP_ATOMIC); @@ -265,7 +264,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, INIT_LIST_HEAD(&sdev->event_list); spin_lock_init(&sdev->list_lock); INIT_WORK(&sdev->event_work, scsi_evt_thread); - INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); sdev->sdev_gendev.parent = get_device(&starget->dev); sdev->sdev_target = starget; diff --git a/trunk/drivers/scsi/scsi_sysfs.c b/trunk/drivers/scsi/scsi_sysfs.c index e63912510fb9..e44ff64233fd 100644 --- a/trunk/drivers/scsi/scsi_sysfs.c +++ b/trunk/drivers/scsi/scsi_sysfs.c @@ -322,8 +322,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) kfree(evt); } - /* NULL queue means the device can't be used */ - sdev->request_queue = NULL; + if (sdev->request_queue) { + sdev->request_queue->queuedata = NULL; + /* user context needed to free queue */ + scsi_free_queue(sdev->request_queue); + /* temporary expedient, try to catch use of queue lock + * after free of sdev */ + sdev->request_queue = NULL; + } scsi_target_reap(scsi_target(sdev)); @@ -931,12 +937,6 @@ void __scsi_remove_device(struct 
scsi_device *sdev) if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(dev); - - /* cause the request function to reject all I/O requests */ - sdev->request_queue->queuedata = NULL; - - /* Freeing the queue signals to block that we're done */ - scsi_free_queue(sdev->request_queue); put_device(dev); } diff --git a/trunk/drivers/ssb/pci.c b/trunk/drivers/ssb/pci.c index 7ad48585c5e6..6f34963b3c64 100644 --- a/trunk/drivers/ssb/pci.c +++ b/trunk/drivers/ssb/pci.c @@ -662,6 +662,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, static int ssb_pci_sprom_get(struct ssb_bus *bus, struct ssb_sprom *sprom) { + const struct ssb_sprom *fallback; int err; u16 *buf; @@ -706,17 +707,10 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, if (err) { /* All CRC attempts failed. * Maybe there is no SPROM on the device? - * Now we ask the arch code if there is some sprom - * available for this device in some other storage */ - err = ssb_fill_sprom_with_fallback(bus, sprom); - if (err) { - ssb_printk(KERN_WARNING PFX "WARNING: Using" - " fallback SPROM failed (err %d)\n", - err); - } else { - ssb_dprintk(KERN_DEBUG PFX "Using SPROM" - " revision %d provided by" - " platform.\n", sprom->revision); + * If we have a fallback, use that. */ + fallback = ssb_get_fallback_sprom(); + if (fallback) { + memcpy(sprom, fallback, sizeof(*sprom)); err = 0; goto out_free; } diff --git a/trunk/drivers/ssb/sprom.c b/trunk/drivers/ssb/sprom.c index 45ff0e3a3828..5f34d7a3e3a5 100644 --- a/trunk/drivers/ssb/sprom.c +++ b/trunk/drivers/ssb/sprom.c @@ -17,7 +17,7 @@ #include -static int(*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out); +static const struct ssb_sprom *fallback_sprom; static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, @@ -145,43 +145,36 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, } /** - * ssb_arch_register_fallback_sprom - Registers a method providing a - * fallback SPROM if no SPROM is found. + * ssb_arch_set_fallback_sprom - Set a fallback SPROM for use if no SPROM is found. * - * @sprom_callback: The callback function. + * @sprom: The SPROM data structure to register. * - * With this function the architecture implementation may register a - * callback handler which fills the SPROM data structure. The fallback is - * only used for PCI based SSB devices, where no valid SPROM can be found - * in the shadow registers. + * With this function the architecture implementation may register a fallback + * SPROM data structure. The fallback is only used for PCI based SSB devices, + * where no valid SPROM can be found in the shadow registers. * - * This function is useful for weird architectures that have a half-assed - * SSB device hardwired to their PCI bus. + * This function is useful for weird architectures that have a half-assed SSB device + * hardwired to their PCI bus. * - * Note that it does only work with PCI attached SSB devices. PCMCIA - * devices currently don't use this fallback. - * Architectures must provide the SPROM for native SSB devices anyway, so - * the fallback also isn't used for native devices. + * Note that it does only work with PCI attached SSB devices. PCMCIA devices currently + * don't use this fallback. + * Architectures must provide the SPROM for native SSB devices anyway, + * so the fallback also isn't used for native devices. * - * This function is available for architecture code, only. So it is not - * exported. + * This function is available for architecture code, only. 
So it is not exported. */ -int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus, - struct ssb_sprom *out)) +int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom) { - if (get_fallback_sprom) + if (fallback_sprom) return -EEXIST; - get_fallback_sprom = sprom_callback; + fallback_sprom = sprom; return 0; } -int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out) +const struct ssb_sprom *ssb_get_fallback_sprom(void) { - if (!get_fallback_sprom) - return -ENOENT; - - return get_fallback_sprom(bus, out); + return fallback_sprom; } /* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ diff --git a/trunk/drivers/ssb/ssb_private.h b/trunk/drivers/ssb/ssb_private.h index 77653014db0b..0331139a726f 100644 --- a/trunk/drivers/ssb/ssb_private.h +++ b/trunk/drivers/ssb/ssb_private.h @@ -171,8 +171,7 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, const char *buf, size_t count, int (*sprom_check_crc)(const u16 *sprom, size_t size), int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); -extern int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, - struct ssb_sprom *out); +extern const struct ssb_sprom *ssb_get_fallback_sprom(void); /* core.c */ diff --git a/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c index 830822f86e41..eeb7dd43f9a8 100644 --- a/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c +++ b/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c @@ -2288,3 +2288,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link, free_netdev(dev); return NULL; } + +EXPORT_SYMBOL(init_ft1000_card); +EXPORT_SYMBOL(stop_ft1000_card); +EXPORT_SYMBOL(flarion_ft1000_cnt); diff --git a/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c index bdfb1aec58df..935608e72007 100644 --- a/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c +++ b/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c @@ -214,3 +214,6 @@ void ft1000CleanupProc(struct net_device *dev) remove_proc_entry(FT1000_PROC, init_net.proc_net); unregister_netdevice_notifier(&ft1000_netdev_notifier); } + +EXPORT_SYMBOL(ft1000InitProc); +EXPORT_SYMBOL(ft1000CleanupProc); diff --git a/trunk/drivers/staging/gma500/Kconfig b/trunk/drivers/staging/gma500/Kconfig index ce8bedaeaac2..5501eb9b3355 100644 --- a/trunk/drivers/staging/gma500/Kconfig +++ b/trunk/drivers/staging/gma500/Kconfig @@ -1,6 +1,6 @@ config DRM_PSB tristate "Intel GMA500 KMS Framebuffer" - depends on DRM && PCI && X86 + depends on DRM && PCI select FB_CFB_COPYAREA select FB_CFB_FILLRECT select FB_CFB_IMAGEBLIT diff --git a/trunk/drivers/staging/intel_sst/intelmid_v1_control.c b/trunk/drivers/staging/intel_sst/intelmid_v1_control.c index 1ea814218059..9cc15c1c18d4 100644 --- a/trunk/drivers/staging/intel_sst/intelmid_v1_control.c +++ b/trunk/drivers/staging/intel_sst/intelmid_v1_control.c @@ -28,7 +28,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include #include #include #include diff --git a/trunk/drivers/staging/intel_sst/intelmid_v2_control.c b/trunk/drivers/staging/intel_sst/intelmid_v2_control.c index 3c6b3abff3c3..26d815a67eb8 100644 --- a/trunk/drivers/staging/intel_sst/intelmid_v2_control.c +++ b/trunk/drivers/staging/intel_sst/intelmid_v2_control.c @@ -29,7 +29,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include #include #include "intel_sst.h" #include "intelmid_snd_control.h" diff --git 
a/trunk/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/trunk/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c index 22c04eabed41..b5d21f6497f9 100644 --- a/trunk/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c +++ b/trunk/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c @@ -12,7 +12,6 @@ */ #include #include -#include #include #include "olpc_dcon.h" diff --git a/trunk/drivers/staging/rt2860/common/cmm_data_pci.c b/trunk/drivers/staging/rt2860/common/cmm_data_pci.c index f01a51c381f1..bef0bbd8cef7 100644 --- a/trunk/drivers/staging/rt2860/common/cmm_data_pci.c +++ b/trunk/drivers/staging/rt2860/common/cmm_data_pci.c @@ -444,7 +444,7 @@ int RTMPCheckRxError(struct rt_rtmp_adapter *pAd, return (NDIS_STATUS_FAILURE); } } - /* Drop not U2M frames, can't drop here because we will drop beacon in this case */ + /* Drop not U2M frames, can't's drop here because we will drop beacon in this case */ /* I am kind of doubting the U2M bit operation */ /* if (pRxD->U2M == 0) */ /* return(NDIS_STATUS_FAILURE); */ diff --git a/trunk/drivers/staging/rt2860/common/cmm_data_usb.c b/trunk/drivers/staging/rt2860/common/cmm_data_usb.c index 83a62faa7e57..5637857ae9eb 100644 --- a/trunk/drivers/staging/rt2860/common/cmm_data_usb.c +++ b/trunk/drivers/staging/rt2860/common/cmm_data_usb.c @@ -860,7 +860,7 @@ int RTMPCheckRxError(struct rt_rtmp_adapter *pAd, DBGPRINT_RAW(RT_DEBUG_ERROR, ("received packet too long\n")); return NDIS_STATUS_FAILURE; } - /* Drop not U2M frames, can't drop here because we will drop beacon in this case */ + /* Drop not U2M frames, can't's drop here because we will drop beacon in this case */ /* I am kind of doubting the U2M bit operation */ /* if (pRxD->U2M == 0) */ /* return(NDIS_STATUS_FAILURE); */ diff --git a/trunk/drivers/staging/rts_pstor/debug.h b/trunk/drivers/staging/rts_pstor/debug.h index ab305be96fb5..e1408b0e7ae4 100644 --- a/trunk/drivers/staging/rts_pstor/debug.h +++ b/trunk/drivers/staging/rts_pstor/debug.h @@ -28,7 +28,7 @@ #define RTSX_STOR "rts_pstor: " -#ifdef CONFIG_RTS_PSTOR_DEBUG +#if CONFIG_RTS_PSTOR_DEBUG #define RTSX_DEBUGP(x...) printk(KERN_DEBUG RTSX_STOR x) #define RTSX_DEBUGPN(x...) printk(KERN_DEBUG x) #define RTSX_DEBUGPX(x...) 
printk(x) diff --git a/trunk/drivers/staging/rts_pstor/ms.c b/trunk/drivers/staging/rts_pstor/ms.c index d89795c6a3ac..810e170894f5 100644 --- a/trunk/drivers/staging/rts_pstor/ms.c +++ b/trunk/drivers/staging/rts_pstor/ms.c @@ -23,7 +23,6 @@ #include #include #include -#include #include "rtsx.h" #include "rtsx_transport.h" diff --git a/trunk/drivers/staging/rts_pstor/rtsx_chip.c b/trunk/drivers/staging/rts_pstor/rtsx_chip.c index 4e60780ea804..d2f1c715a684 100644 --- a/trunk/drivers/staging/rts_pstor/rtsx_chip.c +++ b/trunk/drivers/staging/rts_pstor/rtsx_chip.c @@ -24,7 +24,6 @@ #include #include #include -#include #include "rtsx.h" #include "rtsx_transport.h" @@ -1312,11 +1311,11 @@ void rtsx_polling_func(struct rtsx_chip *chip) #ifdef SUPPORT_OCP if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) { -#ifdef CONFIG_RTS_PSTOR_DEBUG + #if CONFIG_RTS_PSTOR_DEBUG if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER)) { RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat); } -#endif + #endif if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { if (chip->card_exist & SD_CARD) { diff --git a/trunk/drivers/staging/rts_pstor/rtsx_scsi.c b/trunk/drivers/staging/rts_pstor/rtsx_scsi.c index 7de1fae443fc..20c2464a20f9 100644 --- a/trunk/drivers/staging/rts_pstor/rtsx_scsi.c +++ b/trunk/drivers/staging/rts_pstor/rtsx_scsi.c @@ -23,7 +23,6 @@ #include #include #include -#include #include "rtsx.h" #include "rtsx_transport.h" diff --git a/trunk/drivers/staging/rts_pstor/sd.c b/trunk/drivers/staging/rts_pstor/sd.c index b1277a6c7a8b..8d066bd428c4 100644 --- a/trunk/drivers/staging/rts_pstor/sd.c +++ b/trunk/drivers/staging/rts_pstor/sd.c @@ -909,7 +909,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET); RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0); } else { -#ifdef CONFIG_RTS_PSTOR_DEBUG +#if CONFIG_RTS_PSTOR_DEBUG rtsx_read_register(chip, SD_VP_CTL, &val); RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val); rtsx_read_register(chip, SD_DCMPS_CTL, &val); @@ -958,7 +958,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) return STATUS_SUCCESS; Fail: -#ifdef CONFIG_RTS_PSTOR_DEBUG +#if CONFIG_RTS_PSTOR_DEBUG rtsx_read_register(chip, SD_VP_CTL, &val); RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val); rtsx_read_register(chip, SD_DCMPS_CTL, &val); diff --git a/trunk/drivers/staging/rts_pstor/trace.h b/trunk/drivers/staging/rts_pstor/trace.h index bc83b49a4eb4..2c668bae6ff4 100644 --- a/trunk/drivers/staging/rts_pstor/trace.h +++ b/trunk/drivers/staging/rts_pstor/trace.h @@ -82,7 +82,7 @@ do { \ #define TRACE_GOTO(chip, label) goto label #endif -#ifdef CONFIG_RTS_PSTOR_DEBUG +#if CONFIG_RTS_PSTOR_DEBUG static inline void rtsx_dump(u8 *buf, int buf_len) { int i; diff --git a/trunk/drivers/staging/rts_pstor/xd.c b/trunk/drivers/staging/rts_pstor/xd.c index 9f3add1e8f59..7bcd468b8f2c 100644 --- a/trunk/drivers/staging/rts_pstor/xd.c +++ b/trunk/drivers/staging/rts_pstor/xd.c @@ -23,7 +23,6 @@ #include #include #include -#include #include "rtsx.h" #include "rtsx_transport.h" diff --git a/trunk/drivers/staging/solo6x10/Kconfig b/trunk/drivers/staging/solo6x10/Kconfig index 03dcac4ea4d0..2cf77c940860 100644 --- a/trunk/drivers/staging/solo6x10/Kconfig +++ b/trunk/drivers/staging/solo6x10/Kconfig @@ -2,7 +2,6 @@ config SOLO6X10 tristate "Softlogic 6x10 MPEG codec cards" depends on PCI && VIDEO_DEV && SND && I2C select VIDEOBUF_DMA_SG - select SND_PCM ---help--- This driver supports the Softlogic 
based MPEG-4 and h.264 codec codec cards. diff --git a/trunk/drivers/staging/spectra/ffsport.c b/trunk/drivers/staging/spectra/ffsport.c index 506547b603e1..20dae73d3b78 100644 --- a/trunk/drivers/staging/spectra/ffsport.c +++ b/trunk/drivers/staging/spectra/ffsport.c @@ -653,7 +653,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which) } dev->queue->queuedata = dev; - /* As Linux block layer doesn't support >4KB hardware sector, */ + /* As Linux block layer does't support >4KB hardware sector, */ /* Here we force report 512 byte hardware sector size to Kernel */ blk_queue_logical_block_size(dev->queue, 512); diff --git a/trunk/drivers/staging/tidspbridge/dynload/cload.c b/trunk/drivers/staging/tidspbridge/dynload/cload.c index fe1ef0addb09..5cecd237e3f6 100644 --- a/trunk/drivers/staging/tidspbridge/dynload/cload.c +++ b/trunk/drivers/staging/tidspbridge/dynload/cload.c @@ -718,7 +718,7 @@ static void dload_symbols(struct dload_state *dlthis) * as a temporary for .dllview record construction. * Allocate storage for the whole table. Add 1 to the section count * in case a trampoline section is auto-generated as well as the - * size of the trampoline section name so DLLView doesn't get lost. + * size of the trampoline section name so DLLView does't get lost. */ siz = sym_count * sizeof(struct local_symbol); diff --git a/trunk/drivers/staging/tty/specialix.c b/trunk/drivers/staging/tty/specialix.c index 5c3598ec7456..cb24c6d999db 100644 --- a/trunk/drivers/staging/tty/specialix.c +++ b/trunk/drivers/staging/tty/specialix.c @@ -978,7 +978,7 @@ static void sx_change_speed(struct specialix_board *bp, spin_lock_irqsave(&bp->lock, flags); sx_out(bp, CD186x_CAR, port_No(port)); - /* The Specialix board doesn't implement the RTS lines. + /* The Specialix board does't implement the RTS lines. They are used to set the IRQ level. Don't touch them. */ if (sx_crtscts(tty)) port->MSVR = MSVR_DTR | (sx_in(bp, CD186x_MSVR) & MSVR_RTS); diff --git a/trunk/drivers/staging/usbip/vhci_hcd.c b/trunk/drivers/staging/usbip/vhci_hcd.c index 4f4f13321f40..0f02a4b12ae4 100644 --- a/trunk/drivers/staging/usbip/vhci_hcd.c +++ b/trunk/drivers/staging/usbip/vhci_hcd.c @@ -876,10 +876,8 @@ static void vhci_shutdown_connection(struct usbip_device *ud) } /* kill threads related to this sdev, if v.c. 
exists */ - if (vdev->ud.tcp_rx) - kthread_stop(vdev->ud.tcp_rx); - if (vdev->ud.tcp_tx) - kthread_stop(vdev->ud.tcp_tx); + kthread_stop(vdev->ud.tcp_rx); + kthread_stop(vdev->ud.tcp_tx); usbip_uinfo("stop threads\n"); @@ -951,6 +949,9 @@ static void vhci_device_init(struct vhci_device *vdev) { memset(vdev, 0, sizeof(*vdev)); + vdev->ud.tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); + vdev->ud.tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); + vdev->ud.side = USBIP_VHCI; vdev->ud.status = VDEV_ST_NULL; /* vdev->ud.lock = SPIN_LOCK_UNLOCKED; */ @@ -1138,7 +1139,7 @@ static int vhci_hcd_probe(struct platform_device *pdev) usbip_uerr("create hcd failed\n"); return -ENOMEM; } - hcd->has_tt = 1; + /* this is private data for vhci_hcd */ the_controller = hcd_to_vhci(hcd); diff --git a/trunk/drivers/staging/usbip/vhci_sysfs.c b/trunk/drivers/staging/usbip/vhci_sysfs.c index e2dadbd5ef1e..3f2459f30415 100644 --- a/trunk/drivers/staging/usbip/vhci_sysfs.c +++ b/trunk/drivers/staging/usbip/vhci_sysfs.c @@ -21,7 +21,6 @@ #include "vhci.h" #include -#include /* TODO: refine locking ?*/ @@ -221,13 +220,13 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, vdev->ud.tcp_socket = socket; vdev->ud.status = VDEV_ST_NOTASSIGNED; + wake_up_process(vdev->ud.tcp_rx); + wake_up_process(vdev->ud.tcp_tx); + spin_unlock(&vdev->ud.lock); spin_unlock(&the_controller->lock); /* end the lock */ - vdev->ud.tcp_rx = kthread_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); - vdev->ud.tcp_tx = kthread_run(vhci_tx_loop, &vdev->ud, "vhci_tx"); - rh_port_connect(rhport, speed); return count; diff --git a/trunk/drivers/staging/wlan-ng/cfg80211.c b/trunk/drivers/staging/wlan-ng/cfg80211.c index 76378397b763..6a71f52c59b1 100644 --- a/trunk/drivers/staging/wlan-ng/cfg80211.c +++ b/trunk/drivers/staging/wlan-ng/cfg80211.c @@ -273,7 +273,7 @@ int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, } int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_index, bool unicast, bool multicast) + u8 key_index) { wlandevice_t *wlandev = dev->ml_priv; diff --git a/trunk/drivers/tty/serial/Kconfig b/trunk/drivers/tty/serial/Kconfig index b1f0f83b870d..80484af781e1 100644 --- a/trunk/drivers/tty/serial/Kconfig +++ b/trunk/drivers/tty/serial/Kconfig @@ -1391,14 +1391,6 @@ config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE help Support for Console on the NWP serial ports. -config SERIAL_LANTIQ - bool "Lantiq serial driver" - depends on LANTIQ - select SERIAL_CORE - select SERIAL_CORE_CONSOLE - help - Support for console and UART on Lantiq SoCs. - config SERIAL_QE tristate "Freescale QUICC Engine serial port support" depends on QUICC_ENGINE diff --git a/trunk/drivers/tty/serial/Makefile b/trunk/drivers/tty/serial/Makefile index 35276043d9d1..fee0690ef8e3 100644 --- a/trunk/drivers/tty/serial/Makefile +++ b/trunk/drivers/tty/serial/Makefile @@ -94,4 +94,3 @@ obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o -obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o diff --git a/trunk/drivers/tty/serial/lantiq.c b/trunk/drivers/tty/serial/lantiq.c deleted file mode 100644 index 58cf279ed879..000000000000 --- a/trunk/drivers/tty/serial/lantiq.c +++ /dev/null @@ -1,756 +0,0 @@ -/* - * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Copyright (C) 2004 Infineon IFAP DC COM CPE - * Copyright (C) 2007 Felix Fietkau - * Copyright (C) 2007 John Crispin - * Copyright (C) 2010 Thomas Langer, - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define PORT_LTQ_ASC 111 -#define MAXPORTS 2 -#define UART_DUMMY_UER_RX 1 -#define DRVNAME "ltq_asc" -#ifdef __BIG_ENDIAN -#define LTQ_ASC_TBUF (0x0020 + 3) -#define LTQ_ASC_RBUF (0x0024 + 3) -#else -#define LTQ_ASC_TBUF 0x0020 -#define LTQ_ASC_RBUF 0x0024 -#endif -#define LTQ_ASC_FSTAT 0x0048 -#define LTQ_ASC_WHBSTATE 0x0018 -#define LTQ_ASC_STATE 0x0014 -#define LTQ_ASC_IRNCR 0x00F8 -#define LTQ_ASC_CLC 0x0000 -#define LTQ_ASC_ID 0x0008 -#define LTQ_ASC_PISEL 0x0004 -#define LTQ_ASC_TXFCON 0x0044 -#define LTQ_ASC_RXFCON 0x0040 -#define LTQ_ASC_CON 0x0010 -#define LTQ_ASC_BG 0x0050 -#define LTQ_ASC_IRNREN 0x00F4 - -#define ASC_IRNREN_TX 0x1 -#define ASC_IRNREN_RX 0x2 -#define ASC_IRNREN_ERR 0x4 -#define ASC_IRNREN_TX_BUF 0x8 -#define ASC_IRNCR_TIR 0x1 -#define ASC_IRNCR_RIR 0x2 -#define ASC_IRNCR_EIR 0x4 - -#define ASCOPT_CSIZE 0x3 -#define TXFIFO_FL 1 -#define RXFIFO_FL 1 -#define ASCCLC_DISS 0x2 -#define ASCCLC_RMCMASK 0x0000FF00 -#define ASCCLC_RMCOFFSET 8 -#define ASCCON_M_8ASYNC 0x0 -#define ASCCON_M_7ASYNC 0x2 -#define ASCCON_ODD 0x00000020 -#define ASCCON_STP 0x00000080 -#define ASCCON_BRS 0x00000100 -#define ASCCON_FDE 0x00000200 -#define ASCCON_R 0x00008000 -#define ASCCON_FEN 0x00020000 -#define ASCCON_ROEN 0x00080000 -#define ASCCON_TOEN 0x00100000 -#define ASCSTATE_PE 0x00010000 -#define ASCSTATE_FE 0x00020000 -#define ASCSTATE_ROE 0x00080000 -#define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) -#define ASCWHBSTATE_CLRREN 0x00000001 -#define ASCWHBSTATE_SETREN 0x00000002 -#define ASCWHBSTATE_CLRPE 0x00000004 -#define ASCWHBSTATE_CLRFE 0x00000008 -#define ASCWHBSTATE_CLRROE 0x00000020 -#define ASCTXFCON_TXFEN 0x0001 -#define ASCTXFCON_TXFFLU 0x0002 -#define ASCTXFCON_TXFITLMASK 0x3F00 -#define ASCTXFCON_TXFITLOFF 8 -#define ASCRXFCON_RXFEN 0x0001 -#define ASCRXFCON_RXFFLU 0x0002 -#define ASCRXFCON_RXFITLMASK 0x3F00 -#define ASCRXFCON_RXFITLOFF 8 -#define ASCFSTAT_RXFFLMASK 0x003F -#define ASCFSTAT_TXFFLMASK 0x3F00 -#define ASCFSTAT_TXFREEMASK 0x3F000000 -#define ASCFSTAT_TXFREEOFF 24 - -static void lqasc_tx_chars(struct uart_port *port); -static struct ltq_uart_port *lqasc_port[MAXPORTS]; -static struct uart_driver lqasc_reg; -static DEFINE_SPINLOCK(ltq_asc_lock); - -struct ltq_uart_port { - struct uart_port port; - struct clk *clk; - unsigned int tx_irq; - unsigned int rx_irq; - unsigned int err_irq; -}; - -static inline struct -ltq_uart_port *to_ltq_uart_port(struct uart_port *port) -{ - return container_of(port, struct ltq_uart_port, port); -} - -static void -lqasc_stop_tx(struct uart_port *port) -{ 
- return; -} - -static void -lqasc_start_tx(struct uart_port *port) -{ - unsigned long flags; - spin_lock_irqsave(<q_asc_lock, flags); - lqasc_tx_chars(port); - spin_unlock_irqrestore(<q_asc_lock, flags); - return; -} - -static void -lqasc_stop_rx(struct uart_port *port) -{ - ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); -} - -static void -lqasc_enable_ms(struct uart_port *port) -{ -} - -static int -lqasc_rx_chars(struct uart_port *port) -{ - struct tty_struct *tty = tty_port_tty_get(&port->state->port); - unsigned int ch = 0, rsr = 0, fifocnt; - - if (!tty) { - dev_dbg(port->dev, "%s:tty is busy now", __func__); - return -EBUSY; - } - fifocnt = - ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; - while (fifocnt--) { - u8 flag = TTY_NORMAL; - ch = ltq_r8(port->membase + LTQ_ASC_RBUF); - rsr = (ltq_r32(port->membase + LTQ_ASC_STATE) - & ASCSTATE_ANY) | UART_DUMMY_UER_RX; - tty_flip_buffer_push(tty); - port->icount.rx++; - - /* - * Note that the error handling code is - * out of the main execution path - */ - if (rsr & ASCSTATE_ANY) { - if (rsr & ASCSTATE_PE) { - port->icount.parity++; - ltq_w32_mask(0, ASCWHBSTATE_CLRPE, - port->membase + LTQ_ASC_WHBSTATE); - } else if (rsr & ASCSTATE_FE) { - port->icount.frame++; - ltq_w32_mask(0, ASCWHBSTATE_CLRFE, - port->membase + LTQ_ASC_WHBSTATE); - } - if (rsr & ASCSTATE_ROE) { - port->icount.overrun++; - ltq_w32_mask(0, ASCWHBSTATE_CLRROE, - port->membase + LTQ_ASC_WHBSTATE); - } - - rsr &= port->read_status_mask; - - if (rsr & ASCSTATE_PE) - flag = TTY_PARITY; - else if (rsr & ASCSTATE_FE) - flag = TTY_FRAME; - } - - if ((rsr & port->ignore_status_mask) == 0) - tty_insert_flip_char(tty, ch, flag); - - if (rsr & ASCSTATE_ROE) - /* - * Overrun is special, since it's reported - * immediately, and doesn't affect the current - * character - */ - tty_insert_flip_char(tty, 0, TTY_OVERRUN); - } - if (ch != 0) - tty_flip_buffer_push(tty); - tty_kref_put(tty); - return 0; -} - -static void -lqasc_tx_chars(struct uart_port *port) -{ - struct circ_buf *xmit = &port->state->xmit; - if (uart_tx_stopped(port)) { - lqasc_stop_tx(port); - return; - } - - while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) & - ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { - if (port->x_char) { - ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF); - port->icount.tx++; - port->x_char = 0; - continue; - } - - if (uart_circ_empty(xmit)) - break; - - ltq_w8(port->state->xmit.buf[port->state->xmit.tail], - port->membase + LTQ_ASC_TBUF); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); -} - -static irqreturn_t -lqasc_tx_int(int irq, void *_port) -{ - unsigned long flags; - struct uart_port *port = (struct uart_port *)_port; - spin_lock_irqsave(<q_asc_lock, flags); - ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); - spin_unlock_irqrestore(<q_asc_lock, flags); - lqasc_start_tx(port); - return IRQ_HANDLED; -} - -static irqreturn_t -lqasc_err_int(int irq, void *_port) -{ - unsigned long flags; - struct uart_port *port = (struct uart_port *)_port; - spin_lock_irqsave(<q_asc_lock, flags); - /* clear any pending interrupts */ - ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | - ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); - spin_unlock_irqrestore(<q_asc_lock, flags); - return IRQ_HANDLED; -} - -static irqreturn_t -lqasc_rx_int(int irq, void *_port) -{ - unsigned long flags; - struct uart_port *port = (struct uart_port *)_port; - 
spin_lock_irqsave(<q_asc_lock, flags); - ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); - lqasc_rx_chars(port); - spin_unlock_irqrestore(<q_asc_lock, flags); - return IRQ_HANDLED; -} - -static unsigned int -lqasc_tx_empty(struct uart_port *port) -{ - int status; - status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; - return status ? 0 : TIOCSER_TEMT; -} - -static unsigned int -lqasc_get_mctrl(struct uart_port *port) -{ - return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; -} - -static void -lqasc_set_mctrl(struct uart_port *port, u_int mctrl) -{ -} - -static void -lqasc_break_ctl(struct uart_port *port, int break_state) -{ -} - -static int -lqasc_startup(struct uart_port *port) -{ - struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); - int retval; - - port->uartclk = clk_get_rate(ltq_port->clk); - - ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), - port->membase + LTQ_ASC_CLC); - - ltq_w32(0, port->membase + LTQ_ASC_PISEL); - ltq_w32( - ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | - ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, - port->membase + LTQ_ASC_TXFCON); - ltq_w32( - ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) - | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, - port->membase + LTQ_ASC_RXFCON); - /* make sure other settings are written to hardware before - * setting enable bits - */ - wmb(); - ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | - ASCCON_ROEN, port->membase + LTQ_ASC_CON); - - retval = request_irq(ltq_port->tx_irq, lqasc_tx_int, - IRQF_DISABLED, "asc_tx", port); - if (retval) { - pr_err("failed to request lqasc_tx_int\n"); - return retval; - } - - retval = request_irq(ltq_port->rx_irq, lqasc_rx_int, - IRQF_DISABLED, "asc_rx", port); - if (retval) { - pr_err("failed to request lqasc_rx_int\n"); - goto err1; - } - - retval = request_irq(ltq_port->err_irq, lqasc_err_int, - IRQF_DISABLED, "asc_err", port); - if (retval) { - pr_err("failed to request lqasc_err_int\n"); - goto err2; - } - - ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, - port->membase + LTQ_ASC_IRNREN); - return 0; - -err2: - free_irq(ltq_port->rx_irq, port); -err1: - free_irq(ltq_port->tx_irq, port); - return retval; -} - -static void -lqasc_shutdown(struct uart_port *port) -{ - struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); - free_irq(ltq_port->tx_irq, port); - free_irq(ltq_port->rx_irq, port); - free_irq(ltq_port->err_irq, port); - - ltq_w32(0, port->membase + LTQ_ASC_CON); - ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, - port->membase + LTQ_ASC_RXFCON); - ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, - port->membase + LTQ_ASC_TXFCON); -} - -static void -lqasc_set_termios(struct uart_port *port, - struct ktermios *new, struct ktermios *old) -{ - unsigned int cflag; - unsigned int iflag; - unsigned int divisor; - unsigned int baud; - unsigned int con = 0; - unsigned long flags; - - cflag = new->c_cflag; - iflag = new->c_iflag; - - switch (cflag & CSIZE) { - case CS7: - con = ASCCON_M_7ASYNC; - break; - - case CS5: - case CS6: - default: - new->c_cflag &= ~ CSIZE; - new->c_cflag |= CS8; - con = ASCCON_M_8ASYNC; - break; - } - - cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ - - if (cflag & CSTOPB) - con |= ASCCON_STP; - - if (cflag & PARENB) { - if (!(cflag & PARODD)) - con &= ~ASCCON_ODD; - else - con |= ASCCON_ODD; - } - - port->read_status_mask = ASCSTATE_ROE; - if (iflag & INPCK) - port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; - - port->ignore_status_mask = 0; - if (iflag & IGNPAR) - 
port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; - - if (iflag & IGNBRK) { - /* - * If we're ignoring parity and break indicators, - * ignore overruns too (for real raw support). - */ - if (iflag & IGNPAR) - port->ignore_status_mask |= ASCSTATE_ROE; - } - - if ((cflag & CREAD) == 0) - port->ignore_status_mask |= UART_DUMMY_UER_RX; - - /* set error signals - framing, parity and overrun, enable receiver */ - con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; - - spin_lock_irqsave(<q_asc_lock, flags); - - /* set up CON */ - ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON); - - /* Set baud rate - take a divider of 2 into account */ - baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); - divisor = uart_get_divisor(port, baud); - divisor = divisor / 2 - 1; - - /* disable the baudrate generator */ - ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON); - - /* make sure the fractional divider is off */ - ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON); - - /* set up to use divisor of 2 */ - ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); - - /* now we can write the new baudrate into the register */ - ltq_w32(divisor, port->membase + LTQ_ASC_BG); - - /* turn the baudrate generator back on */ - ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON); - - /* enable rx */ - ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); - - spin_unlock_irqrestore(<q_asc_lock, flags); - - /* Don't rewrite B0 */ - if (tty_termios_baud_rate(new)) - tty_termios_encode_baud_rate(new, baud, baud); -} - -static const char* -lqasc_type(struct uart_port *port) -{ - if (port->type == PORT_LTQ_ASC) - return DRVNAME; - else - return NULL; -} - -static void -lqasc_release_port(struct uart_port *port) -{ - if (port->flags & UPF_IOREMAP) { - iounmap(port->membase); - port->membase = NULL; - } -} - -static int -lqasc_request_port(struct uart_port *port) -{ - struct platform_device *pdev = to_platform_device(port->dev); - struct resource *res; - int size; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "cannot obtain I/O memory region"); - return -ENODEV; - } - size = resource_size(res); - - res = devm_request_mem_region(&pdev->dev, res->start, - size, dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, "cannot request I/O memory region"); - return -EBUSY; - } - - if (port->flags & UPF_IOREMAP) { - port->membase = devm_ioremap_nocache(&pdev->dev, - port->mapbase, size); - if (port->membase == NULL) - return -ENOMEM; - } - return 0; -} - -static void -lqasc_config_port(struct uart_port *port, int flags) -{ - if (flags & UART_CONFIG_TYPE) { - port->type = PORT_LTQ_ASC; - lqasc_request_port(port); - } -} - -static int -lqasc_verify_port(struct uart_port *port, - struct serial_struct *ser) -{ - int ret = 0; - if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC) - ret = -EINVAL; - if (ser->irq < 0 || ser->irq >= NR_IRQS) - ret = -EINVAL; - if (ser->baud_base < 9600) - ret = -EINVAL; - return ret; -} - -static struct uart_ops lqasc_pops = { - .tx_empty = lqasc_tx_empty, - .set_mctrl = lqasc_set_mctrl, - .get_mctrl = lqasc_get_mctrl, - .stop_tx = lqasc_stop_tx, - .start_tx = lqasc_start_tx, - .stop_rx = lqasc_stop_rx, - .enable_ms = lqasc_enable_ms, - .break_ctl = lqasc_break_ctl, - .startup = lqasc_startup, - .shutdown = lqasc_shutdown, - .set_termios = lqasc_set_termios, - .type = lqasc_type, - .release_port = lqasc_release_port, - .request_port = lqasc_request_port, - .config_port = lqasc_config_port, - .verify_port = 
lqasc_verify_port, -}; - -static void -lqasc_console_putchar(struct uart_port *port, int ch) -{ - int fifofree; - - if (!port->membase) - return; - - do { - fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT) - & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; - } while (fifofree == 0); - ltq_w8(ch, port->membase + LTQ_ASC_TBUF); -} - - -static void -lqasc_console_write(struct console *co, const char *s, u_int count) -{ - struct ltq_uart_port *ltq_port; - struct uart_port *port; - unsigned long flags; - - if (co->index >= MAXPORTS) - return; - - ltq_port = lqasc_port[co->index]; - if (!ltq_port) - return; - - port = <q_port->port; - - spin_lock_irqsave(<q_asc_lock, flags); - uart_console_write(port, s, count, lqasc_console_putchar); - spin_unlock_irqrestore(<q_asc_lock, flags); -} - -static int __init -lqasc_console_setup(struct console *co, char *options) -{ - struct ltq_uart_port *ltq_port; - struct uart_port *port; - int baud = 115200; - int bits = 8; - int parity = 'n'; - int flow = 'n'; - - if (co->index >= MAXPORTS) - return -ENODEV; - - ltq_port = lqasc_port[co->index]; - if (!ltq_port) - return -ENODEV; - - port = <q_port->port; - - port->uartclk = clk_get_rate(ltq_port->clk); - - if (options) - uart_parse_options(options, &baud, &parity, &bits, &flow); - return uart_set_options(port, co, baud, parity, bits, flow); -} - -static struct console lqasc_console = { - .name = "ttyLTQ", - .write = lqasc_console_write, - .device = uart_console_device, - .setup = lqasc_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, - .data = &lqasc_reg, -}; - -static int __init -lqasc_console_init(void) -{ - register_console(&lqasc_console); - return 0; -} -console_initcall(lqasc_console_init); - -static struct uart_driver lqasc_reg = { - .owner = THIS_MODULE, - .driver_name = DRVNAME, - .dev_name = "ttyLTQ", - .major = 0, - .minor = 0, - .nr = MAXPORTS, - .cons = &lqasc_console, -}; - -static int __init -lqasc_probe(struct platform_device *pdev) -{ - struct ltq_uart_port *ltq_port; - struct uart_port *port; - struct resource *mmres, *irqres; - int tx_irq, rx_irq, err_irq; - struct clk *clk; - int ret; - - mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!mmres || !irqres) - return -ENODEV; - - if (pdev->id >= MAXPORTS) - return -EBUSY; - - if (lqasc_port[pdev->id] != NULL) - return -EBUSY; - - clk = clk_get(&pdev->dev, "fpi"); - if (IS_ERR(clk)) { - pr_err("failed to get fpi clk\n"); - return -ENOENT; - } - - tx_irq = platform_get_irq_byname(pdev, "tx"); - rx_irq = platform_get_irq_byname(pdev, "rx"); - err_irq = platform_get_irq_byname(pdev, "err"); - if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0)) - return -ENODEV; - - ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL); - if (!ltq_port) - return -ENOMEM; - - port = <q_port->port; - - port->iotype = SERIAL_IO_MEM; - port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP; - port->ops = &lqasc_pops; - port->fifosize = 16; - port->type = PORT_LTQ_ASC, - port->line = pdev->id; - port->dev = &pdev->dev; - - port->irq = tx_irq; /* unused, just to be backward-compatibe */ - port->mapbase = mmres->start; - - ltq_port->clk = clk; - - ltq_port->tx_irq = tx_irq; - ltq_port->rx_irq = rx_irq; - ltq_port->err_irq = err_irq; - - lqasc_port[pdev->id] = ltq_port; - platform_set_drvdata(pdev, ltq_port); - - ret = uart_add_one_port(&lqasc_reg, port); - - return ret; -} - -static struct platform_driver lqasc_driver = { - .driver = { - .name = DRVNAME, - .owner = THIS_MODULE, - }, -}; - -int 
__init -init_lqasc(void) -{ - int ret; - - ret = uart_register_driver(&lqasc_reg); - if (ret != 0) - return ret; - - ret = platform_driver_probe(&lqasc_driver, lqasc_probe); - if (ret != 0) - uart_unregister_driver(&lqasc_reg); - - return ret; -} - -module_init(init_lqasc); - -MODULE_DESCRIPTION("Lantiq serial port driver"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/tty/serial/of_serial.c b/trunk/drivers/tty/serial/of_serial.c index c911b2419abb..0e8eec516df4 100644 --- a/trunk/drivers/tty/serial/of_serial.c +++ b/trunk/drivers/tty/serial/of_serial.c @@ -80,17 +80,14 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev, /* * Try to register a serial port */ -static struct of_device_id of_platform_serial_table[]; static int __devinit of_platform_serial_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct of_serial_info *info; struct uart_port port; int port_type; int ret; - match = of_match_device(of_platform_serial_table, &ofdev->dev); - if (!match) + if (!ofdev->dev.of_match) return -EINVAL; if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) @@ -100,7 +97,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev) if (info == NULL) return -ENOMEM; - port_type = (unsigned long)match->data; + port_type = (unsigned long)ofdev->dev.of_match->data; ret = of_platform_serial_setup(ofdev, port_type, &port); if (ret) goto out; diff --git a/trunk/drivers/usb/gadget/fsl_qe_udc.c b/trunk/drivers/usb/gadget/fsl_qe_udc.c index 3a68e09309f7..36613b37c504 100644 --- a/trunk/drivers/usb/gadget/fsl_qe_udc.c +++ b/trunk/drivers/usb/gadget/fsl_qe_udc.c @@ -2539,18 +2539,15 @@ static void qe_udc_release(struct device *dev) } /* Driver probe functions */ -static const struct of_device_id qe_udc_match[]; static int __devinit qe_udc_probe(struct platform_device *ofdev) { - const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct qe_ep *ep; unsigned int ret = 0; unsigned int i; const void *prop; - match = of_match_device(qe_udc_match, &ofdev->dev); - if (!match) + if (!ofdev->dev.of_match) return -EINVAL; prop = of_get_property(np, "mode", NULL); @@ -2564,7 +2561,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev) return -ENOMEM; } - udc_controller->soc_type = (unsigned long)match->data; + udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; udc_controller->usb_regs = of_iomap(np, 0); if (!udc_controller->usb_regs) { ret = -ENOMEM; diff --git a/trunk/drivers/usb/host/ehci-omap.c b/trunk/drivers/usb/host/ehci-omap.c index 627f3a678759..7e41a95c5ceb 100644 --- a/trunk/drivers/usb/host/ehci-omap.c +++ b/trunk/drivers/usb/host/ehci-omap.c @@ -40,7 +40,6 @@ #include #include #include -#include /* EHCI Register Set */ #define EHCI_INSNREG04 (0xA0) @@ -119,8 +118,6 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) struct ehci_hcd *omap_ehci; int ret = -ENODEV; int irq; - int i; - char supply[7]; if (usb_disabled()) return -ENODEV; @@ -161,23 +158,6 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); hcd->regs = regs; - /* get ehci regulator and enable */ - for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { - if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) { - pdata->regulator[i] = NULL; - continue; - } - snprintf(supply, sizeof(supply), "hsusb%d", i); - pdata->regulator[i] = regulator_get(dev, supply); - if (IS_ERR(pdata->regulator[i])) { - pdata->regulator[i] = NULL; - dev_dbg(dev, - "failed to get 
ehci port%d regulator\n", i); - } else { - regulator_enable(pdata->regulator[i]); - } - } - ret = omap_usbhs_enable(dev); if (ret) { dev_err(dev, "failed to start usbhs with err %d\n", ret); diff --git a/trunk/drivers/usb/host/isp1760-hcd.c b/trunk/drivers/usb/host/isp1760-hcd.c index 7b2e69aa2e98..795345ad45e6 100644 --- a/trunk/drivers/usb/host/isp1760-hcd.c +++ b/trunk/drivers/usb/host/isp1760-hcd.c @@ -1633,7 +1633,6 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) ints[i].qh = NULL; ints[i].qtd = NULL; - urb->status = status; isp1760_urb_done(hcd, urb); if (qtd) pe(hcd, qh, qtd); diff --git a/trunk/drivers/usb/host/xhci-hub.c b/trunk/drivers/usb/host/xhci-hub.c index 73f75d26436c..a78f2ebd11b7 100644 --- a/trunk/drivers/usb/host/xhci-hub.c +++ b/trunk/drivers/usb/host/xhci-hub.c @@ -777,7 +777,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) if (t1 != t2) xhci_writel(xhci, t2, port_array[port_index]); - if (hcd->speed != HCD_USB3) { + if (DEV_HIGHSPEED(t1)) { /* enable remote wake up for USB 2.0 */ u32 __iomem *addr; u32 tmp; @@ -866,21 +866,6 @@ int xhci_bus_resume(struct usb_hcd *hcd) temp |= PORT_LINK_STROBE | XDEV_U0; xhci_writel(xhci, temp, port_array[port_index]); } - /* wait for the port to enter U0 and report port link - * state change. - */ - spin_unlock_irqrestore(&xhci->lock, flags); - msleep(20); - spin_lock_irqsave(&xhci->lock, flags); - - /* Clear PLC */ - temp = xhci_readl(xhci, port_array[port_index]); - if (temp & PORT_PLC) { - temp = xhci_port_state_to_neutral(temp); - temp |= PORT_PLC; - xhci_writel(xhci, temp, port_array[port_index]); - } - slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1); if (slot_id) @@ -888,7 +873,7 @@ int xhci_bus_resume(struct usb_hcd *hcd) } else xhci_writel(xhci, temp, port_array[port_index]); - if (hcd->speed != HCD_USB3) { + if (DEV_HIGHSPEED(temp)) { /* disable remote wake up for USB 2.0 */ u32 __iomem *addr; u32 tmp; diff --git a/trunk/drivers/usb/musb/musb_gadget.c b/trunk/drivers/usb/musb/musb_gadget.c index f47c20197c61..6dfbf9ffd7a6 100644 --- a/trunk/drivers/usb/musb/musb_gadget.c +++ b/trunk/drivers/usb/musb/musb_gadget.c @@ -1887,9 +1887,11 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, otg_set_vbus(musb->xceiv, 1); hcd->self.uses_pio_for_control = 1; + + if (musb->xceiv->last_event == USB_EVENT_NONE) + pm_runtime_put(musb->controller); + } - if (musb->xceiv->last_event == USB_EVENT_NONE) - pm_runtime_put(musb->controller); return 0; diff --git a/trunk/drivers/usb/musb/omap2430.c b/trunk/drivers/usb/musb/omap2430.c index e9e60b6e0583..57a27fa954b4 100644 --- a/trunk/drivers/usb/musb/omap2430.c +++ b/trunk/drivers/usb/musb/omap2430.c @@ -270,7 +270,7 @@ static int musb_otg_notifications(struct notifier_block *nb, DBG(4, "VBUS Disconnect\n"); #ifdef CONFIG_USB_GADGET_MUSB_HDRC - if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) + if (is_otg_enabled(musb)) if (musb->gadget_driver) #endif { diff --git a/trunk/drivers/vhost/vhost.c b/trunk/drivers/vhost/vhost.c index 7aa4eea930f1..2ab291241635 100644 --- a/trunk/drivers/vhost/vhost.c +++ b/trunk/drivers/vhost/vhost.c @@ -4,7 +4,7 @@ * Author: Michael S. Tsirkin * * Inspiration, some code, and most witty comments come from - * Documentation/virtual/lguest/lguest.c, by Rusty Russell + * Documentation/lguest/lguest.c, by Rusty Russell * * This work is licensed under the terms of the GNU GPL, version 2. 
* diff --git a/trunk/drivers/video/acornfb.c b/trunk/drivers/video/acornfb.c index 6183a57eb69d..82acb8dc4aa1 100644 --- a/trunk/drivers/video/acornfb.c +++ b/trunk/drivers/video/acornfb.c @@ -66,7 +66,7 @@ * have. Allow 1% either way on the nominal for TVs. */ #define NR_MONTYPES 6 -static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = { +static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = { { /* TV */ .hfmin = 15469, .hfmax = 15781, @@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = { /* * Everything after here is initialisation!!! */ -static struct fb_videomode modedb[] __devinitdata = { +static struct fb_videomode modedb[] __initdata = { { /* 320x256 @ 50Hz */ NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, FB_SYNC_COMP_HIGH_ACT, @@ -925,7 +925,8 @@ static struct fb_videomode modedb[] __devinitdata = { } }; -static struct fb_videomode acornfb_default_mode __devinitdata = { +static struct fb_videomode __initdata +acornfb_default_mode = { .name = NULL, .refresh = 60, .xres = 640, @@ -941,7 +942,7 @@ static struct fb_videomode acornfb_default_mode __devinitdata = { .vmode = FB_VMODE_NONINTERLACED }; -static void __devinit acornfb_init_fbinfo(void) +static void __init acornfb_init_fbinfo(void) { static int first = 1; @@ -1017,7 +1018,8 @@ static void __devinit acornfb_init_fbinfo(void) * size can optionally be followed by 'M' or 'K' for * MB or KB respectively. */ -static void __devinit acornfb_parse_mon(char *opt) +static void __init +acornfb_parse_mon(char *opt) { char *p = opt; @@ -1064,7 +1066,8 @@ static void __devinit acornfb_parse_mon(char *opt) current_par.montype = -1; } -static void __devinit acornfb_parse_montype(char *opt) +static void __init +acornfb_parse_montype(char *opt) { current_par.montype = -2; @@ -1105,7 +1108,8 @@ static void __devinit acornfb_parse_montype(char *opt) } } -static void __devinit acornfb_parse_dram(char *opt) +static void __init +acornfb_parse_dram(char *opt) { unsigned int size; @@ -1130,14 +1134,15 @@ static void __devinit acornfb_parse_dram(char *opt) static struct options { char *name; void (*parse)(char *opt); -} opt_table[] __devinitdata = { +} opt_table[] __initdata = { { "mon", acornfb_parse_mon }, { "montype", acornfb_parse_montype }, { "dram", acornfb_parse_dram }, { NULL, NULL } }; -static int __devinit acornfb_setup(char *options) +int __init +acornfb_setup(char *options) { struct options *optp; char *opt; @@ -1174,7 +1179,8 @@ static int __devinit acornfb_setup(char *options) * Detect type of monitor connected * For now, we just assume SVGA */ -static int __devinit acornfb_detect_monitortype(void) +static int __init +acornfb_detect_monitortype(void) { return 4; } diff --git a/trunk/drivers/video/atafb.c b/trunk/drivers/video/atafb.c index 64e41f5448c4..5b2b5ef4edba 100644 --- a/trunk/drivers/video/atafb.c +++ b/trunk/drivers/video/atafb.c @@ -3117,7 +3117,7 @@ int __init atafb_init(void) atafb_ops.fb_setcolreg = &falcon_setcolreg; error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, IRQ_TYPE_PRIO, - "framebuffer:modeswitch", + "framebuffer/modeswitch", falcon_vbl_switcher); if (error) return error; diff --git a/trunk/drivers/video/fbmem.c b/trunk/drivers/video/fbmem.c index 5aac00eb1830..e0c2284924b6 100644 --- a/trunk/drivers/video/fbmem.c +++ b/trunk/drivers/video/fbmem.c @@ -42,34 +42,9 @@ #define FBPIXMAPSIZE (1024 * 8) -static DEFINE_MUTEX(registration_lock); struct fb_info *registered_fb[FB_MAX] __read_mostly; int num_registered_fb __read_mostly; -static struct fb_info *get_fb_info(unsigned int idx) -{ - 
struct fb_info *fb_info; - - if (idx >= FB_MAX) - return ERR_PTR(-ENODEV); - - mutex_lock(®istration_lock); - fb_info = registered_fb[idx]; - if (fb_info) - atomic_inc(&fb_info->count); - mutex_unlock(®istration_lock); - - return fb_info; -} - -static void put_fb_info(struct fb_info *fb_info) -{ - if (!atomic_dec_and_test(&fb_info->count)) - return; - if (fb_info->fbops->fb_destroy) - fb_info->fbops->fb_destroy(fb_info); -} - int lock_fb_info(struct fb_info *info) { mutex_lock(&info->lock); @@ -672,7 +647,6 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; } static void *fb_seq_start(struct seq_file *m, loff_t *pos) { - mutex_lock(®istration_lock); return (*pos < FB_MAX) ? pos : NULL; } @@ -684,7 +658,6 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos) static void fb_seq_stop(struct seq_file *m, void *v) { - mutex_unlock(®istration_lock); } static int fb_seq_show(struct seq_file *m, void *v) @@ -717,30 +690,13 @@ static const struct file_operations fb_proc_fops = { .release = seq_release, }; -/* - * We hold a reference to the fb_info in file->private_data, - * but if the current registered fb has changed, we don't - * actually want to use it. - * - * So look up the fb_info using the inode minor number, - * and just verify it against the reference we have. - */ -static struct fb_info *file_fb_info(struct file *file) -{ - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; - - if (info != file->private_data) - info = NULL; - return info; -} - static ssize_t fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; - struct fb_info *info = file_fb_info(file); + struct inode *inode = file->f_path.dentry->d_inode; + int fbidx = iminor(inode); + struct fb_info *info = registered_fb[fbidx]; u8 *buffer, *dst; u8 __iomem *src; int c, cnt = 0, err = 0; @@ -805,7 +761,9 @@ static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; - struct fb_info *info = file_fb_info(file); + struct inode *inode = file->f_path.dentry->d_inode; + int fbidx = iminor(inode); + struct fb_info *info = registered_fb[fbidx]; u8 *buffer, *src; u8 __iomem *dst; int c, cnt = 0, err = 0; @@ -1183,10 +1141,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct fb_info *info = file_fb_info(file); + struct inode *inode = file->f_path.dentry->d_inode; + int fbidx = iminor(inode); + struct fb_info *info = registered_fb[fbidx]; - if (!info) - return -ENODEV; return do_fb_ioctl(info, cmd, arg); } @@ -1307,13 +1265,12 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd, static long fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct fb_info *info = file_fb_info(file); - struct fb_ops *fb; + struct inode *inode = file->f_path.dentry->d_inode; + int fbidx = iminor(inode); + struct fb_info *info = registered_fb[fbidx]; + struct fb_ops *fb = info->fbops; long ret = -ENOIOCTLCMD; - if (!info) - return -ENODEV; - fb = info->fbops; switch(cmd) { case FBIOGET_VSCREENINFO: case FBIOPUT_VSCREENINFO: @@ -1346,18 +1303,16 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, static int fb_mmap(struct file *file, struct vm_area_struct * vma) { - struct fb_info *info = file_fb_info(file); - struct fb_ops *fb; + int fbidx = iminor(file->f_path.dentry->d_inode); + struct 
fb_info *info = registered_fb[fbidx]; + struct fb_ops *fb = info->fbops; unsigned long off; unsigned long start; u32 len; - if (!info) - return -ENODEV; if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; off = vma->vm_pgoff << PAGE_SHIFT; - fb = info->fbops; if (!fb) return -ENODEV; mutex_lock(&info->mm_lock); @@ -1406,16 +1361,14 @@ __releases(&info->lock) struct fb_info *info; int res = 0; - info = get_fb_info(fbidx); - if (!info) { + if (fbidx >= FB_MAX) + return -ENODEV; + info = registered_fb[fbidx]; + if (!info) request_module("fb%d", fbidx); - info = get_fb_info(fbidx); - if (!info) - return -ENODEV; - } - if (IS_ERR(info)) - return PTR_ERR(info); - + info = registered_fb[fbidx]; + if (!info) + return -ENODEV; mutex_lock(&info->lock); if (!try_module_get(info->fbops->owner)) { res = -ENODEV; @@ -1433,8 +1386,6 @@ __releases(&info->lock) #endif out: mutex_unlock(&info->lock); - if (res) - put_fb_info(info); return res; } @@ -1450,7 +1401,6 @@ __releases(&info->lock) info->fbops->fb_release(info,1); module_put(info->fbops->owner); mutex_unlock(&info->lock); - put_fb_info(info); return 0; } @@ -1537,10 +1487,8 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena, return false; } -static int do_unregister_framebuffer(struct fb_info *fb_info); - #define VGA_FB_PHYS 0xA0000 -static void do_remove_conflicting_framebuffers(struct apertures_struct *a, +void remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary) { int i; @@ -1562,32 +1510,43 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, printk(KERN_INFO "fb: conflicting fb hw usage " "%s vs %s - removing generic driver\n", name, registered_fb[i]->fix.id); - do_unregister_framebuffer(registered_fb[i]); + unregister_framebuffer(registered_fb[i]); } } } +EXPORT_SYMBOL(remove_conflicting_framebuffers); -static int do_register_framebuffer(struct fb_info *fb_info) +/** + * register_framebuffer - registers a frame buffer device + * @fb_info: frame buffer info structure + * + * Registers a frame buffer device @fb_info. + * + * Returns negative errno on error, or zero for success. + * + */ + +int +register_framebuffer(struct fb_info *fb_info) { int i; struct fb_event event; struct fb_videomode mode; + if (num_registered_fb == FB_MAX) + return -ENXIO; + if (fb_check_foreignness(fb_info)) return -ENOSYS; - do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, + remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, fb_is_primary_device(fb_info)); - if (num_registered_fb == FB_MAX) - return -ENXIO; - num_registered_fb++; for (i = 0 ; i < FB_MAX; i++) if (!registered_fb[i]) break; fb_info->node = i; - atomic_set(&fb_info->count, 1); mutex_init(&fb_info->lock); mutex_init(&fb_info->mm_lock); @@ -1633,14 +1592,36 @@ static int do_register_framebuffer(struct fb_info *fb_info) return 0; } -static int do_unregister_framebuffer(struct fb_info *fb_info) + +/** + * unregister_framebuffer - releases a frame buffer device + * @fb_info: frame buffer info structure + * + * Unregisters a frame buffer device @fb_info. + * + * Returns negative errno on error, or zero for success. + * + * This function will also notify the framebuffer console + * to release the driver. + * + * This is meant to be called within a driver's module_exit() + * function. If this is called outside module_exit(), ensure + * that the driver implements fb_open() and fb_release() to + * check that no processes are using the device. 
+ */ + +int +unregister_framebuffer(struct fb_info *fb_info) { struct fb_event event; int i, ret = 0; i = fb_info->node; - if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) - return -EINVAL; + if (!registered_fb[i]) { + ret = -EINVAL; + goto done; + } + if (!lock_fb_info(fb_info)) return -ENODEV; @@ -1648,14 +1629,16 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); unlock_fb_info(fb_info); - if (ret) - return -EINVAL; + if (ret) { + ret = -EINVAL; + goto done; + } if (fb_info->pixmap.addr && (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) kfree(fb_info->pixmap.addr); fb_destroy_modelist(&fb_info->modelist); - registered_fb[i] = NULL; + registered_fb[i]=NULL; num_registered_fb--; fb_cleanup_device(fb_info); device_destroy(fb_class, MKDEV(FB_MAJOR, i)); @@ -1663,65 +1646,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); /* this may free fb info */ - put_fb_info(fb_info); - return 0; -} - -void remove_conflicting_framebuffers(struct apertures_struct *a, - const char *name, bool primary) -{ - mutex_lock(®istration_lock); - do_remove_conflicting_framebuffers(a, name, primary); - mutex_unlock(®istration_lock); -} -EXPORT_SYMBOL(remove_conflicting_framebuffers); - -/** - * register_framebuffer - registers a frame buffer device - * @fb_info: frame buffer info structure - * - * Registers a frame buffer device @fb_info. - * - * Returns negative errno on error, or zero for success. - * - */ -int -register_framebuffer(struct fb_info *fb_info) -{ - int ret; - - mutex_lock(®istration_lock); - ret = do_register_framebuffer(fb_info); - mutex_unlock(®istration_lock); - - return ret; -} - -/** - * unregister_framebuffer - releases a frame buffer device - * @fb_info: frame buffer info structure - * - * Unregisters a frame buffer device @fb_info. - * - * Returns negative errno on error, or zero for success. - * - * This function will also notify the framebuffer console - * to release the driver. - * - * This is meant to be called within a driver's module_exit() - * function. If this is called outside module_exit(), ensure - * that the driver implements fb_open() and fb_release() to - * check that no processes are using the device. - */ -int -unregister_framebuffer(struct fb_info *fb_info) -{ - int ret; - - mutex_lock(®istration_lock); - ret = do_unregister_framebuffer(fb_info); - mutex_unlock(®istration_lock); - + if (fb_info->fbops->fb_destroy) + fb_info->fbops->fb_destroy(fb_info); +done: return ret; } diff --git a/trunk/drivers/watchdog/Kconfig b/trunk/drivers/watchdog/Kconfig index 022f9eb0b7bf..1b0f98bc51b5 100644 --- a/trunk/drivers/watchdog/Kconfig +++ b/trunk/drivers/watchdog/Kconfig @@ -990,12 +990,6 @@ config BCM63XX_WDT To compile this driver as a loadable module, choose M here. The module will be called bcm63xx_wdt. -config LANTIQ_WDT - tristate "Lantiq SoC watchdog" - depends on LANTIQ - help - Hardware driver for the Lantiq SoC Watchdog Timer. 
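    Illustrative sketch, not part of the patch: the fbmem.c hunk earlier in this diff restores the plain register_framebuffer()/unregister_framebuffer() entry points whose kernel-doc is quoted above (negative errno on error, zero on success; unregister is meant for the module_exit() path). Assuming a driver that keeps its own struct fb_info, a minimal caller looks roughly like this; the mydrv_* names are hypothetical and only stand in for driver-private code.

        /* Sketch only -- not part of the patch.  mydrv_* is hypothetical. */
        static struct fb_info *mydrv_fb_info;   /* filled in by the driver's probe code */

        static int mydrv_register(void)
        {
                int ret;

                ret = register_framebuffer(mydrv_fb_info);
                if (ret)        /* negative errno on failure, 0 on success */
                        return ret;
                return 0;
        }

        static void mydrv_unregister(void)
        {
                /* intended for the driver's module_exit()/remove path */
                unregister_framebuffer(mydrv_fb_info);
        }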
- # PARISC Architecture # POWERPC Architecture diff --git a/trunk/drivers/watchdog/Makefile b/trunk/drivers/watchdog/Makefile index ed26f7094e47..3f8608b922a7 100644 --- a/trunk/drivers/watchdog/Makefile +++ b/trunk/drivers/watchdog/Makefile @@ -123,7 +123,6 @@ obj-$(CONFIG_AR7_WDT) += ar7_wdt.o obj-$(CONFIG_TXX9_WDT) += txx9wdt.o obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o -obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o # PARISC Architecture diff --git a/trunk/drivers/watchdog/lantiq_wdt.c b/trunk/drivers/watchdog/lantiq_wdt.c deleted file mode 100644 index 7d82adac1cb2..000000000000 --- a/trunk/drivers/watchdog/lantiq_wdt.c +++ /dev/null @@ -1,261 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2010 John Crispin - * Based on EP93xx wdt driver - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* Section 3.4 of the datasheet - * The password sequence protects the WDT control register from unintended - * write actions, which might cause malfunction of the WDT. - * - * essentially the following two magic passwords need to be written to allow - * IO access to the WDT core - */ -#define LTQ_WDT_PW1 0x00BE0000 -#define LTQ_WDT_PW2 0x00DC0000 - -#define LTQ_WDT_CR 0x0 /* watchdog control register */ -#define LTQ_WDT_SR 0x8 /* watchdog status register */ - -#define LTQ_WDT_SR_EN (0x1 << 31) /* enable bit */ -#define LTQ_WDT_SR_PWD (0x3 << 26) /* turn on power */ -#define LTQ_WDT_SR_CLKDIV (0x3 << 24) /* turn on clock and set */ - /* divider to 0x40000 */ -#define LTQ_WDT_DIVIDER 0x40000 -#define LTQ_MAX_TIMEOUT ((1 << 16) - 1) /* the reload field is 16 bit */ - -static int nowayout = WATCHDOG_NOWAYOUT; - -static void __iomem *ltq_wdt_membase; -static unsigned long ltq_io_region_clk_rate; - -static unsigned long ltq_wdt_bootstatus; -static unsigned long ltq_wdt_in_use; -static int ltq_wdt_timeout = 30; -static int ltq_wdt_ok_to_close; - -static void -ltq_wdt_enable(void) -{ - ltq_wdt_timeout = ltq_wdt_timeout * - (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; - if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT) - ltq_wdt_timeout = LTQ_MAX_TIMEOUT; - - /* write the first password magic */ - ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); - /* write the second magic plus the configuration and new timeout */ - ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | - LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR); -} - -static void -ltq_wdt_disable(void) -{ - /* write the first password magic */ - ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); - /* write the second password magic with no config - * this turns the watchdog off - */ - ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR); -} - -static ssize_t -ltq_wdt_write(struct file *file, const char __user *data, - size_t len, loff_t *ppos) -{ - if (len) { - if (!nowayout) { - size_t i; - - ltq_wdt_ok_to_close = 0; - for (i = 0; i != len; i++) { - char c; - - if (get_user(c, data + i)) - return -EFAULT; - if (c == 'V') - ltq_wdt_ok_to_close = 1; - else - ltq_wdt_ok_to_close = 0; - } - } - ltq_wdt_enable(); - } - - return len; -} - -static struct watchdog_info ident = { - .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | - WDIOF_CARDRESET, - .identity = "ltq_wdt", -}; - -static long -ltq_wdt_ioctl(struct file *file, - unsigned int cmd, unsigned 
long arg) -{ - int ret = -ENOTTY; - - switch (cmd) { - case WDIOC_GETSUPPORT: - ret = copy_to_user((struct watchdog_info __user *)arg, &ident, - sizeof(ident)) ? -EFAULT : 0; - break; - - case WDIOC_GETBOOTSTATUS: - ret = put_user(ltq_wdt_bootstatus, (int __user *)arg); - break; - - case WDIOC_GETSTATUS: - ret = put_user(0, (int __user *)arg); - break; - - case WDIOC_SETTIMEOUT: - ret = get_user(ltq_wdt_timeout, (int __user *)arg); - if (!ret) - ltq_wdt_enable(); - /* intentional drop through */ - case WDIOC_GETTIMEOUT: - ret = put_user(ltq_wdt_timeout, (int __user *)arg); - break; - - case WDIOC_KEEPALIVE: - ltq_wdt_enable(); - ret = 0; - break; - } - return ret; -} - -static int -ltq_wdt_open(struct inode *inode, struct file *file) -{ - if (test_and_set_bit(0, <q_wdt_in_use)) - return -EBUSY; - ltq_wdt_in_use = 1; - ltq_wdt_enable(); - - return nonseekable_open(inode, file); -} - -static int -ltq_wdt_release(struct inode *inode, struct file *file) -{ - if (ltq_wdt_ok_to_close) - ltq_wdt_disable(); - else - pr_err("ltq_wdt: watchdog closed without warning\n"); - ltq_wdt_ok_to_close = 0; - clear_bit(0, <q_wdt_in_use); - - return 0; -} - -static const struct file_operations ltq_wdt_fops = { - .owner = THIS_MODULE, - .write = ltq_wdt_write, - .unlocked_ioctl = ltq_wdt_ioctl, - .open = ltq_wdt_open, - .release = ltq_wdt_release, - .llseek = no_llseek, -}; - -static struct miscdevice ltq_wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = <q_wdt_fops, -}; - -static int __init -ltq_wdt_probe(struct platform_device *pdev) -{ - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - struct clk *clk; - - if (!res) { - dev_err(&pdev->dev, "cannot obtain I/O memory region"); - return -ENOENT; - } - res = devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, "cannot request I/O memory region"); - return -EBUSY; - } - ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); - if (!ltq_wdt_membase) { - dev_err(&pdev->dev, "cannot remap I/O memory region\n"); - return -ENOMEM; - } - - /* we do not need to enable the clock as it is always running */ - clk = clk_get(&pdev->dev, "io"); - WARN_ON(!clk); - ltq_io_region_clk_rate = clk_get_rate(clk); - clk_put(clk); - - if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST) - ltq_wdt_bootstatus = WDIOF_CARDRESET; - - return misc_register(<q_wdt_miscdev); -} - -static int __devexit -ltq_wdt_remove(struct platform_device *pdev) -{ - misc_deregister(<q_wdt_miscdev); - - if (ltq_wdt_membase) - iounmap(ltq_wdt_membase); - - return 0; -} - - -static struct platform_driver ltq_wdt_driver = { - .remove = __devexit_p(ltq_wdt_remove), - .driver = { - .name = "ltq_wdt", - .owner = THIS_MODULE, - }, -}; - -static int __init -init_ltq_wdt(void) -{ - return platform_driver_probe(<q_wdt_driver, ltq_wdt_probe); -} - -static void __exit -exit_ltq_wdt(void) -{ - return platform_driver_unregister(<q_wdt_driver); -} - -module_init(init_ltq_wdt); -module_exit(exit_ltq_wdt); - -module_param(nowayout, int, 0); -MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); - -MODULE_AUTHOR("John Crispin "); -MODULE_DESCRIPTION("Lantiq SoC Watchdog"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/trunk/drivers/watchdog/mpc8xxx_wdt.c b/trunk/drivers/watchdog/mpc8xxx_wdt.c index eed5436ffb51..528bceb220fd 100644 --- a/trunk/drivers/watchdog/mpc8xxx_wdt.c +++ b/trunk/drivers/watchdog/mpc8xxx_wdt.c @@ -185,20 
+185,17 @@ static struct miscdevice mpc8xxx_wdt_miscdev = { .fops = &mpc8xxx_wdt_fops, }; -static const struct of_device_id mpc8xxx_wdt_match[]; static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) { int ret; - const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct mpc8xxx_wdt_type *wdt_type; u32 freq = fsl_get_sys_freq(); bool enabled; - match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev); - if (!match) + if (!ofdev->dev.of_match) return -EINVAL; - wdt_type = match->data; + wdt_type = ofdev->dev.of_match->data; if (!freq || freq == -1) return -EINVAL; diff --git a/trunk/drivers/watchdog/mtx-1_wdt.c b/trunk/drivers/watchdog/mtx-1_wdt.c index 1479dc4d6129..5ec5ac1f7878 100644 --- a/trunk/drivers/watchdog/mtx-1_wdt.c +++ b/trunk/drivers/watchdog/mtx-1_wdt.c @@ -66,7 +66,6 @@ static struct { int default_ticks; unsigned long inuse; unsigned gpio; - int gstate; } mtx1_wdt_device; static void mtx1_wdt_trigger(unsigned long unused) @@ -76,13 +75,13 @@ static void mtx1_wdt_trigger(unsigned long unused) spin_lock(&mtx1_wdt_device.lock); if (mtx1_wdt_device.running) ticks--; - - /* toggle wdt gpio */ - mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate; - if (mtx1_wdt_device.gstate) - gpio_direction_output(mtx1_wdt_device.gpio, 1); - else - gpio_direction_input(mtx1_wdt_device.gpio); + /* + * toggle GPIO2_15 + */ + tmp = au_readl(GPIO2_DIR); + tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) | + ((~tmp) & (1 << mtx1_wdt_device.gpio)); + au_writel(tmp, GPIO2_DIR); if (mtx1_wdt_device.queue && ticks) mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); @@ -104,8 +103,7 @@ static void mtx1_wdt_start(void) spin_lock_irqsave(&mtx1_wdt_device.lock, flags); if (!mtx1_wdt_device.queue) { mtx1_wdt_device.queue = 1; - mtx1_wdt_device.gstate = 1; - gpio_direction_output(mtx1_wdt_device.gpio, 1); + gpio_set_value(mtx1_wdt_device.gpio, 1); mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); } mtx1_wdt_device.running++; @@ -119,8 +117,7 @@ static int mtx1_wdt_stop(void) spin_lock_irqsave(&mtx1_wdt_device.lock, flags); if (mtx1_wdt_device.queue) { mtx1_wdt_device.queue = 0; - mtx1_wdt_device.gstate = 0; - gpio_direction_output(mtx1_wdt_device.gpio, 0); + gpio_set_value(mtx1_wdt_device.gpio, 0); } ticks = mtx1_wdt_device.default_ticks; spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags); diff --git a/trunk/drivers/xen/Makefile b/trunk/drivers/xen/Makefile index 4781f806701d..f420f1ff7f13 100644 --- a/trunk/drivers/xen/Makefile +++ b/trunk/drivers/xen/Makefile @@ -4,21 +4,21 @@ obj-y += xenbus/ nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_features.o := $(nostackp) -obj-$(CONFIG_BLOCK) += biomerge.o -obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o -obj-$(CONFIG_XEN_XENCOMM) += xencomm.o -obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o -obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o -obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o +obj-$(CONFIG_BLOCK) += biomerge.o +obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o +obj-$(CONFIG_XEN_XENCOMM) += xencomm.o +obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o +obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o +obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o -obj-$(CONFIG_XENFS) += xenfs/ +obj-$(CONFIG_XENFS) += xenfs/ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o -obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o -obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o -obj-$(CONFIG_XEN_DOM0) += pci.o +obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o +obj-$(CONFIG_SWIOTLB_XEN) += 
swiotlb-xen.o +obj-$(CONFIG_XEN_DOM0) += pci.o -xen-evtchn-y := evtchn.o +xen-evtchn-y := evtchn.o xen-gntdev-y := gntdev.o xen-gntalloc-y := gntalloc.o -xen-platform-pci-y := platform-pci.o +xen-platform-pci-y := platform-pci.o diff --git a/trunk/drivers/xen/balloon.c b/trunk/drivers/xen/balloon.c index f54290baa3db..043af8ad6b60 100644 --- a/trunk/drivers/xen/balloon.c +++ b/trunk/drivers/xen/balloon.c @@ -114,6 +114,7 @@ static void __balloon_append(struct page *page) if (PageHighMem(page)) { list_add_tail(&page->lru, &ballooned_pages); balloon_stats.balloon_high++; + dec_totalhigh_pages(); } else { list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; @@ -123,8 +124,6 @@ static void __balloon_append(struct page *page) static void balloon_append(struct page *page) { __balloon_append(page); - if (PageHighMem(page)) - dec_totalhigh_pages(); totalram_pages--; } @@ -194,7 +193,7 @@ static enum bp_state update_schedule(enum bp_state state) return BP_EAGAIN; } -static long current_credit(void) +static unsigned long current_target(void) { unsigned long target = balloon_stats.target_pages; @@ -203,7 +202,7 @@ static long current_credit(void) balloon_stats.balloon_low + balloon_stats.balloon_high); - return target - balloon_stats.current_pages; + return target; } static enum bp_state increase_reservation(unsigned long nr_pages) @@ -247,7 +246,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages) set_phys_to_machine(pfn, frame_list[i]); /* Link back into the page tables if not highmem. */ - if (xen_pv_domain() && !PageHighMem(page)) { + if (!xen_hvm_domain() && pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), @@ -294,7 +293,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) scrub_page(page); - if (xen_pv_domain() && !PageHighMem(page)) { + if (!xen_hvm_domain() && !PageHighMem(page)) { ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), __pte_ma(0), 0); @@ -338,7 +337,7 @@ static void balloon_process(struct work_struct *work) mutex_lock(&balloon_mutex); do { - credit = current_credit(); + credit = current_target() - balloon_stats.current_pages; if (credit > 0) state = increase_reservation(credit); @@ -421,7 +420,7 @@ void free_xenballooned_pages(int nr_pages, struct page** pages) } /* The balloon may be too large now. Shrink it if needed. */ - if (current_credit()) + if (current_target() != balloon_stats.current_pages) schedule_delayed_work(&balloon_worker, 0); mutex_unlock(&balloon_mutex); @@ -430,7 +429,7 @@ EXPORT_SYMBOL(free_xenballooned_pages); static int __init balloon_init(void) { - unsigned long pfn, extra_pfn_end; + unsigned long pfn, nr_pages, extra_pfn_end; struct page *page; if (!xen_domain()) @@ -438,7 +437,11 @@ static int __init balloon_init(void) pr_info("xen/balloon: Initialising balloon driver.\n"); - balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn; + if (xen_pv_domain()) + nr_pages = xen_start_info->nr_pages; + else + nr_pages = max_pfn; + balloon_stats.current_pages = min(nr_pages, max_pfn); balloon_stats.target_pages = balloon_stats.current_pages; balloon_stats.balloon_low = 0; balloon_stats.balloon_high = 0; @@ -463,7 +466,7 @@ static int __init balloon_init(void) pfn < extra_pfn_end; pfn++) { page = pfn_to_page(pfn); - /* totalram_pages and totalhigh_pages do not include the boot-time + /* totalram_pages doesn't include the boot-time balloon extension, so don't subtract from it. 
*/ __balloon_append(page); } diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c index 3ff822b48145..33167b43ac7e 100644 --- a/trunk/drivers/xen/events.c +++ b/trunk/drivers/xen/events.c @@ -101,7 +101,6 @@ struct irq_info unsigned short gsi; unsigned char vector; unsigned char flags; - uint16_t domid; } pirq; } u; }; @@ -119,8 +118,6 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; -static void enable_dynirq(struct irq_data *data); -static void disable_dynirq(struct irq_data *data); /* Get info for IRQ */ static struct irq_info *info_for_irq(unsigned irq) @@ -187,7 +184,6 @@ static void xen_irq_info_pirq_init(unsigned irq, unsigned short pirq, unsigned short gsi, unsigned short vector, - uint16_t domid, unsigned char flags) { struct irq_info *info = info_for_irq(irq); @@ -197,7 +193,6 @@ static void xen_irq_info_pirq_init(unsigned irq, info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; info->u.pirq.vector = vector; - info->u.pirq.domid = domid; info->u.pirq.flags = flags; } @@ -478,6 +473,16 @@ static void xen_free_irq(unsigned irq) irq_free_desc(irq); } +static void pirq_unmask_notify(int irq) +{ + struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) }; + + if (unlikely(pirq_needs_eoi(irq))) { + int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); + WARN_ON(rc); + } +} + static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; @@ -501,29 +506,6 @@ static bool probing_irq(int irq) return desc && desc->action == NULL; } -static void eoi_pirq(struct irq_data *data) -{ - int evtchn = evtchn_from_irq(data->irq); - struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; - int rc = 0; - - irq_move_irq(data); - - if (VALID_EVTCHN(evtchn)) - clear_evtchn(evtchn); - - if (pirq_needs_eoi(data->irq)) { - rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); - WARN_ON(rc); - } -} - -static void mask_ack_pirq(struct irq_data *data) -{ - disable_dynirq(data); - eoi_pirq(data); -} - static unsigned int __startup_pirq(unsigned int irq) { struct evtchn_bind_pirq bind_pirq; @@ -557,7 +539,7 @@ static unsigned int __startup_pirq(unsigned int irq) out: unmask_evtchn(evtchn); - eoi_pirq(irq_get_irq_data(irq)); + pirq_unmask_notify(irq); return 0; } @@ -597,7 +579,18 @@ static void enable_pirq(struct irq_data *data) static void disable_pirq(struct irq_data *data) { - disable_dynirq(data); +} + +static void ack_pirq(struct irq_data *data) +{ + int evtchn = evtchn_from_irq(data->irq); + + irq_move_irq(data); + + if (VALID_EVTCHN(evtchn)) { + mask_evtchn(evtchn); + clear_evtchn(evtchn); + } } static int find_irq_by_gsi(unsigned gsi) @@ -646,6 +639,9 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, if (irq < 0) goto out; + irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, + name); + irq_op.irq = irq; irq_op.vector = 0; @@ -659,35 +655,9 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, goto out; } - xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF, + xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, shareable ? PIRQ_SHAREABLE : 0); - pirq_query_unmask(irq); - /* We try to use the handler with the appropriate semantic for the - * type of interrupt: if the interrupt doesn't need an eoi - * (pirq_needs_eoi returns false), we treat it like an edge - * triggered interrupt so we use handle_edge_irq. 
- * As a matter of fact this only happens when the corresponding - * physical interrupt is edge triggered or an msi. - * - * On the other hand if the interrupt needs an eoi (pirq_needs_eoi - * returns true) we treat it like a level triggered interrupt so we - * use handle_fasteoi_irq like the native code does for this kind of - * interrupts. - * Depending on the Xen version, pirq_needs_eoi might return true - * not only for level triggered interrupts but for edge triggered - * interrupts too. In any case Xen always honors the eoi mechanism, - * not injecting any more pirqs of the same kind if the first one - * hasn't received an eoi yet. Therefore using the fasteoi handler - * is the right choice either way. - */ - if (pirq_needs_eoi(irq)) - irq_set_chip_and_handler_name(irq, &xen_pirq_chip, - handle_fasteoi_irq, name); - else - irq_set_chip_and_handler_name(irq, &xen_pirq_chip, - handle_edge_irq, name); - out: spin_unlock(&irq_mapping_update_lock); @@ -710,8 +680,7 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) } int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, - int pirq, int vector, const char *name, - domid_t domid) + int pirq, int vector, const char *name) { int irq, ret; @@ -721,10 +690,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, if (irq == -1) goto out; - irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, - name); + irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, + name); - xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); + xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); ret = irq_set_msi_desc(irq, msidesc); if (ret < 0) goto error_irq; @@ -753,16 +722,9 @@ int xen_destroy_irq(int irq) if (xen_initial_domain()) { unmap_irq.pirq = info->u.pirq.pirq; - unmap_irq.domid = info->u.pirq.domid; + unmap_irq.domid = DOMID_SELF; rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); - /* If another domain quits without making the pci_disable_msix - * call, the Xen hypervisor takes care of freeing the PIRQs - * (free_domain_pirqs). 
- */ - if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) - printk(KERN_INFO "domain %d does not have %d anymore\n", - info->u.pirq.domid, info->u.pirq.pirq); - else if (rc) { + if (rc) { printk(KERN_WARNING "unmap irq failed %d\n", rc); goto out; } @@ -797,12 +759,6 @@ int xen_irq_from_pirq(unsigned pirq) return irq; } - -int xen_pirq_from_irq(unsigned irq) -{ - return pirq_from_irq(irq); -} -EXPORT_SYMBOL_GPL(xen_pirq_from_irq); int bind_evtchn_to_irq(unsigned int evtchn) { int irq; @@ -817,7 +773,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) goto out; irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, - handle_edge_irq, "event"); + handle_fasteoi_irq, "event"); xen_irq_info_evtchn_init(irq, evtchn); } @@ -1223,6 +1179,9 @@ static void __xen_evtchn_do_upcall(void) port = (word_idx * BITS_PER_LONG) + bit_idx; irq = evtchn_to_irq[port]; + mask_evtchn(port); + clear_evtchn(port); + if (irq != -1) { desc = irq_to_desc(irq); if (desc) @@ -1378,16 +1337,10 @@ static void ack_dynirq(struct irq_data *data) { int evtchn = evtchn_from_irq(data->irq); - irq_move_irq(data); + irq_move_masked_irq(data); if (VALID_EVTCHN(evtchn)) - clear_evtchn(evtchn); -} - -static void mask_ack_dynirq(struct irq_data *data) -{ - disable_dynirq(data); - ack_dynirq(data); + unmask_evtchn(evtchn); } static int retrigger_dynirq(struct irq_data *data) @@ -1549,18 +1502,6 @@ void xen_poll_irq(int irq) xen_poll_irq_timeout(irq, 0 /* no timeout */); } -/* Check whether the IRQ line is shared with other guests. */ -int xen_test_irq_shared(int irq) -{ - struct irq_info *info = info_for_irq(irq); - struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq }; - - if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) - return 0; - return !(irq_status.flags & XENIRQSTAT_shared); -} -EXPORT_SYMBOL_GPL(xen_test_irq_shared); - void xen_irq_resume(void) { unsigned int cpu, evtchn; @@ -1594,9 +1535,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, - .irq_ack = ack_dynirq, - .irq_mask_ack = mask_ack_dynirq, - + .irq_eoi = ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; @@ -1606,15 +1545,14 @@ static struct irq_chip xen_pirq_chip __read_mostly = { .irq_startup = startup_pirq, .irq_shutdown = shutdown_pirq, + .irq_enable = enable_pirq, - .irq_disable = disable_pirq, + .irq_unmask = enable_pirq, - .irq_mask = disable_dynirq, - .irq_unmask = enable_dynirq, + .irq_disable = disable_pirq, + .irq_mask = disable_pirq, - .irq_ack = eoi_pirq, - .irq_eoi = eoi_pirq, - .irq_mask_ack = mask_ack_pirq, + .irq_ack = ack_pirq, .irq_set_affinity = set_affinity_irq, diff --git a/trunk/drivers/xen/gntalloc.c b/trunk/drivers/xen/gntalloc.c index f6832f46aea4..a7ffdfe19fc9 100644 --- a/trunk/drivers/xen/gntalloc.c +++ b/trunk/drivers/xen/gntalloc.c @@ -427,17 +427,6 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd, return 0; } -static void gntalloc_vma_open(struct vm_area_struct *vma) -{ - struct gntalloc_gref *gref = vma->vm_private_data; - if (!gref) - return; - - spin_lock(&gref_lock); - gref->users++; - spin_unlock(&gref_lock); -} - static void gntalloc_vma_close(struct vm_area_struct *vma) { struct gntalloc_gref *gref = vma->vm_private_data; @@ -452,7 +441,6 @@ static void gntalloc_vma_close(struct vm_area_struct *vma) } static struct vm_operations_struct gntalloc_vmops = { - .open = gntalloc_vma_open, .close = gntalloc_vma_close, }; @@ -483,6 +471,8 @@ static int gntalloc_mmap(struct file 
*filp, struct vm_area_struct *vma) vma->vm_private_data = gref; vma->vm_flags |= VM_RESERVED; + vma->vm_flags |= VM_DONTCOPY; + vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP; vma->vm_ops = &gntalloc_vmops; diff --git a/trunk/drivers/xen/gntdev.c b/trunk/drivers/xen/gntdev.c index f914b26cf0c2..b0f9e8fb0052 100644 --- a/trunk/drivers/xen/gntdev.c +++ b/trunk/drivers/xen/gntdev.c @@ -330,26 +330,17 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages) /* ------------------------------------------------------------------ */ -static void gntdev_vma_open(struct vm_area_struct *vma) -{ - struct grant_map *map = vma->vm_private_data; - - pr_debug("gntdev_vma_open %p\n", vma); - atomic_inc(&map->users); -} - static void gntdev_vma_close(struct vm_area_struct *vma) { struct grant_map *map = vma->vm_private_data; - pr_debug("gntdev_vma_close %p\n", vma); + pr_debug("close %p\n", vma); map->vma = NULL; vma->vm_private_data = NULL; gntdev_put_map(map); } static struct vm_operations_struct gntdev_vmops = { - .open = gntdev_vma_open, .close = gntdev_vma_close, }; @@ -661,10 +652,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) vma->vm_ops = &gntdev_vmops; - vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; - - if (use_ptemod) - vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP; + vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP; vma->vm_private_data = map; diff --git a/trunk/drivers/xen/grant-table.c b/trunk/drivers/xen/grant-table.c index fd725cde6ad1..3745a318defc 100644 --- a/trunk/drivers/xen/grant-table.c +++ b/trunk/drivers/xen/grant-table.c @@ -466,30 +466,13 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, if (map_ops[i].status) continue; - if (map_ops[i].flags & GNTMAP_contains_pte) { - pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + + /* m2p override only supported for GNTMAP_contains_pte mappings */ + if (!(map_ops[i].flags & GNTMAP_contains_pte)) + continue; + pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + (map_ops[i].host_addr & ~PAGE_MASK)); - mfn = pte_mfn(*pte); - } else { - /* If you really wanted to do this: - * mfn = PFN_DOWN(map_ops[i].dev_bus_addr); - * - * The reason we do not implement it is b/c on the - * unmap path (gnttab_unmap_refs) we have no means of - * checking whether the page is !GNTMAP_contains_pte. - * - * That is without some extra data-structure to carry - * the struct page, bool clear_pte, and list_head next - * tuples and deal with allocation/delallocation, etc. - * - * The users of this API set the GNTMAP_contains_pte - * flag so lets just return not supported until it - * becomes neccessary to implement. 
- */ - return -EOPNOTSUPP; - } - ret = m2p_add_override(mfn, pages[i], - map_ops[i].flags & GNTMAP_contains_pte); + mfn = pte_mfn(*pte); + ret = m2p_add_override(mfn, pages[i]); if (ret) return ret; } @@ -511,7 +494,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, return ret; for (i = 0; i < count; i++) { - ret = m2p_remove_override(pages[i], true /* clear the PTE */); + ret = m2p_remove_override(pages[i]); if (ret) return ret; } diff --git a/trunk/drivers/xen/manage.c b/trunk/drivers/xen/manage.c index 0b5366b5be20..a2eee574784e 100644 --- a/trunk/drivers/xen/manage.c +++ b/trunk/drivers/xen/manage.c @@ -70,7 +70,12 @@ static int xen_suspend(void *data) BUG_ON(!irqs_disabled()); - err = syscore_suspend(); + err = sysdev_suspend(PMSG_FREEZE); + if (!err) { + err = syscore_suspend(); + if (err) + sysdev_resume(); + } if (err) { printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", err); @@ -97,6 +102,7 @@ static int xen_suspend(void *data) } syscore_resume(); + sysdev_resume(); return 0; } diff --git a/trunk/drivers/xen/sys-hypervisor.c b/trunk/drivers/xen/sys-hypervisor.c index 1e0fe01eb670..60f1827a32cb 100644 --- a/trunk/drivers/xen/sys-hypervisor.c +++ b/trunk/drivers/xen/sys-hypervisor.c @@ -215,7 +215,7 @@ static struct attribute_group xen_compilation_group = { .attrs = xen_compile_attrs, }; -static int __init xen_compilation_init(void) +int __init static xen_compilation_init(void) { return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); } diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index 257b00e98428..5147bdd3b8e1 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -1102,7 +1102,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) if (!bdev->bd_part) goto out_clear; - ret = 0; if (disk->fops->open) { ret = disk->fops->open(bdev, mode); if (ret == -ERESTARTSYS) { @@ -1119,18 +1118,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) put_disk(disk); goto restart; } + if (ret) + goto out_clear; } - /* - * If the device is invalidated, rescan partition - * if open succeeded or failed with -ENOMEDIUM. - * The latter is necessary to prevent ghost - * partitions on a removed medium. 
- */ - if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) - rescan_partitions(disk, bdev); - if (ret) - goto out_clear; - if (!bdev->bd_openers) { bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); bdi = blk_get_backing_dev_info(bdev); @@ -1138,6 +1128,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) bdi = &default_backing_dev_info; bdev_inode_switch_bdi(bdev->bd_inode, bdi); } + if (bdev->bd_invalidated) + rescan_partitions(disk, bdev); } else { struct block_device *whole; whole = bdget_disk(disk, 0); @@ -1161,14 +1153,13 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) } } else { if (bdev->bd_contains == bdev) { - ret = 0; - if (bdev->bd_disk->fops->open) + if (bdev->bd_disk->fops->open) { ret = bdev->bd_disk->fops->open(bdev, mode); - /* the same as first opener case, read comment there */ - if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) + if (ret) + goto out_unlock_bdev; + } + if (bdev->bd_invalidated) rescan_partitions(bdev->bd_disk, bdev); - if (ret) - goto out_unlock_bdev; } /* only one opener holds refs to the module and disk */ module_put(disk->fops->owner); diff --git a/trunk/fs/btrfs/acl.c b/trunk/fs/btrfs/acl.c index 44ea5b92e1ba..5d505aaa72fb 100644 --- a/trunk/fs/btrfs/acl.c +++ b/trunk/fs/btrfs/acl.c @@ -178,13 +178,12 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, if (value) { acl = posix_acl_from_xattr(value, size); - if (IS_ERR(acl)) - return PTR_ERR(acl); - if (acl) { ret = posix_acl_valid(acl); if (ret) goto out; + } else if (IS_ERR(acl)) { + return PTR_ERR(acl); } } diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h index 8f4b81de3ae2..2e61fe1b6b8c 100644 --- a/trunk/fs/btrfs/ctree.h +++ b/trunk/fs/btrfs/ctree.h @@ -718,7 +718,7 @@ struct btrfs_space_info { u64 total_bytes; /* total bytes in the space, this doesn't take mirrors into account */ u64 bytes_used; /* total bytes used, - this doesn't take mirrors into account */ + this does't take mirrors into account */ u64 bytes_pinned; /* total bytes pinned, will be freed when the transaction finishes */ u64 bytes_reserved; /* total bytes the allocator has reserved for diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 9ee6bd55e16c..cd52f7f556ef 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -8856,38 +8856,23 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, int btrfs_init_space_info(struct btrfs_fs_info *fs_info) { struct btrfs_space_info *space_info; - struct btrfs_super_block *disk_super; - u64 features; - u64 flags; - int mixed = 0; int ret; - disk_super = &fs_info->super_copy; - if (!btrfs_super_root(disk_super)) - return 1; - - features = btrfs_super_incompat_flags(disk_super); - if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) - mixed = 1; + ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0, + &space_info); + if (ret) + return ret; - flags = BTRFS_BLOCK_GROUP_SYSTEM; - ret = update_space_info(fs_info, flags, 0, 0, &space_info); + ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0, + &space_info); if (ret) - goto out; + return ret; - if (mixed) { - flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; - ret = update_space_info(fs_info, flags, 0, 0, &space_info); - } else { - flags = BTRFS_BLOCK_GROUP_METADATA; - ret = update_space_info(fs_info, flags, 0, 0, &space_info); - if (ret) - goto out; + ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0, + &space_info); + if (ret) 
+ return ret; - flags = BTRFS_BLOCK_GROUP_DATA; - ret = update_space_info(fs_info, flags, 0, 0, &space_info); - } -out: return ret; } diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c index 2616f7ed4799..ffb48d6c5433 100644 --- a/trunk/fs/btrfs/ioctl.c +++ b/trunk/fs/btrfs/ioctl.c @@ -81,13 +81,6 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags) iflags |= FS_NOATIME_FL; if (flags & BTRFS_INODE_DIRSYNC) iflags |= FS_DIRSYNC_FL; - if (flags & BTRFS_INODE_NODATACOW) - iflags |= FS_NOCOW_FL; - - if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) - iflags |= FS_COMPR_FL; - else if (flags & BTRFS_INODE_NOCOMPRESS) - iflags |= FS_NOCOMP_FL; return iflags; } @@ -151,13 +144,16 @@ static int check_flags(unsigned int flags) if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ FS_SYNC_FL | FS_DIRSYNC_FL | \ - FS_NOCOMP_FL | FS_COMPR_FL | - FS_NOCOW_FL)) + FS_NOCOMP_FL | FS_COMPR_FL | \ + FS_NOCOW_FL | FS_COW_FL)) return -EOPNOTSUPP; if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; + if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL)) + return -EINVAL; + return 0; } @@ -222,10 +218,6 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) ip->flags |= BTRFS_INODE_DIRSYNC; else ip->flags &= ~BTRFS_INODE_DIRSYNC; - if (flags & FS_NOCOW_FL) - ip->flags |= BTRFS_INODE_NODATACOW; - else - ip->flags &= ~BTRFS_INODE_NODATACOW; /* * The COMPRESS flag can only be changed by users, while the NOCOMPRESS @@ -238,9 +230,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) } else if (flags & FS_COMPR_FL) { ip->flags |= BTRFS_INODE_COMPRESS; ip->flags &= ~BTRFS_INODE_NOCOMPRESS; - } else { - ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } + if (flags & FS_NOCOW_FL) + ip->flags |= BTRFS_INODE_NODATACOW; + else if (flags & FS_COW_FL) + ip->flags &= ~BTRFS_INODE_NODATACOW; trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); diff --git a/trunk/fs/ceph/addr.c b/trunk/fs/ceph/addr.c index 38b8ab554924..e159c529fd2b 100644 --- a/trunk/fs/ceph/addr.c +++ b/trunk/fs/ceph/addr.c @@ -775,13 +775,6 @@ static int ceph_writepages_start(struct address_space *mapping, ci->i_truncate_seq, ci->i_truncate_size, &inode->i_mtime, true, 1, 0); - - if (!req) { - rc = -ENOMEM; - unlock_page(page); - break; - } - max_pages = req->r_num_pages; alloc_page_vec(fsc, req); diff --git a/trunk/fs/ceph/caps.c b/trunk/fs/ceph/caps.c index 2a5404c1c42f..5323c330bbf3 100644 --- a/trunk/fs/ceph/caps.c +++ b/trunk/fs/ceph/caps.c @@ -819,7 +819,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci) used |= CEPH_CAP_FILE_CACHE; if (ci->i_wr_ref) used |= CEPH_CAP_FILE_WR; - if (ci->i_wb_ref || ci->i_wrbuffer_ref) + if (ci->i_wrbuffer_ref) used |= CEPH_CAP_FILE_BUFFER; return used; } @@ -1331,11 +1331,10 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci) } /* - * Mark caps dirty. If inode is newly dirty, return the dirty flags. - * Caller is then responsible for calling __mark_inode_dirty with the - * returned flags value. + * Mark caps dirty. If inode is newly dirty, add to the global dirty + * list. 
*/ -int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) +void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) { struct ceph_mds_client *mdsc = ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; @@ -1358,7 +1357,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) list_add(&ci->i_dirty_item, &mdsc->cap_dirty); spin_unlock(&mdsc->cap_dirty_lock); if (ci->i_flushing_caps == 0) { - ihold(inode); + igrab(inode); dirty |= I_DIRTY_SYNC; } } @@ -1366,8 +1365,9 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && (mask & CEPH_CAP_FILE_BUFFER)) dirty |= I_DIRTY_DATASYNC; + if (dirty) + __mark_inode_dirty(inode, dirty); __cap_delay_requeue(mdsc, ci); - return dirty; } /* @@ -1990,11 +1990,11 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got) if (got & CEPH_CAP_FILE_WR) ci->i_wr_ref++; if (got & CEPH_CAP_FILE_BUFFER) { - if (ci->i_wb_ref == 0) - ihold(&ci->vfs_inode); - ci->i_wb_ref++; - dout("__take_cap_refs %p wb %d -> %d (?)\n", - &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); + if (ci->i_wrbuffer_ref == 0) + igrab(&ci->vfs_inode); + ci->i_wrbuffer_ref++; + dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n", + &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref); } } @@ -2169,12 +2169,12 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) if (--ci->i_rdcache_ref == 0) last++; if (had & CEPH_CAP_FILE_BUFFER) { - if (--ci->i_wb_ref == 0) { + if (--ci->i_wrbuffer_ref == 0) { last++; put++; } - dout("put_cap_refs %p wb %d -> %d (?)\n", - inode, ci->i_wb_ref+1, ci->i_wb_ref); + dout("put_cap_refs %p wrbuffer %d -> %d (?)\n", + inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref); } if (had & CEPH_CAP_FILE_WR) if (--ci->i_wr_ref == 0) { diff --git a/trunk/fs/ceph/file.c b/trunk/fs/ceph/file.c index 203252d88d9f..159b512d5a27 100644 --- a/trunk/fs/ceph/file.c +++ b/trunk/fs/ceph/file.c @@ -734,12 +734,9 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov, } } if (ret >= 0) { - int dirty; spin_lock(&inode->i_lock); - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); + __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); spin_unlock(&inode->i_lock); - if (dirty) - __mark_inode_dirty(inode, dirty); } out: diff --git a/trunk/fs/ceph/inode.c b/trunk/fs/ceph/inode.c index 70b6a4839c38..b54c97da1c43 100644 --- a/trunk/fs/ceph/inode.c +++ b/trunk/fs/ceph/inode.c @@ -355,7 +355,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_rd_ref = 0; ci->i_rdcache_ref = 0; ci->i_wr_ref = 0; - ci->i_wb_ref = 0; ci->i_wrbuffer_ref = 0; ci->i_wrbuffer_ref_head = 0; ci->i_shared_gen = 0; @@ -1568,7 +1567,6 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) int release = 0, dirtied = 0; int mask = 0; int err = 0; - int inode_dirty_flags = 0; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; @@ -1727,16 +1725,13 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) dout("setattr %p ATTR_FILE ... 
hrm!\n", inode); if (dirtied) { - inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied); + __ceph_mark_dirty_caps(ci, dirtied); inode->i_ctime = CURRENT_TIME; } release &= issued; spin_unlock(&inode->i_lock); - if (inode_dirty_flags) - __mark_inode_dirty(inode, inode_dirty_flags); - if (mask) { req->r_inode = igrab(inode); req->r_inode_drop = release; diff --git a/trunk/fs/ceph/mds_client.c b/trunk/fs/ceph/mds_client.c index d0fae4ce9ba5..f60b07b0feb0 100644 --- a/trunk/fs/ceph/mds_client.c +++ b/trunk/fs/ceph/mds_client.c @@ -3304,8 +3304,8 @@ static void con_put(struct ceph_connection *con) { struct ceph_mds_session *s = con->private; - dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); ceph_put_mds_session(s); + dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref)); } /* diff --git a/trunk/fs/ceph/snap.c b/trunk/fs/ceph/snap.c index 24067d68a554..e86ec1155f8f 100644 --- a/trunk/fs/ceph/snap.c +++ b/trunk/fs/ceph/snap.c @@ -206,7 +206,7 @@ void ceph_put_snap_realm(struct ceph_mds_client *mdsc, up_write(&mdsc->snap_rwsem); } else { spin_lock(&mdsc->snap_empty_lock); - list_add(&realm->empty_item, &mdsc->snap_empty); + list_add(&mdsc->snap_empty, &realm->empty_item); spin_unlock(&mdsc->snap_empty_lock); } } diff --git a/trunk/fs/ceph/super.h b/trunk/fs/ceph/super.h index f5cabefa98dc..619fe719968f 100644 --- a/trunk/fs/ceph/super.h +++ b/trunk/fs/ceph/super.h @@ -293,7 +293,7 @@ struct ceph_inode_info { /* held references to caps */ int i_pin_ref; - int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref; + int i_rd_ref, i_rdcache_ref, i_wr_ref; int i_wrbuffer_ref, i_wrbuffer_ref_head; u32 i_shared_gen; /* increment each time we get FILE_SHARED */ u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */ @@ -506,7 +506,7 @@ static inline int __ceph_caps_dirty(struct ceph_inode_info *ci) { return ci->i_dirty_caps | ci->i_flushing_caps; } -extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask); +extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask); extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask); extern int __ceph_caps_used(struct ceph_inode_info *ci); diff --git a/trunk/fs/ceph/xattr.c b/trunk/fs/ceph/xattr.c index f2b628696180..8c9eba6ef9df 100644 --- a/trunk/fs/ceph/xattr.c +++ b/trunk/fs/ceph/xattr.c @@ -703,7 +703,6 @@ int ceph_setxattr(struct dentry *dentry, const char *name, struct ceph_inode_xattr *xattr = NULL; int issued; int required_blob_size; - int dirty; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; @@ -764,12 +763,11 @@ int ceph_setxattr(struct dentry *dentry, const char *name, dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued)); err = __set_xattr(ci, newname, name_len, newval, val_len, 1, 1, 1, &xattr); - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); + __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); ci->i_xattrs.dirty = true; inode->i_ctime = CURRENT_TIME; spin_unlock(&inode->i_lock); - if (dirty) - __mark_inode_dirty(inode, dirty); + return err; do_sync: @@ -812,7 +810,6 @@ int ceph_removexattr(struct dentry *dentry, const char *name) struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode); int issued; int err; - int dirty; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; @@ -836,13 +833,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name) goto do_sync; err = __remove_xattr_by_name(ceph_inode(inode), name); - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); + __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); ci->i_xattrs.dirty = true; 
inode->i_ctime = CURRENT_TIME; spin_unlock(&inode->i_lock); - if (dirty) - __mark_inode_dirty(inode, dirty); + return err; do_sync: spin_unlock(&inode->i_lock); diff --git a/trunk/fs/cifs/cifs_unicode.c b/trunk/fs/cifs/cifs_unicode.c index 1b2e180b018d..23d43cde4306 100644 --- a/trunk/fs/cifs/cifs_unicode.c +++ b/trunk/fs/cifs/cifs_unicode.c @@ -277,7 +277,6 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, for (i = 0, j = 0; i < srclen; j++) { src_char = source[i]; - charlen = 1; switch (src_char) { case 0: put_unaligned(0, &target[j]); @@ -317,13 +316,16 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen, dst_char = cpu_to_le16(0x003f); charlen = 1; } + /* + * character may take more than one byte in the source + * string, but will take exactly two bytes in the + * target string + */ + i += charlen; + continue; } - /* - * character may take more than one byte in the source string, - * but will take exactly two bytes in the target string - */ - i += charlen; put_unaligned(dst_char, &target[j]); + i++; /* move to next char in source string */ } ctoUCS_out: diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c index 277262a8e82f..4bc862a80efa 100644 --- a/trunk/fs/cifs/connect.c +++ b/trunk/fs/cifs/connect.c @@ -274,8 +274,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) char *data_area_of_target; char *data_area_of_buf2; int remaining; - unsigned int byte_count, total_in_buf; - __u16 total_data_size, total_in_buf2; + __u16 byte_count, total_data_size, total_in_buf, total_in_buf2; total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); @@ -288,7 +287,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) remaining = total_data_size - total_in_buf; if (remaining < 0) - return -EPROTO; + return -EINVAL; if (remaining == 0) /* nothing to do, ignore */ return 0; @@ -309,28 +308,19 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) data_area_of_target += total_in_buf; /* copy second buffer into end of first buffer */ + memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); total_in_buf += total_in_buf2; - /* is the result too big for the field? */ - if (total_in_buf > USHRT_MAX) - return -EPROTO; put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); - - /* fix up the BCC */ byte_count = get_bcc_le(pTargetSMB); byte_count += total_in_buf2; - /* is the result too big for the field? 
*/ - if (byte_count > USHRT_MAX) - return -EPROTO; put_bcc_le(byte_count, pTargetSMB); byte_count = pTargetSMB->smb_buf_length; byte_count += total_in_buf2; - /* don't allow buffer to overflow */ - if (byte_count > CIFSMaxBufSize) - return -ENOBUFS; - pTargetSMB->smb_buf_length = byte_count; - memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); + /* BB also add check that we are not beyond maximum buffer size */ + + pTargetSMB->smb_buf_length = byte_count; if (remaining == total_in_buf2) { cFYI(1, "found the last secondary response"); @@ -617,63 +607,59 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); - if (mid_entry->mid != smb_buffer->Mid || - mid_entry->midState != MID_REQUEST_SUBMITTED || - mid_entry->command != smb_buffer->Command) { - mid_entry = NULL; - continue; - } - - if (length == 0 && - check2ndT2(smb_buffer, server->maxBuf) > 0) { - /* We have a multipart transact2 resp */ - isMultiRsp = true; - if (mid_entry->resp_buf) { - /* merge response - fix up 1st*/ - length = coalesce_t2(smb_buffer, - mid_entry->resp_buf); - if (length > 0) { - length = 0; - mid_entry->multiRsp = true; - break; - } else { - /* all parts received or - * packet is malformed - */ - mid_entry->multiEnd = true; - goto multi_t2_fnd; - } - } else { - if (!isLargeBuf) { - /* - * FIXME: switch to already - * allocated largebuf? - */ - cERROR(1, "1st trans2 resp " - "needs bigbuf"); + if ((mid_entry->mid == smb_buffer->Mid) && + (mid_entry->midState == MID_REQUEST_SUBMITTED) && + (mid_entry->command == smb_buffer->Command)) { + if (length == 0 && + check2ndT2(smb_buffer, server->maxBuf) > 0) { + /* We have a multipart transact2 resp */ + isMultiRsp = true; + if (mid_entry->resp_buf) { + /* merge response - fix up 1st*/ + if (coalesce_t2(smb_buffer, + mid_entry->resp_buf)) { + mid_entry->multiRsp = + true; + break; + } else { + /* all parts received */ + mid_entry->multiEnd = + true; + goto multi_t2_fnd; + } } else { - /* Have first buffer */ - mid_entry->resp_buf = - smb_buffer; - mid_entry->largeBuf = true; - bigbuf = NULL; + if (!isLargeBuf) { + cERROR(1, "1st trans2 resp needs bigbuf"); + /* BB maybe we can fix this up, switch + to already allocated large buffer? 
*/ + } else { + /* Have first buffer */ + mid_entry->resp_buf = + smb_buffer; + mid_entry->largeBuf = + true; + bigbuf = NULL; + } } + break; } - break; - } - mid_entry->resp_buf = smb_buffer; - mid_entry->largeBuf = isLargeBuf; + mid_entry->resp_buf = smb_buffer; + mid_entry->largeBuf = isLargeBuf; multi_t2_fnd: - if (length == 0) - mid_entry->midState = MID_RESPONSE_RECEIVED; - else - mid_entry->midState = MID_RESPONSE_MALFORMED; + if (length == 0) + mid_entry->midState = + MID_RESPONSE_RECEIVED; + else + mid_entry->midState = + MID_RESPONSE_MALFORMED; #ifdef CONFIG_CIFS_STATS2 - mid_entry->when_received = jiffies; + mid_entry->when_received = jiffies; #endif - list_del_init(&mid_entry->qhead); - mid_entry->callback(mid_entry); - break; + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); + break; + } + mid_entry = NULL; } spin_unlock(&GlobalMid_Lock); @@ -2673,11 +2659,6 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon, 0 /* not legacy */, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - - if (rc == -EOPNOTSUPP || rc == -EINVAL) - rc = SMBQueryInformation(xid, tcon, full_path, pfile_info, - cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); kfree(pfile_info); return rc; } diff --git a/trunk/fs/cifs/sess.c b/trunk/fs/cifs/sess.c index 645114ad0a10..f6728eb6f4b9 100644 --- a/trunk/fs/cifs/sess.c +++ b/trunk/fs/cifs/sess.c @@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, } static void -decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, +decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, const struct nls_table *nls_cp) { int len; @@ -284,6 +284,19 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, cFYI(1, "bleft %d", bleft); + /* + * Windows servers do not always double null terminate their final + * Unicode string. Check to see if there are an uneven number of bytes + * left. If so, then add an extra NULL pad byte to the end of the + * response. 
+ * + * See section 2.7.2 in "Implementing CIFS" for details + */ + if (bleft % 2) { + data[bleft] = 0; + ++bleft; + } + kfree(ses->serverOS); ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); cFYI(1, "serverOS=%s", ses->serverOS); @@ -916,9 +929,7 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, } /* BB check if Unicode and decode strings */ - if (bytes_remaining == 0) { - /* no string area to decode, do nothing */ - } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { + if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; diff --git a/trunk/fs/configfs/dir.c b/trunk/fs/configfs/dir.c index 9a37a9b6de3a..3313dd19f543 100644 --- a/trunk/fs/configfs/dir.c +++ b/trunk/fs/configfs/dir.c @@ -53,14 +53,11 @@ DEFINE_SPINLOCK(configfs_dirent_lock); static void configfs_d_iput(struct dentry * dentry, struct inode * inode) { - struct configfs_dirent *sd = dentry->d_fsdata; + struct configfs_dirent * sd = dentry->d_fsdata; if (sd) { BUG_ON(sd->s_dentry != dentry); - /* Coordinate with configfs_readdir */ - spin_lock(&configfs_dirent_lock); sd->s_dentry = NULL; - spin_unlock(&configfs_dirent_lock); configfs_put(sd); } iput(inode); @@ -692,8 +689,7 @@ static int create_default_group(struct config_group *parent_group, sd = child->d_fsdata; sd->s_type |= CONFIGFS_USET_DEFAULT; } else { - BUG_ON(child->d_inode); - d_drop(child); + d_delete(child); dput(child); } } @@ -1549,7 +1545,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir struct configfs_dirent * parent_sd = dentry->d_fsdata; struct configfs_dirent *cursor = filp->private_data; struct list_head *p, *q = &cursor->s_sibling; - ino_t ino = 0; + ino_t ino; int i = filp->f_pos; switch (i) { @@ -1577,7 +1573,6 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir struct configfs_dirent *next; const char * name; int len; - struct inode *inode = NULL; next = list_entry(p, struct configfs_dirent, s_sibling); @@ -1586,28 +1581,9 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir name = configfs_get_name(next); len = strlen(name); - - /* - * We'll have a dentry and an inode for - * PINNED items and for open attribute - * files. We lock here to prevent a race - * with configfs_d_iput() clearing - * s_dentry before calling iput(). - * - * Why do we go to the trouble? If - * someone has an attribute file open, - * the inode number should match until - * they close it. Beyond that, we don't - * care. 
- */ - spin_lock(&configfs_dirent_lock); - dentry = next->s_dentry; - if (dentry) - inode = dentry->d_inode; - if (inode) - ino = inode->i_ino; - spin_unlock(&configfs_dirent_lock); - if (!inode) + if (next->s_dentry) + ino = next->s_dentry->d_inode->i_ino; + else ino = iunique(configfs_sb, 2); if (filldir(dirent, name, len, filp->f_pos, ino, @@ -1707,8 +1683,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) err = configfs_attach_group(sd->s_element, &group->cg_item, dentry); if (err) { - BUG_ON(dentry->d_inode); - d_drop(dentry); + d_delete(dentry); dput(dentry); } else { spin_lock(&configfs_dirent_lock); diff --git a/trunk/fs/debugfs/file.c b/trunk/fs/debugfs/file.c index 568304d058a3..89d394d8fe24 100644 --- a/trunk/fs/debugfs/file.c +++ b/trunk/fs/debugfs/file.c @@ -429,16 +429,25 @@ static ssize_t write_file_bool(struct file *file, const char __user *user_buf, { char buf[32]; int buf_size; - bool bv; u32 *val = file->private_data; buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; - if (strtobool(buf, &bv) == 0) - *val = bv; - + switch (buf[0]) { + case 'y': + case 'Y': + case '1': + *val = 1; + break; + case 'n': + case 'N': + case '0': + *val = 0; + break; + } + return count; } diff --git a/trunk/fs/file.c b/trunk/fs/file.c index 4c6992d8f3ba..0be344755c02 100644 --- a/trunk/fs/file.c +++ b/trunk/fs/file.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -40,17 +39,14 @@ int sysctl_nr_open_max = 1024 * 1024; /* raised later */ */ static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list); -static void *alloc_fdmem(unsigned int size) +static inline void *alloc_fdmem(unsigned int size) { - /* - * Very large allocations can stress page reclaim, so fall back to - * vmalloc() if the allocation size will be considered "large" by the VM. - */ - if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { - void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN); - if (data != NULL) - return data; - } + void *data; + + data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN); + if (data != NULL) + return data; + return vmalloc(size); } diff --git a/trunk/fs/fuse/dir.c b/trunk/fs/fuse/dir.c index b32eb29a4e6f..c6ba49bd95b3 100644 --- a/trunk/fs/fuse/dir.c +++ b/trunk/fs/fuse/dir.c @@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) if (!inode) return 0; - if (nd && (nd->flags & LOOKUP_RCU)) + if (nd->flags & LOOKUP_RCU) return -ECHILD; fc = get_fuse_conn(inode); diff --git a/trunk/fs/hpfs/Kconfig b/trunk/fs/hpfs/Kconfig index 56bd15c5bf6c..0c39dc3ef7d7 100644 --- a/trunk/fs/hpfs/Kconfig +++ b/trunk/fs/hpfs/Kconfig @@ -1,6 +1,7 @@ config HPFS_FS tristate "OS/2 HPFS file system support" depends on BLOCK + depends on BROKEN || !PREEMPT help OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS is the file system used for organizing files on OS/2 hard disk diff --git a/trunk/fs/hpfs/alloc.c b/trunk/fs/hpfs/alloc.c index 7a5eb2c718c8..5503e2c28910 100644 --- a/trunk/fs/hpfs/alloc.c +++ b/trunk/fs/hpfs/alloc.c @@ -8,6 +8,8 @@ #include "hpfs_fn.h" +static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec); + /* * Check if a sector is allocated in bitmap * This is really slow. 
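/*
 * Aside on the fs/file.c hunk above: alloc_fdmem() switches between
 * kmalloc() and a vmalloc() fallback, so the matching free has to check
 * which allocator produced the pointer.  A minimal sketch of that pattern
 * (hypothetical helper names; only the kmalloc-then-vmalloc idiom is taken
 * from the hunk):
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *example_alloc(size_t size)
{
	/* try the slab allocator quietly first */
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (p)
		return p;
	/* large or fragmented request: fall back to vmalloc */
	return vmalloc(size);
}

static void example_free(void *p)
{
	/* free with whichever allocator produced the pointer */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}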
Turned on only if chk==2 @@ -16,9 +18,9 @@ static int chk_if_allocated(struct super_block *s, secno sec, char *msg) { struct quad_buffer_head qbh; - u32 *bmp; + unsigned *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; - if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { + if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); goto fail1; } @@ -26,7 +28,7 @@ static int chk_if_allocated(struct super_block *s, secno sec, char *msg) if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail; - if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) { + if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec); goto fail1; } @@ -73,6 +75,7 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne hpfs_error(s, "Bad allocation size: %d", n); return 0; } + lock_super(s); if (bs != ~0x3fff) { if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls; } else { @@ -82,6 +85,10 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne ret = bs + nr; goto rt; } + /*if (!tstbits(bmp, nr + n, n + forward)) { + ret = bs + nr + n; + goto rt; + }*/ q = nr + n; b = 0; while ((a = tstbits(bmp, q, n + forward)) != 0) { q += a; @@ -98,14 +105,14 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne goto rt; } nr >>= 5; - /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */ + /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/ i = nr; do { - if (!le32_to_cpu(bmp[i])) goto cont; - if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont; + if (!bmp[i]) goto cont; + if (n + forward >= 0x3f && bmp[i] != -1) goto cont; q = i<<5; if (i > 0) { - unsigned k = le32_to_cpu(bmp[i-1]); + unsigned k = bmp[i-1]; while (k & 0x80000000) { q--; k <<= 1; } @@ -125,17 +132,18 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne } while (i != nr); rt: if (ret) { - if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { + if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret); ret = 0; goto b; } - bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f))); + bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f)); hpfs_mark_4buffers_dirty(&qbh); } b: hpfs_brelse4(&qbh); uls: + unlock_super(s); return ret; } @@ -147,7 +155,7 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne * sectors */ -secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward) +secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock) { secno sec; int i; @@ -159,6 +167,7 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa forward = -forward; f_p = 1; } + if (lock) hpfs_lock_creation(s); n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14; if (near && near < sbi->sb_fs_size) { if ((sec = alloc_in_bmp(s, near, n, f_p ? 
forward : forward/4))) goto ret; @@ -205,17 +214,18 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa ret: if (sec && f_p) { for (i = 0; i < forward; i++) { - if (!hpfs_alloc_if_possible(s, sec + i + 1)) { + if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) { hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i); sec = 0; break; } } } + if (lock) hpfs_unlock_creation(s); return sec; } -static secno alloc_in_dirband(struct super_block *s, secno near) +static secno alloc_in_dirband(struct super_block *s, secno near, int lock) { unsigned nr = near; secno sec; @@ -226,35 +236,49 @@ static secno alloc_in_dirband(struct super_block *s, secno near) nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4; nr -= sbi->sb_dirband_start; nr >>= 2; + if (lock) hpfs_lock_creation(s); sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0); + if (lock) hpfs_unlock_creation(s); if (!sec) return 0; return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start; } /* Alloc sector if it's free */ -int hpfs_alloc_if_possible(struct super_block *s, secno sec) +static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec) { struct quad_buffer_head qbh; - u32 *bmp; + unsigned *bmp; + lock_super(s); if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; - if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) { - bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f))); + if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) { + bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f)); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); + unlock_super(s); return 1; } hpfs_brelse4(&qbh); end: + unlock_super(s); return 0; } +int hpfs_alloc_if_possible(struct super_block *s, secno sec) +{ + int r; + hpfs_lock_creation(s); + r = hpfs_alloc_if_possible_nolock(s, sec); + hpfs_unlock_creation(s); + return r; +} + /* Free sectors in bitmaps */ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) { struct quad_buffer_head qbh; - u32 *bmp; + unsigned *bmp; struct hpfs_sb_info *sbi = hpfs_sb(s); /*printk("2 - ");*/ if (!n) return; @@ -262,22 +286,26 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) hpfs_error(s, "Trying to free reserved sector %08x", sec); return; } + lock_super(s); sbi->sb_max_fwd_alloc += n > 0xffff ? 
0xffff : n; if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff; new_map: if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) { + unlock_super(s); return; } new_tst: - if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) { + if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) { hpfs_error(s, "sector %08x not allocated", sec); hpfs_brelse4(&qbh); + unlock_super(s); return; } - bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f)); + bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f); if (!--n) { hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); + unlock_super(s); return; } if (!(++sec & 0x3fff)) { @@ -299,13 +327,13 @@ int hpfs_check_free_dnodes(struct super_block *s, int n) int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; int i, j; - u32 *bmp; + unsigned *bmp; struct quad_buffer_head qbh; if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { for (j = 0; j < 512; j++) { unsigned k; - if (!le32_to_cpu(bmp[j])) continue; - for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) { + if (!bmp[j]) continue; + for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) { hpfs_brelse4(&qbh); return 0; } @@ -324,10 +352,10 @@ int hpfs_check_free_dnodes(struct super_block *s, int n) chk_bmp: if (bmp) { for (j = 0; j < 512; j++) { - u32 k; - if (!le32_to_cpu(bmp[j])) continue; + unsigned k; + if (!bmp[j]) continue; for (k = 0xf; k; k <<= 4) - if ((le32_to_cpu(bmp[j]) & k) == k) { + if ((bmp[j] & k) == k) { if (!--n) { hpfs_brelse4(&qbh); return 0; @@ -351,40 +379,44 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno) hpfs_free_sectors(s, dno, 4); } else { struct quad_buffer_head qbh; - u32 *bmp; + unsigned *bmp; unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; + lock_super(s); if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { + unlock_super(s); return; } - bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f)); + bmp[ssec >> 5] |= 1 << (ssec & 0x1f); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); + unlock_super(s); } } struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near, - dnode_secno *dno, struct quad_buffer_head *qbh) + dnode_secno *dno, struct quad_buffer_head *qbh, + int lock) { struct dnode *d; if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) { - if (!(*dno = alloc_in_dirband(s, near))) - if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL; + if (!(*dno = alloc_in_dirband(s, near, lock))) + if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL; } else { - if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) - if (!(*dno = alloc_in_dirband(s, near))) return NULL; + if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) + if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL; } if (!(d = hpfs_get_4sectors(s, *dno, qbh))) { hpfs_free_dnode(s, *dno); return NULL; } memset(d, 0, 2048); - d->magic = cpu_to_le32(DNODE_MAGIC); - d->first_free = cpu_to_le32(52); + d->magic = DNODE_MAGIC; + d->first_free = 52; d->dirent[0] = 32; d->dirent[2] = 8; d->dirent[30] = 1; d->dirent[31] = 255; - d->self = cpu_to_le32(*dno); + d->self = *dno; return d; } @@ -392,16 +424,16 @@ struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *f struct buffer_head **bh) { struct fnode *f; - if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL; + if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL; if (!(f = hpfs_get_sector(s, *fno, bh))) { hpfs_free_sectors(s, *fno, 1); return NULL; } memset(f, 0, 512); - 
f->magic = cpu_to_le32(FNODE_MAGIC); - f->ea_offs = cpu_to_le16(0xc4); + f->magic = FNODE_MAGIC; + f->ea_offs = 0xc4; f->btree.n_free_nodes = 8; - f->btree.first_free = cpu_to_le16(8); + f->btree.first_free = 8; return f; } @@ -409,16 +441,16 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a struct buffer_head **bh) { struct anode *a; - if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL; + if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL; if (!(a = hpfs_get_sector(s, *ano, bh))) { hpfs_free_sectors(s, *ano, 1); return NULL; } memset(a, 0, 512); - a->magic = cpu_to_le32(ANODE_MAGIC); - a->self = cpu_to_le32(*ano); + a->magic = ANODE_MAGIC; + a->self = *ano; a->btree.n_free_nodes = 40; a->btree.n_used_nodes = 0; - a->btree.first_free = cpu_to_le16(8); + a->btree.first_free = 8; return a; } diff --git a/trunk/fs/hpfs/anode.c b/trunk/fs/hpfs/anode.c index 08b503e8ed29..6a2f04bf3df0 100644 --- a/trunk/fs/hpfs/anode.c +++ b/trunk/fs/hpfs/anode.c @@ -22,8 +22,8 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; if (btree->internal) { for (i = 0; i < btree->n_used_nodes; i++) - if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { - a = le32_to_cpu(btree->u.internal[i].down); + if (btree->u.internal[i].file_secno > sec) { + a = btree->u.internal[i].down; brelse(bh); if (!(anode = hpfs_map_anode(s, a, &bh))) return -1; btree = &anode->btree; @@ -34,18 +34,18 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, return -1; } for (i = 0; i < btree->n_used_nodes; i++) - if (le32_to_cpu(btree->u.external[i].file_secno) <= sec && - le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) { - a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno); + if (btree->u.external[i].file_secno <= sec && + btree->u.external[i].file_secno + btree->u.external[i].length > sec) { + a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno; if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) { brelse(bh); return -1; } if (inode) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); - hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno); - hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno); - hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length); + hpfs_inode->i_file_sec = btree->u.external[i].file_secno; + hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno; + hpfs_inode->i_n_secs = btree->u.external[i].length; } brelse(bh); return a; @@ -83,8 +83,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi return -1; } if (btree->internal) { - a = le32_to_cpu(btree->u.internal[n].down); - btree->u.internal[n].file_secno = cpu_to_le32(-1); + a = btree->u.internal[n].down; + btree->u.internal[n].file_secno = -1; mark_buffer_dirty(bh); brelse(bh); if (hpfs_sb(s)->sb_chk) @@ -94,15 +94,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi goto go_down; } if (n >= 0) { - if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) { + if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) { hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x", - le32_to_cpu(btree->u.external[n].file_secno) + 
le32_to_cpu(btree->u.external[n].length), fsecno, + btree->u.external[n].file_secno + btree->u.external[n].length, fsecno, fnod?'f':'a', node); brelse(bh); return -1; } - if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { - btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1); + if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) { + btree->u.external[n].length++; mark_buffer_dirty(bh); brelse(bh); return se; @@ -115,20 +115,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } se = !fnod ? node : (node + 16384) & ~16383; } - if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_MALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_Mu.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length); + fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length; if (!btree->n_free_nodes) { - up = a != node ? le32_to_cpu(anode->up) : -1; + up = a != node ? anode->up : -1; if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) { brelse(bh); hpfs_free_sectors(s, se, 1); return -1; } if (a == node && fnod) { - anode->up = cpu_to_le32(node); + anode->up = node; anode->btree.fnode_parent = 1; anode->btree.n_used_nodes = btree->n_used_nodes; anode->btree.first_free = btree->first_free; @@ -137,9 +137,9 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi btree->internal = 1; btree->n_free_nodes = 11; btree->n_used_nodes = 1; - btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); - btree->u.internal[0].file_secno = cpu_to_le32(-1); - btree->u.internal[0].down = cpu_to_le32(na); + btree->first_free = (char *)&(btree->u.internal[1]) - (char *)btree; + btree->u.internal[0].file_secno = -1; + btree->u.internal[0].down = na; mark_buffer_dirty(bh); } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) { brelse(bh); @@ -153,15 +153,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi btree = &anode->btree; } btree->n_free_nodes--; n = btree->n_used_nodes++; - btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12); - btree->u.external[n].disk_secno = cpu_to_le32(se); - btree->u.external[n].file_secno = cpu_to_le32(fs); - btree->u.external[n].length = cpu_to_le32(1); + btree->first_free += 12; + btree->u.external[n].disk_secno = se; + btree->u.external[n].file_secno = fs; + btree->u.external[n].length = 1; mark_buffer_dirty(bh); brelse(bh); if ((a == node && fnod) || na == -1) return se; c2 = 0; - while (up != (anode_secno)-1) { + while (up != -1) { struct anode *new_anode; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1; @@ -174,47 +174,47 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } if (btree->n_free_nodes) { btree->n_free_nodes--; n = btree->n_used_nodes++; - btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8); - btree->u.internal[n].file_secno = cpu_to_le32(-1); - btree->u.internal[n].down = cpu_to_le32(na); - btree->u.internal[n-1].file_secno = cpu_to_le32(fs); + btree->first_free += 8; + btree->u.internal[n].file_secno = -1; + btree->u.internal[n].down = na; + btree->u.internal[n-1].file_secno = fs; mark_buffer_dirty(bh); brelse(bh); brelse(bh2); hpfs_free_sectors(s, ra, 1); if ((anode = hpfs_map_anode(s, na, &bh))) { - anode->up = 
cpu_to_le32(up); + anode->up = up; anode->btree.fnode_parent = up == node && fnod; mark_buffer_dirty(bh); brelse(bh); } return se; } - up = up != node ? le32_to_cpu(anode->up) : -1; - btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1); + up = up != node ? anode->up : -1; + btree->u.internal[btree->n_used_nodes - 1].file_secno = /*fs*/-1; mark_buffer_dirty(bh); brelse(bh); a = na; if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { anode = new_anode; - /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/ + /*anode->up = up != -1 ? up : ra;*/ anode->btree.internal = 1; anode->btree.n_used_nodes = 1; anode->btree.n_free_nodes = 59; - anode->btree.first_free = cpu_to_le16(16); - anode->btree.u.internal[0].down = cpu_to_le32(a); - anode->btree.u.internal[0].file_secno = cpu_to_le32(-1); + anode->btree.first_free = 16; + anode->btree.u.internal[0].down = a; + anode->btree.u.internal[0].file_secno = -1; mark_buffer_dirty(bh); brelse(bh); if ((anode = hpfs_map_anode(s, a, &bh))) { - anode->up = cpu_to_le32(na); + anode->up = na; mark_buffer_dirty(bh); brelse(bh); } } else na = a; } if ((anode = hpfs_map_anode(s, na, &bh))) { - anode->up = cpu_to_le32(node); + anode->up = node; if (fnod) anode->btree.fnode_parent = 1; mark_buffer_dirty(bh); brelse(bh); @@ -232,14 +232,14 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } btree = &fnode->btree; } - ranode->up = cpu_to_le32(node); - memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); + ranode->up = node; + memcpy(&ranode->btree, btree, btree->first_free); if (fnod) ranode->btree.fnode_parent = 1; ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes; if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) { struct anode *unode; - if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { - unode->up = cpu_to_le32(ra); + if ((unode = hpfs_map_anode(s, ranode->u.internal[n].down, &bh1))) { + unode->up = ra; unode->btree.fnode_parent = 0; mark_buffer_dirty(bh1); brelse(bh1); @@ -248,11 +248,11 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi btree->internal = 1; btree->n_free_nodes = fnod ? 
10 : 58; btree->n_used_nodes = 2; - btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); - btree->u.internal[0].file_secno = cpu_to_le32(fs); - btree->u.internal[0].down = cpu_to_le32(ra); - btree->u.internal[1].file_secno = cpu_to_le32(-1); - btree->u.internal[1].down = cpu_to_le32(na); + btree->first_free = (char *)&btree->u.internal[2] - (char *)btree; + btree->u.internal[0].file_secno = fs; + btree->u.internal[0].down = ra; + btree->u.internal[1].file_secno = -1; + btree->u.internal[1].down = na; mark_buffer_dirty(bh); brelse(bh); mark_buffer_dirty(bh2); @@ -279,7 +279,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) go_down: d2 = 0; while (btree1->internal) { - ano = le32_to_cpu(btree1->u.internal[pos].down); + ano = btree1->u.internal[pos].down; if (level) brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1")) @@ -290,7 +290,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) pos = 0; } for (i = 0; i < btree1->n_used_nodes; i++) - hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length)); + hpfs_free_sectors(s, btree1->u.external[i].disk_secno, btree1->u.external[i].length); go_up: if (!level) return; brelse(bh); @@ -298,13 +298,13 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return; hpfs_free_sectors(s, ano, 1); oano = ano; - ano = le32_to_cpu(anode->up); + ano = anode->up; if (--level) { if (!(anode = hpfs_map_anode(s, ano, &bh))) return; btree1 = &anode->btree; } else btree1 = btree; for (i = 0; i < btree1->n_used_nodes; i++) { - if (le32_to_cpu(btree1->u.internal[i].down) == oano) { + if (btree1->u.internal[i].down == oano) { if ((pos = i + 1) < btree1->n_used_nodes) goto go_down; else @@ -411,7 +411,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) if (fno) { btree->n_free_nodes = 8; btree->n_used_nodes = 0; - btree->first_free = cpu_to_le16(8); + btree->first_free = 8; btree->internal = 0; mark_buffer_dirty(bh); } else hpfs_free_sectors(s, f, 1); @@ -421,22 +421,22 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) while (btree->internal) { nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) - if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; + if (btree->u.internal[i].file_secno >= secs) goto f; brelse(bh); hpfs_error(s, "internal btree %08x doesn't end with -1", node); return; f: for (j = i + 1; j < btree->n_used_nodes; j++) - hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0); + hpfs_ea_remove(s, btree->u.internal[j].down, 1, 0); btree->n_used_nodes = i + 1; btree->n_free_nodes = nodes - btree->n_used_nodes; - btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes); + btree->first_free = 8 + 8 * btree->n_used_nodes; mark_buffer_dirty(bh); - if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) { + if (btree->u.internal[i].file_secno == secs) { brelse(bh); return; } - node = le32_to_cpu(btree->u.internal[i].down); + node = btree->u.internal[i].down; brelse(bh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree")) @@ -446,25 +446,25 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) } nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) - if 
(le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff; + if (btree->u.external[i].file_secno + btree->u.external[i].length >= secs) goto ff; brelse(bh); return; ff: - if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) { + if (secs <= btree->u.external[i].file_secno) { hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs); if (i) i--; } - else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) { - hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs - - le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length) - - secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */ - btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno)); + else if (btree->u.external[i].file_secno + btree->u.external[i].length > secs) { + hpfs_free_sectors(s, btree->u.external[i].disk_secno + secs - + btree->u.external[i].file_secno, btree->u.external[i].length + - secs + btree->u.external[i].file_secno); /* I hope gcc optimizes this :-) */ + btree->u.external[i].length = secs - btree->u.external[i].file_secno; } for (j = i + 1; j < btree->n_used_nodes; j++) - hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length)); + hpfs_free_sectors(s, btree->u.external[j].disk_secno, btree->u.external[j].length); btree->n_used_nodes = i + 1; btree->n_free_nodes = nodes - btree->n_used_nodes; - btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes); + btree->first_free = 8 + 12 * btree->n_used_nodes; mark_buffer_dirty(bh); brelse(bh); } @@ -480,12 +480,12 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno) struct extended_attribute *ea_end; if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree); - else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); + else hpfs_remove_dtree(s, fnode->u.external[0].disk_secno); ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (ea->indirect) hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); - hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l)); + hpfs_ea_ext_remove(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l); brelse(bh); hpfs_free_sectors(s, fno, 1); } diff --git a/trunk/fs/hpfs/buffer.c b/trunk/fs/hpfs/buffer.c index 9ecde27d1e29..793cb9d943d2 100644 --- a/trunk/fs/hpfs/buffer.c +++ b/trunk/fs/hpfs/buffer.c @@ -9,6 +9,22 @@ #include #include "hpfs_fn.h" +void hpfs_lock_creation(struct super_block *s) +{ +#ifdef DEBUG_LOCKS + printk("lock creation\n"); +#endif + mutex_lock(&hpfs_sb(s)->hpfs_creation_de); +} + +void hpfs_unlock_creation(struct super_block *s) +{ +#ifdef DEBUG_LOCKS + printk("unlock creation\n"); +#endif + mutex_unlock(&hpfs_sb(s)->hpfs_creation_de); +} + /* Map a sector into a buffer and return pointers to it and to the buffer. 
*/ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, @@ -16,8 +32,6 @@ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head { struct buffer_head *bh; - hpfs_lock_assert(s); - cond_resched(); *bhp = bh = sb_bread(s, secno); @@ -36,8 +50,6 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head struct buffer_head *bh; /*return hpfs_map_sector(s, secno, bhp, 0);*/ - hpfs_lock_assert(s); - cond_resched(); if ((*bhp = bh = sb_getblk(s, secno)) != NULL) { @@ -58,8 +70,6 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe struct buffer_head *bh; char *data; - hpfs_lock_assert(s); - cond_resched(); if (secno & 3) { @@ -115,8 +125,6 @@ void *hpfs_get_4sectors(struct super_block *s, unsigned secno, { cond_resched(); - hpfs_lock_assert(s); - if (secno & 3) { printk("HPFS: hpfs_get_4sectors: unaligned read\n"); return NULL; diff --git a/trunk/fs/hpfs/dir.c b/trunk/fs/hpfs/dir.c index f46ae025bfb5..b3d7c0ddb609 100644 --- a/trunk/fs/hpfs/dir.c +++ b/trunk/fs/hpfs/dir.c @@ -88,9 +88,9 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) hpfs_error(inode->i_sb, "not a directory, fnode %08lx", (unsigned long)inode->i_ino); } - if (hpfs_inode->i_dno != le32_to_cpu(fno->u.external[0].disk_secno)) { + if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) { e = 1; - hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, le32_to_cpu(fno->u.external[0].disk_secno)); + hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, fno->u.external[0].disk_secno); } brelse(bh); if (e) { @@ -156,7 +156,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) goto again; } tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); - if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) { + if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) { filp->f_pos = old_pos; if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); @@ -221,7 +221,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name * Get inode number, what we're after. */ - ino = le32_to_cpu(de->fnode); + ino = de->fnode; /* * Go find or make an inode. @@ -236,7 +236,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name hpfs_init_inode(result); if (de->directory) hpfs_read_inode(result); - else if (le32_to_cpu(de->ea_size) && hpfs_sb(dir->i_sb)->sb_eas) + else if (de->ea_size && hpfs_sb(dir->i_sb)->sb_eas) hpfs_read_inode(result); else { result->i_mode |= S_IFREG; @@ -250,6 +250,8 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name hpfs_result = hpfs_i(result); if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; + hpfs_decide_conv(result, name, len); + if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) { hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. 
Send me some info on these structures"); goto bail1; @@ -261,19 +263,19 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name */ if (!result->i_ctime.tv_sec) { - if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date)))) + if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, de->creation_date))) result->i_ctime.tv_sec = 1; result->i_ctime.tv_nsec = 0; - result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date)); + result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, de->write_date); result->i_mtime.tv_nsec = 0; - result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date)); + result->i_atime.tv_sec = local_to_gmt(dir->i_sb, de->read_date); result->i_atime.tv_nsec = 0; - hpfs_result->i_ea_size = le32_to_cpu(de->ea_size); + hpfs_result->i_ea_size = de->ea_size; if (!hpfs_result->i_ea_mode && de->read_only) result->i_mode &= ~0222; if (!de->directory) { if (result->i_size == -1) { - result->i_size = le32_to_cpu(de->file_size); + result->i_size = de->file_size; result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = result->i_size; /* diff --git a/trunk/fs/hpfs/dnode.c b/trunk/fs/hpfs/dnode.c index 1e0e2ac30fd3..9b2ffadfc8c4 100644 --- a/trunk/fs/hpfs/dnode.c +++ b/trunk/fs/hpfs/dnode.c @@ -14,11 +14,11 @@ static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde) struct hpfs_dirent *de_end = dnode_end_de(d); int i = 1; for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { - if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i; + if (de == fde) return ((loff_t) d->self << 4) | (loff_t)i; i++; } printk("HPFS: get_pos: not_found\n"); - return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1; + return ((loff_t)d->self << 4) | (loff_t)1; } void hpfs_add_pos(struct inode *inode, loff_t *pos) @@ -130,30 +130,29 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno { struct hpfs_dirent *de; if (!(de = dnode_last_de(d))) { - hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self)); + hpfs_error(s, "set_last_pointer: empty dnode %08x", d->self); return; } if (hpfs_sb(s)->sb_chk) { if (de->down) { hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x", - le32_to_cpu(d->self), de_down_pointer(de)); + d->self, de_down_pointer(de)); return; } - if (le16_to_cpu(de->length) != 32) { - hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self)); + if (de->length != 32) { + hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", d->self); return; } } if (ptr) { - d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + 4); - if (le32_to_cpu(d->first_free) > 2048) { - hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self)); - d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - 4); + if ((d->first_free += 4) > 2048) { + hpfs_error(s,"set_last_pointer: too long dnode %08x", d->self); + d->first_free -= 4; return; } - de->length = cpu_to_le16(36); + de->length = 36; de->down = 1; - *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr); + *(dnode_secno *)((char *)de + 32) = ptr; } } @@ -169,7 +168,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); if (!c) { - hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self)); + hpfs_error(s, "name (%c,%d) 
already exists in dnode %08x", *name, namelen, d->self); return NULL; } if (c < 0) break; @@ -177,14 +176,15 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, memmove((char *)de + d_size, de, (char *)de_end - (char *)de); memset(de, 0, d_size); if (down_ptr) { - *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); + *(int *)((char *)de + d_size - 4) = down_ptr; de->down = 1; } - de->length = cpu_to_le16(d_size); + de->length = d_size; + if (down_ptr) de->down = 1; de->not_8x3 = hpfs_is_name_long(name, namelen); de->namelen = namelen; memcpy(de->name, name, namelen); - d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + d_size); + d->first_free += d_size; return de; } @@ -194,25 +194,25 @@ static void hpfs_delete_de(struct super_block *s, struct dnode *d, struct hpfs_dirent *de) { if (de->last) { - hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self)); + hpfs_error(s, "attempt to delete last dirent in dnode %08x", d->self); return; } - d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length)); - memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de); + d->first_free -= de->length; + memmove(de, de_next_de(de), d->first_free + (char *)d - (char *)de); } static void fix_up_ptrs(struct super_block *s, struct dnode *d) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); - dnode_secno dno = le32_to_cpu(d->self); + dnode_secno dno = d->self; for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) if (de->down) { struct quad_buffer_head qbh; struct dnode *dd; if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { - if (le32_to_cpu(dd->up) != dno || dd->root_dnode) { - dd->up = cpu_to_le32(dno); + if (dd->up != dno || dd->root_dnode) { + dd->up = dno; dd->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); } @@ -262,7 +262,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, kfree(nname); return 1; } - if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) { + if (d->first_free + de_size(namelen, down_ptr) <= 2048) { loff_t t; copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); t = get_pos(d, de); @@ -286,11 +286,11 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, kfree(nname); return 1; } - memcpy(nd, d, le32_to_cpu(d->first_free)); + memcpy(nd, d, d->first_free); copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; - if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) { + if (!(ad = hpfs_alloc_dnode(i->i_sb, d->up, &adno, &qbh1, 0))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); kfree(nd); @@ -313,21 +313,20 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, down_ptr = adno; set_last_pointer(i->i_sb, ad, de->down ? 
de_down_pointer(de) : 0); de = de_next_de(de); - memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de); - nd->first_free = cpu_to_le32(le32_to_cpu(nd->first_free) - ((char *)de - (char *)nd - 20)); - memcpy(d, nd, le32_to_cpu(nd->first_free)); + memmove((char *)nd + 20, de, nd->first_free + (char *)nd - (char *)de); + nd->first_free -= (char *)de - (char *)nd - 20; + memcpy(d, nd, nd->first_free); for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); fix_up_ptrs(i->i_sb, ad); if (!d->root_dnode) { - ad->up = d->up; - dno = le32_to_cpu(ad->up); + dno = ad->up = d->up; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); goto go_up; } - if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) { + if (!(rd = hpfs_alloc_dnode(i->i_sb, d->up, &rdno, &qbh2, 0))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); @@ -339,7 +338,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, i->i_blocks += 4; rd->root_dnode = 1; rd->up = d->up; - if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) { + if (!(fnode = hpfs_map_fnode(i->i_sb, d->up, &bh))) { hpfs_free_dnode(i->i_sb, rdno); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); @@ -348,11 +347,10 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, kfree(nname); return 1; } - fnode->u.external[0].disk_secno = cpu_to_le32(rdno); + fnode->u.external[0].disk_secno = rdno; mark_buffer_dirty(bh); brelse(bh); - hpfs_i(i)->i_dno = rdno; - d->up = ad->up = cpu_to_le32(rdno); + d->up = ad->up = hpfs_i(i)->i_dno = rdno; d->root_dnode = ad->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); @@ -375,7 +373,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, int hpfs_add_dirent(struct inode *i, const unsigned char *name, unsigned namelen, - struct hpfs_dirent *new_de) + struct hpfs_dirent *new_de, int cdepth) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct dnode *d; @@ -405,6 +403,7 @@ int hpfs_add_dirent(struct inode *i, } } hpfs_brelse4(&qbh); + if (!cdepth) hpfs_lock_creation(i->i_sb); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { c = 1; goto ret; @@ -412,6 +411,7 @@ int hpfs_add_dirent(struct inode *i, i->i_version++; c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); ret: + if (!cdepth) hpfs_unlock_creation(i->i_sb); return c; } @@ -437,9 +437,9 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) return 0; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; if (hpfs_sb(i->i_sb)->sb_chk) { - if (le32_to_cpu(dnode->up) != chk_up) { + if (dnode->up != chk_up) { hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", - dno, chk_up, le32_to_cpu(dnode->up)); + dno, chk_up, dnode->up); hpfs_brelse4(&qbh); return 0; } @@ -455,7 +455,7 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) hpfs_brelse4(&qbh); } while (!(de = dnode_pre_last_de(dnode))) { - dnode_secno up = le32_to_cpu(dnode->up); + dnode_secno up = dnode->up; hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; @@ -474,8 +474,8 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) hpfs_brelse4(&qbh); return 0; } - dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); - de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); + dnode->first_free -= 4; + de->length -= 4; de->down = 0; 
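/*
 * Pattern note (sketch, not part of the patch): the hpfs_add_dirent() and
 * hpfs_remove_dirent() hunks take the creation lock conditionally, so a
 * caller that already holds it can pass a flag and skip the inner
 * lock/unlock.  The general shape, using the hpfs_lock_creation()/
 * hpfs_unlock_creation() helpers added in the buffer.c hunk (the function
 * name below is hypothetical):
 */
static int example_locked_op(struct super_block *s, int already_locked)
{
	int err = 0;

	if (!already_locked)
		hpfs_lock_creation(s);

	/* ... work that must be serialized against other creations ... */

	if (!already_locked)
		hpfs_unlock_creation(s);
	return err;
}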
hpfs_mark_4buffers_dirty(&qbh); dno = up; @@ -483,12 +483,12 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) t = get_pos(dnode, de); for_all_poss(i, hpfs_pos_subst, t, 4); for_all_poss(i, hpfs_pos_subst, t + 1, 5); - if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { + if (!(nde = kmalloc(de->length, GFP_NOFS))) { hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted"); hpfs_brelse4(&qbh); return 0; } - memcpy(nde, de, le16_to_cpu(de->length)); + memcpy(nde, de, de->length); ddno = de->down ? de_down_pointer(de) : 0; hpfs_delete_de(i->i_sb, dnode, de); set_last_pointer(i->i_sb, dnode, ddno); @@ -517,11 +517,11 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) try_it_again: if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; - if (le32_to_cpu(dnode->first_free) > 56) goto end; - if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) { + if (dnode->first_free > 56) goto end; + if (dnode->first_free == 52 || dnode->first_free == 56) { struct hpfs_dirent *de_end; int root = dnode->root_dnode; - up = le32_to_cpu(dnode->up); + up = dnode->up; de = dnode_first_de(dnode); down = de->down ? de_down_pointer(de) : 0; if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { @@ -545,13 +545,13 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) return; } if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { - d1->up = cpu_to_le32(up); + d1->up = up; d1->root_dnode = 1; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { - fnode->u.external[0].disk_secno = cpu_to_le32(down); + fnode->u.external[0].disk_secno = down; mark_buffer_dirty(bh); brelse(bh); } @@ -570,22 +570,22 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); if (!down) { de->down = 0; - de->length = cpu_to_le16(le16_to_cpu(de->length) - 4); - dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4); + de->length -= 4; + dnode->first_free -= 4; memmove(de_next_de(de), (char *)de_next_de(de) + 4, - (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de)); + (char *)dnode + dnode->first_free - (char *)de_next_de(de)); } else { struct dnode *d1; struct quad_buffer_head qbh1; - *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down; + *(dnode_secno *) ((void *) de + de->length - 4) = down; if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { - d1->up = cpu_to_le32(up); + d1->up = up; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } } } else { - hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, le32_to_cpu(dnode->first_free)); + hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, dnode->first_free); goto end; } @@ -596,18 +596,18 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) struct quad_buffer_head qbh1; if (!de_next->down) goto endm; ndown = de_down_pointer(de_next); - if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { + if (!(de_cp = kmalloc(de->length, GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); goto endm; } - memcpy(de_cp, de, le16_to_cpu(de->length)); + memcpy(de_cp, de, de->length); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); for_all_poss(i, 
hpfs_pos_del, ((loff_t)up << 4) | p, 1); if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { - d1->up = cpu_to_le32(ndown); + d1->up = ndown; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } @@ -635,7 +635,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) struct hpfs_dirent *del = dnode_last_de(d1); dlp = del->down ? de_down_pointer(del) : 0; if (!dlp && down) { - if (le32_to_cpu(d1->first_free) > 2044) { + if (d1->first_free > 2044) { if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: terminating balancing operation\n"); @@ -647,38 +647,38 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: goin'on\n"); } - del->length = cpu_to_le16(le16_to_cpu(del->length) + 4); + del->length += 4; del->down = 1; - d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) + 4); + d1->first_free += 4; } if (dlp && !down) { - del->length = cpu_to_le16(le16_to_cpu(del->length) - 4); + del->length -= 4; del->down = 0; - d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4); + d1->first_free -= 4; } else if (down) - *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); + *(dnode_secno *) ((void *) del + del->length - 4) = down; } else goto endm; - if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { + if (!(de_cp = kmalloc(de_prev->length, GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); hpfs_brelse4(&qbh1); goto endm; } hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); - memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length)); + memcpy(de_cp, de_prev, de_prev->length); hpfs_delete_de(i->i_sb, dnode, de_prev); if (!de_prev->down) { - de_prev->length = cpu_to_le16(le16_to_cpu(de_prev->length) + 4); + de_prev->length += 4; de_prev->down = 1; - dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4); + dnode->first_free += 4; } - *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); + *(dnode_secno *) ((void *) de_prev + de_prev->length - 4) = ndown; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { - d1->up = cpu_to_le32(ndown); + d1->up = ndown; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } @@ -701,6 +701,7 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, { struct dnode *dnode = qbh->data; dnode_secno down = 0; + int lock = 0; loff_t t; if (de->first || de->last) { hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); @@ -709,8 +710,11 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, } if (de->down) down = de_down_pointer(de); if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { + lock = 1; + hpfs_lock_creation(i->i_sb); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { hpfs_brelse4(qbh); + hpfs_unlock_creation(i->i_sb); return 2; } } @@ -723,9 +727,11 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, dnode_secno a = move_to_top(i, down, dno); for_all_poss(i, hpfs_pos_subst, 5, t); if (a) delete_empty_dnode(i, a); + 
if (lock) hpfs_unlock_creation(i->i_sb); return !a; } delete_empty_dnode(i, dno); + if (lock) hpfs_unlock_creation(i->i_sb); return 0; } @@ -745,8 +751,8 @@ void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, ptr = 0; go_up: if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; - if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno) - hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up)); + if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && dnode->up != odno) + hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, dnode->up); de = dnode_first_de(dnode); if (ptr) while(1) { if (de->down) if (de_down_pointer(de) == ptr) goto process_de; @@ -770,7 +776,7 @@ void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, if (!de->first && !de->last && n_items) (*n_items)++; if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de; ptr = dno; - dno = le32_to_cpu(dnode->up); + dno = dnode->up; if (dnode->root_dnode) { hpfs_brelse4(&qbh); return; @@ -818,8 +824,8 @@ dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno) return d; if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; if (hpfs_sb(s)->sb_chk) - if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up) - hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up)); + if (up && ((struct dnode *)qbh.data)->up != up) + hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, ((struct dnode *)qbh.data)->up); if (!de->down) { hpfs_brelse4(&qbh); return d; @@ -868,7 +874,7 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, /* Going up */ if (dnode->root_dnode) goto bail; - if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0))) + if (!(up_dnode = hpfs_map_dnode(inode->i_sb, dnode->up, &qbh0))) goto bail; end_up_de = dnode_end_de(up_dnode); @@ -876,16 +882,16 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; up_de = de_next_de(up_de)) { if (!(++c & 077)) hpfs_error(inode->i_sb, - "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up)); + "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", dnode->up); if (up_de->down && de_down_pointer(up_de) == dno) { - *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; + *posp = ((loff_t) dnode->up << 4) + c; hpfs_brelse4(&qbh0); return de; } } hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x", - dno, le32_to_cpu(dnode->up)); + dno, dnode->up); hpfs_brelse4(&qbh0); bail: @@ -1011,17 +1017,17 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, /*name2[15] = 0xff;*/ name1len = 15; name2len = 256; } - if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) { + if (!(upf = hpfs_map_fnode(s, f->up, &bh))) { kfree(name2); return NULL; } if (!upf->dirflag) { brelse(bh); - hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); + hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, f->up); kfree(name2); return NULL; } - dno = le32_to_cpu(upf->u.external[0].disk_secno); + dno = upf->u.external[0].disk_secno; brelse(bh); go_down: downd = 0; @@ -1043,7 +1049,7 @@ struct hpfs_dirent 
*map_fnode_dirent(struct super_block *s, fnode_secno fno, return NULL; } next_de: - if (le32_to_cpu(de->fnode) == fno) { + if (de->fnode == fno) { kfree(name2); return de; } @@ -1059,7 +1065,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, goto go_down; } f: - if (le32_to_cpu(de->fnode) == fno) { + if (de->fnode == fno) { kfree(name2); return de; } @@ -1068,7 +1074,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, if ((de = de_next_de(de)) < de_end) goto next_de; if (d->root_dnode) goto not_found; downd = dno; - dno = le32_to_cpu(d->up); + dno = d->up; hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { diff --git a/trunk/fs/hpfs/ea.c b/trunk/fs/hpfs/ea.c index d8b84d113c89..45e53d972b42 100644 --- a/trunk/fs/hpfs/ea.c +++ b/trunk/fs/hpfs/ea.c @@ -24,7 +24,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len) } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; if (ea->indirect) { - if (ea_valuelen(ea) != 8) { + if (ea->valuelen != 8) { hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x", ano ? "anode" : "sectors", a, pos); return; @@ -33,7 +33,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len) return; hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); } - pos += ea->namelen + ea_valuelen(ea) + 5; + pos += ea->namelen + ea->valuelen + 5; } if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9); else { @@ -76,24 +76,24 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, unsigned pos; int ano, len; secno a; - char ex[4 + 255 + 1 + 8]; struct extended_attribute *ea; struct extended_attribute *ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (!strcmp(ea->name, key)) { if (ea->indirect) goto indirect; - if (ea_valuelen(ea) >= size) + if (ea->valuelen >= size) return -EINVAL; - memcpy(buf, ea_data(ea), ea_valuelen(ea)); - buf[ea_valuelen(ea)] = 0; + memcpy(buf, ea_data(ea), ea->valuelen); + buf[ea->valuelen] = 0; return 0; } - a = le32_to_cpu(fnode->ea_secno); - len = le32_to_cpu(fnode->ea_size_l); + a = fnode->ea_secno; + len = fnode->ea_size_l; ano = fnode->ea_anode; pos = 0; while (pos < len) { + char ex[4 + 255 + 1 + 8]; ea = (struct extended_attribute *)ex; if (pos + 4 > len) { hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x", @@ -106,14 +106,14 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, if (!strcmp(ea->name, key)) { if (ea->indirect) goto indirect; - if (ea_valuelen(ea) >= size) + if (ea->valuelen >= size) return -EINVAL; - if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), buf)) + if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, buf)) return -EIO; - buf[ea_valuelen(ea)] = 0; + buf[ea->valuelen] = 0; return 0; } - pos += ea->namelen + ea_valuelen(ea) + 5; + pos += ea->namelen + ea->valuelen + 5; } return -ENOENT; indirect: @@ -138,16 +138,16 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si if (!strcmp(ea->name, key)) { if (ea->indirect) return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); - if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { + if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) { printk("HPFS: out of memory for EA\n"); return NULL; } - memcpy(ret, ea_data(ea), ea_valuelen(ea)); - ret[ea_valuelen(ea)] = 0; + memcpy(ret, ea_data(ea), ea->valuelen); + 
ret[ea->valuelen] = 0; return ret; } - a = le32_to_cpu(fnode->ea_secno); - len = le32_to_cpu(fnode->ea_size_l); + a = fnode->ea_secno; + len = fnode->ea_size_l; ano = fnode->ea_anode; pos = 0; while (pos < len) { @@ -164,18 +164,18 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si if (!strcmp(ea->name, key)) { if (ea->indirect) return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); - if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { + if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) { printk("HPFS: out of memory for EA\n"); return NULL; } - if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), ret)) { + if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, ret)) { kfree(ret); return NULL; } - ret[ea_valuelen(ea)] = 0; + ret[ea->valuelen] = 0; return ret; } - pos += ea->namelen + ea_valuelen(ea) + 5; + pos += ea->namelen + ea->valuelen + 5; } return NULL; } @@ -202,13 +202,13 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, if (ea->indirect) { if (ea_len(ea) == size) set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); - } else if (ea_valuelen(ea) == size) { + } else if (ea->valuelen == size) { memcpy(ea_data(ea), data, size); } return; } - a = le32_to_cpu(fnode->ea_secno); - len = le32_to_cpu(fnode->ea_size_l); + a = fnode->ea_secno; + len = fnode->ea_size_l; ano = fnode->ea_anode; pos = 0; while (pos < len) { @@ -228,70 +228,68 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); } else { - if (ea_valuelen(ea) == size) + if (ea->valuelen == size) hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data); } return; } - pos += ea->namelen + ea_valuelen(ea) + 5; + pos += ea->namelen + ea->valuelen + 5; } - if (!le16_to_cpu(fnode->ea_offs)) { - /*if (le16_to_cpu(fnode->ea_size_s)) { + if (!fnode->ea_offs) { + /*if (fnode->ea_size_s) { hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0", - inode->i_ino, le16_to_cpu(fnode->ea_size_s)); + inode->i_ino, fnode->ea_size_s); return; }*/ - fnode->ea_offs = cpu_to_le16(0xc4); + fnode->ea_offs = 0xc4; } - if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) { + if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) { hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", (unsigned long)inode->i_ino, - le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); + fnode->ea_offs, fnode->ea_size_s); return; } - if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) && - le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5 <= 0x200) { + if ((fnode->ea_size_s || !fnode->ea_size_l) && + fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s + strlen(key) + size + 5 <= 0x200) { ea = fnode_end_ea(fnode); *(char *)ea = 0; ea->namelen = strlen(key); - ea->valuelen_lo = size; - ea->valuelen_hi = size >> 8; + ea->valuelen = size; strcpy(ea->name, key); memcpy(ea_data(ea), data, size); - fnode->ea_size_s = cpu_to_le16(le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5); + fnode->ea_size_s += strlen(key) + size + 5; goto ret; } /* Most the code here is 99.9993422% unused. I hope there are no bugs. But what .. HPFS.IFS has also bugs in ea management. 
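/*
 * Layout note for the ea.c hunks above, deduced from the 4-byte header
 * writes (the h[] bytes) and the "namelen + valuelen + 5" stride: each
 * on-disk EA record appears to be
 *
 *   byte  0      flag byte (indirect/anode bits), zeroed for a plain EA
 *   byte  1      namelen
 *   bytes 2-3    valuelen, little-endian
 *   bytes 4..    name, NUL-terminated (namelen + 1 bytes)
 *   then         valuelen bytes of value
 *
 * which is why stepping to the next record adds namelen + valuelen + 5,
 * and why the stack buffer is sized 4 + 255 + 1 + 8.
 */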
*/ - if (le16_to_cpu(fnode->ea_size_s) && !le32_to_cpu(fnode->ea_size_l)) { + if (fnode->ea_size_s && !fnode->ea_size_l) { secno n; struct buffer_head *bh; char *data; - if (!(n = hpfs_alloc_sector(s, fno, 1, 0))) return; + if (!(n = hpfs_alloc_sector(s, fno, 1, 0, 1))) return; if (!(data = hpfs_get_sector(s, n, &bh))) { hpfs_free_sectors(s, n, 1); return; } - memcpy(data, fnode_ea(fnode), le16_to_cpu(fnode->ea_size_s)); - fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s)); - fnode->ea_size_s = cpu_to_le16(0); - fnode->ea_secno = cpu_to_le32(n); - fnode->ea_anode = cpu_to_le32(0); + memcpy(data, fnode_ea(fnode), fnode->ea_size_s); + fnode->ea_size_l = fnode->ea_size_s; + fnode->ea_size_s = 0; + fnode->ea_secno = n; + fnode->ea_anode = 0; mark_buffer_dirty(bh); brelse(bh); } - pos = le32_to_cpu(fnode->ea_size_l) + 5 + strlen(key) + size; - len = (le32_to_cpu(fnode->ea_size_l) + 511) >> 9; + pos = fnode->ea_size_l + 5 + strlen(key) + size; + len = (fnode->ea_size_l + 511) >> 9; if (pos >= 30000) goto bail; while (((pos + 511) >> 9) > len) { if (!len) { - secno q = hpfs_alloc_sector(s, fno, 1, 0); - if (!q) goto bail; - fnode->ea_secno = cpu_to_le32(q); + if (!(fnode->ea_secno = hpfs_alloc_sector(s, fno, 1, 0, 1))) + goto bail; fnode->ea_anode = 0; len++; } else if (!fnode->ea_anode) { - if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) { + if (hpfs_alloc_if_possible(s, fnode->ea_secno + len)) { len++; } else { /* Aargh... don't know how to create ea anodes :-( */ @@ -300,26 +298,26 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, anode_secno a_s; if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh))) goto bail; - anode->up = cpu_to_le32(fno); + anode->up = fno; anode->btree.fnode_parent = 1; anode->btree.n_free_nodes--; anode->btree.n_used_nodes++; - anode->btree.first_free = cpu_to_le16(le16_to_cpu(anode->btree.first_free) + 12); - anode->u.external[0].disk_secno = cpu_to_le32(le32_to_cpu(fnode->ea_secno)); - anode->u.external[0].file_secno = cpu_to_le32(0); - anode->u.external[0].length = cpu_to_le32(len); + anode->btree.first_free += 12; + anode->u.external[0].disk_secno = fnode->ea_secno; + anode->u.external[0].file_secno = 0; + anode->u.external[0].length = len; mark_buffer_dirty(bh); brelse(bh); fnode->ea_anode = 1; - fnode->ea_secno = cpu_to_le32(a_s);*/ + fnode->ea_secno = a_s;*/ secno new_sec; int i; - if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9)))) + if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9), 1))) goto bail; for (i = 0; i < len; i++) { struct buffer_head *bh1, *bh2; void *b1, *b2; - if (!(b1 = hpfs_map_sector(s, le32_to_cpu(fnode->ea_secno) + i, &bh1, len - i - 1))) { + if (!(b1 = hpfs_map_sector(s, fnode->ea_secno + i, &bh1, len - i - 1))) { hpfs_free_sectors(s, new_sec, (pos + 511) >> 9); goto bail; } @@ -333,13 +331,13 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, mark_buffer_dirty(bh2); brelse(bh2); } - hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno), len); - fnode->ea_secno = cpu_to_le32(new_sec); + hpfs_free_sectors(s, fnode->ea_secno, len); + fnode->ea_secno = new_sec; len = (pos + 511) >> 9; } } if (fnode->ea_anode) { - if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno), + if (hpfs_add_sector_to_btree(s, fnode->ea_secno, 0, len) != -1) { len++; } else { @@ -351,17 +349,17 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, h[1] = strlen(key); h[2] = size & 0xff; h[3] = size >> 8; - if 
(hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail; - if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail; - if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail; - fnode->ea_size_l = cpu_to_le32(pos); + if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l, 4, h)) goto bail; + if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 4, h[1] + 1, key)) goto bail; + if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 5 + h[1], size, data)) goto bail; + fnode->ea_size_l = pos; ret: hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size; return; bail: - if (le32_to_cpu(fnode->ea_secno)) - if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9); - else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9)); - else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0); + if (fnode->ea_secno) + if (fnode->ea_anode) hpfs_truncate_btree(s, fnode->ea_secno, 1, (fnode->ea_size_l + 511) >> 9); + else hpfs_free_sectors(s, fnode->ea_secno + ((fnode->ea_size_l + 511) >> 9), len - ((fnode->ea_size_l + 511) >> 9)); + else fnode->ea_secno = fnode->ea_size_l = 0; } diff --git a/trunk/fs/hpfs/file.c b/trunk/fs/hpfs/file.c index 89c500ee5213..9b9eb6933e43 100644 --- a/trunk/fs/hpfs/file.c +++ b/trunk/fs/hpfs/file.c @@ -20,8 +20,8 @@ static int hpfs_file_release(struct inode *inode, struct file *file) int hpfs_file_fsync(struct file *file, int datasync) { - struct inode *inode = file->f_mapping->host; - return sync_blockdev(inode->i_sb->s_bdev); + /*return file_fsync(file, datasync);*/ + return 0; /* Don't fsync :-) */ } /* @@ -48,46 +48,38 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno) static void hpfs_truncate(struct inode *i) { if (IS_IMMUTABLE(i)) return /*-EPERM*/; - hpfs_lock_assert(i->i_sb); - + hpfs_lock(i->i_sb); hpfs_i(i)->i_n_secs = 0; i->i_blocks = 1 + ((i->i_size + 511) >> 9); hpfs_i(i)->mmu_private = i->i_size; hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9)); hpfs_write_inode(i); hpfs_i(i)->i_n_secs = 0; + hpfs_unlock(i->i_sb); } static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - int r; secno s; - hpfs_lock(inode->i_sb); s = hpfs_bmap(inode, iblock); if (s) { map_bh(bh_result, inode->i_sb, s); - goto ret_0; + return 0; } - if (!create) goto ret_0; + if (!create) return 0; if (iblock<<9 != hpfs_i(inode)->mmu_private) { BUG(); - r = -EIO; - goto ret_r; + return -EIO; } if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) { hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1); - r = -ENOSPC; - goto ret_r; + return -ENOSPC; } inode->i_blocks++; hpfs_i(inode)->mmu_private += 512; set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, s); - ret_0: - r = 0; - ret_r: - hpfs_unlock(inode->i_sb); - return r; + return 0; } static int hpfs_writepage(struct page *page, struct writeback_control *wbc) @@ -138,11 +130,8 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf, ssize_t retval; retval = do_sync_write(file, buf, count, ppos); - if (retval > 0) { - hpfs_lock(file->f_path.dentry->d_sb); + if (retval > 
0)
 hpfs_i(file->f_path.dentry->d_inode)->i_dirty = 1;
- hpfs_unlock(file->f_path.dentry->d_sb);
- }
 return retval;
 }
diff --git a/trunk/fs/hpfs/hpfs.h b/trunk/fs/hpfs/hpfs.h
index 8b0650aae328..0e84c73cd9c4 100644
--- a/trunk/fs/hpfs/hpfs.h
+++ b/trunk/fs/hpfs/hpfs.h
@@ -19,13 +19,9 @@
 For definitive information on HPFS, ask somebody else -- this is guesswork. There are certain to be many mistakes. */
-#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
-#error unknown endian
-#endif
-
 /* Notation */
-typedef u32 secno; /* sector number, partition relative */
+typedef unsigned secno; /* sector number, partition relative */
 typedef secno dnode_secno; /* sector number of a dnode */
 typedef secno fnode_secno; /* sector number of an fnode */
@@ -42,28 +38,28 @@ typedef u32 time32_t; /* 32-bit time_t type */
 struct hpfs_boot_block
 {
- u8 jmp[3];
- u8 oem_id[8];
- u8 bytes_per_sector[2]; /* 512 */
- u8 sectors_per_cluster;
- u8 n_reserved_sectors[2];
- u8 n_fats;
- u8 n_rootdir_entries[2];
- u8 n_sectors_s[2];
- u8 media_byte;
- u16 sectors_per_fat;
- u16 sectors_per_track;
- u16 heads_per_cyl;
- u32 n_hidden_sectors;
- u32 n_sectors_l; /* size of partition */
- u8 drive_number;
- u8 mbz;
- u8 sig_28h; /* 28h */
- u8 vol_serno[4];
- u8 vol_label[11];
- u8 sig_hpfs[8]; /* "HPFS " */
- u8 pad[448];
- u16 magic; /* aa55 */
+ unsigned char jmp[3];
+ unsigned char oem_id[8];
+ unsigned char bytes_per_sector[2]; /* 512 */
+ unsigned char sectors_per_cluster;
+ unsigned char n_reserved_sectors[2];
+ unsigned char n_fats;
+ unsigned char n_rootdir_entries[2];
+ unsigned char n_sectors_s[2];
+ unsigned char media_byte;
+ unsigned short sectors_per_fat;
+ unsigned short sectors_per_track;
+ unsigned short heads_per_cyl;
+ unsigned int n_hidden_sectors;
+ unsigned int n_sectors_l; /* size of partition */
+ unsigned char drive_number;
+ unsigned char mbz;
+ unsigned char sig_28h; /* 28h */
+ unsigned char vol_serno[4];
+ unsigned char vol_label[11];
+ unsigned char sig_hpfs[8]; /* "HPFS " */
+ unsigned char pad[448];
+ unsigned short magic; /* aa55 */
 };
@@ -75,29 +71,31 @@ struct hpfs_boot_block
 struct hpfs_super_block
 {
- u32 magic; /* f995 e849 */
- u32 magic1; /* fa53 e9c5, more magic? */
- u8 version; /* version of a filesystem usually 2 */
- u8 funcversion; /* functional version - oldest version
+ unsigned magic; /* f995 e849 */
+ unsigned magic1; /* fa53 e9c5, more magic? */
+ /*unsigned huh202;*/ /* ?? 202 = N. of B. in 1.00390625 S.*/
+ char version; /* version of a filesystem usually 2 */
+ char funcversion; /* functional version - oldest version
 of filesystem that can understand this disk */
- u16 zero; /* 0 */
+ unsigned short int zero; /* 0 */
 fnode_secno root; /* fnode of root directory */
 secno n_sectors; /* size of filesystem */
- u32 n_badblocks; /* number of bad blocks */
+ unsigned n_badblocks; /* number of bad blocks */
 secno bitmaps; /* pointers to free space bit maps */
- u32 zero1; /* 0 */
+ unsigned zero1; /* 0 */
 secno badblocks; /* bad block list */
- u32 zero3; /* 0 */
+ unsigned zero3; /* 0 */
 time32_t last_chkdsk; /* date last checked, 0 if never */
- time32_t last_optimize; /* date last optimized, 0 if never */
+ /*unsigned zero4;*/ /* 0 */
+ time32_t last_optimize; /* date last optimized, 0 if never */
 secno n_dir_band; /* number of sectors in dir band */
 secno dir_band_start; /* first sector in dir band */
 secno dir_band_end; /* last sector in dir band */
 secno dir_band_bitmap; /* free space map, 1 dnode per bit */
- u8 volume_name[32]; /* not used */
+ char volume_name[32]; /* not used */
 secno user_id_table; /* 8 preallocated sectors - user id */
- u32 zero6[103]; /* 0 */
+ unsigned zero6[103]; /* 0 */
 };
@@ -109,65 +107,44 @@ struct hpfs_super_block
 struct hpfs_spare_block
 {
- u32 magic; /* f991 1849 */
- u32 magic1; /* fa52 29c5, more magic? */
-
-#ifdef __LITTLE_ENDIAN
- u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */
- u8 sparedir_used: 1; /* spare dirblks used */
- u8 hotfixes_used: 1; /* hotfixes used */
- u8 bad_sector: 1; /* bad sector, corrupted disk (???) */
- u8 bad_bitmap: 1; /* bad bitmap */
- u8 fast: 1; /* partition was fast formatted */
- u8 old_wrote: 1; /* old version wrote to partion */
- u8 old_wrote_1: 1; /* old version wrote to partion (?) */
-#else
- u8 old_wrote_1: 1; /* old version wrote to partion (?) */
- u8 old_wrote: 1; /* old version wrote to partion */
- u8 fast: 1; /* partition was fast formatted */
- u8 bad_bitmap: 1; /* bad bitmap */
- u8 bad_sector: 1; /* bad sector, corrupted disk (???) */
- u8 hotfixes_used: 1; /* hotfixes used */
- u8 sparedir_used: 1; /* spare dirblks used */
- u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */
-#endif
-
-#ifdef __LITTLE_ENDIAN
- u8 install_dasd_limits: 1; /* HPFS386 flags */
- u8 resynch_dasd_limits: 1;
- u8 dasd_limits_operational: 1;
- u8 multimedia_active: 1;
- u8 dce_acls_active: 1;
- u8 dasd_limits_dirty: 1;
- u8 flag67: 2;
-#else
- u8 flag67: 2;
- u8 dasd_limits_dirty: 1;
- u8 dce_acls_active: 1;
- u8 multimedia_active: 1;
- u8 dasd_limits_operational: 1;
- u8 resynch_dasd_limits: 1;
- u8 install_dasd_limits: 1; /* HPFS386 flags */
-#endif
-
- u8 mm_contlgulty;
- u8 unused;
+ unsigned magic; /* f991 1849 */
+ unsigned magic1; /* fa52 29c5, more magic? */
+
+ unsigned dirty: 1; /* 0 clean, 1 "improperly stopped" */
+ /*unsigned flag1234: 4;*/ /* unknown flags */
+ unsigned sparedir_used: 1; /* spare dirblks used */
+ unsigned hotfixes_used: 1; /* hotfixes used */
+ unsigned bad_sector: 1; /* bad sector, corrupted disk (???) */
+ unsigned bad_bitmap: 1; /* bad bitmap */
+ unsigned fast: 1; /* partition was fast formatted */
+ unsigned old_wrote: 1; /* old version wrote to partion */
+ unsigned old_wrote_1: 1; /* old version wrote to partion (?) */
+ unsigned install_dasd_limits: 1; /* HPFS386 flags */
+ unsigned resynch_dasd_limits: 1;
+ unsigned dasd_limits_operational: 1;
+ unsigned multimedia_active: 1;
+ unsigned dce_acls_active: 1;
+ unsigned dasd_limits_dirty: 1;
+ unsigned flag67: 2;
+ unsigned char mm_contlgulty;
+ unsigned char unused;
 secno hotfix_map; /* info about remapped bad sectors */
- u32 n_spares_used; /* number of hotfixes */
- u32 n_spares; /* number of spares in hotfix map */
- u32 n_dnode_spares_free; /* spare dnodes unused */
- u32 n_dnode_spares; /* length of spare_dnodes[] list,
+ unsigned n_spares_used; /* number of hotfixes */
+ unsigned n_spares; /* number of spares in hotfix map */
+ unsigned n_dnode_spares_free; /* spare dnodes unused */
+ unsigned n_dnode_spares; /* length of spare_dnodes[] list,
 follows in this block*/
 secno code_page_dir; /* code page directory block */
- u32 n_code_pages; /* number of code pages */
- u32 super_crc; /* on HPFS386 and LAN Server this is
+ unsigned n_code_pages; /* number of code pages */
+ /*unsigned large_numbers[2];*/ /* ?? */
+ unsigned super_crc; /* on HPFS386 and LAN Server this is
 checksum of superblock, on normal OS/2 unused */
- u32 spare_crc; /* on HPFS386 checksum of spareblock */
- u32 zero1[15]; /* unused */
+ unsigned spare_crc; /* on HPFS386 checksum of spareblock */
+ unsigned zero1[15]; /* unused */
 dnode_secno spare_dnodes[100]; /* emergency free dnode list */
- u32 zero2[1]; /* room for more? */
+ unsigned zero2[1]; /* room for more? */
 };
 /* The bad block list is 4 sectors long. The first word must be zero,
@@ -202,18 +179,18 @@ struct hpfs_spare_block
 struct code_page_directory
 {
- u32 magic; /* 4945 21f7 */
- u32 n_code_pages; /* number of pointers following */
- u32 zero1[2];
+ unsigned magic; /* 4945 21f7 */
+ unsigned n_code_pages; /* number of pointers following */
+ unsigned zero1[2];
 struct {
- u16 ix; /* index */
- u16 code_page_number; /* code page number */
- u32 bounds; /* matches corresponding word
+ unsigned short ix; /* index */
+ unsigned short code_page_number; /* code page number */
+ unsigned bounds; /* matches corresponding word
 in data block */
 secno code_page_data; /* sector number of a code_page_data containing c.p. array */
- u16 index; /* index in c.p. array in that sector*/
- u16 unknown; /* some unknown value; usually 0;
+ unsigned short index; /* index in c.p. array in that sector*/
+ unsigned short unknown; /* some unknown value; usually 0;
 2 in Japanese version */
 } array[31]; /* unknown length */
 };
@@ -224,21 +201,21 @@ struct code_page_directory
 struct code_page_data
 {
- u32 magic; /* 8945 21f7 */
- u32 n_used; /* # elements used in c_p_data[] */
- u32 bounds[3]; /* looks a bit like
+ unsigned magic; /* 8945 21f7 */
+ unsigned n_used; /* # elements used in c_p_data[] */
+ unsigned bounds[3]; /* looks a bit like
 (beg1,end1), (beg2,end2) one byte each */
- u16 offs[3]; /* offsets from start of sector
+ unsigned short offs[3]; /* offsets from start of sector
 to start of c_p_data[ix] */
 struct {
- u16 ix; /* index */
- u16 code_page_number; /* code page number */
- u16 unknown; /* the same as in cp directory */
- u8 map[128]; /* upcase table for chars 80..ff */
- u16 zero2;
+ unsigned short ix; /* index */
+ unsigned short code_page_number; /* code page number */
+ unsigned short unknown; /* the same as in cp directory */
+ unsigned char map[128]; /* upcase table for chars 80..ff */
+ unsigned short zero2;
 } code_page[3];
- u8 incognita[78];
+ unsigned char incognita[78];
 };
@@ -278,84 +255,50 @@ struct code_page_data
 #define DNODE_MAGIC 0x77e40aae
 struct dnode {
- u32 magic; /* 77e4 0aae */
- u32 first_free; /* offset from start of dnode to
+ unsigned magic; /* 77e4 0aae */
+ unsigned first_free; /* offset from start of dnode to
 first free dir entry */
-#ifdef __LITTLE_ENDIAN
- u8 root_dnode: 1; /* Is it root dnode? */
- u8 increment_me: 7; /* some kind of activity counter? */
- /* Neither HPFS.IFS nor CHKDSK cares
- if you change this word */
-#else
- u8 increment_me: 7; /* some kind of activity counter? */
- /* Neither HPFS.IFS nor CHKDSK cares
+ unsigned root_dnode:1; /* Is it root dnode? */
+ unsigned increment_me:31; /* some kind of activity counter?
 Neither HPFS.IFS nor CHKDSK cares if you change this word */
- u8 root_dnode: 1; /* Is it root dnode? */
-#endif
- u8 increment_me2[3];
 secno up; /* (root dnode) directory's fnode (nonroot) parent dnode */
 dnode_secno self; /* pointer to this dnode */
- u8 dirent[2028]; /* one or more dirents */
+ unsigned char dirent[2028]; /* one or more dirents */
 };
 struct hpfs_dirent {
- u16 length; /* offset to next dirent */
-
-#ifdef __LITTLE_ENDIAN
- u8 first: 1; /* set on phony ^A^A (".") entry */
- u8 has_acl: 1;
- u8 down: 1; /* down pointer present (after name) */
- u8 last: 1; /* set on phony \377 entry */
- u8 has_ea: 1; /* entry has EA */
- u8 has_xtd_perm: 1; /* has extended perm list (???) */
- u8 has_explicit_acl: 1;
- u8 has_needea: 1; /* ?? some EA has NEEDEA set
- I have no idea why this is
- interesting in a dir entry */
-#else
- u8 has_needea: 1; /* ?? some EA has NEEDEA set
+ unsigned short length; /* offset to next dirent */
+ unsigned first: 1; /* set on phony ^A^A (".") entry */
+ unsigned has_acl: 1;
+ unsigned down: 1; /* down pointer present (after name) */
+ unsigned last: 1; /* set on phony \377 entry */
+ unsigned has_ea: 1; /* entry has EA */
+ unsigned has_xtd_perm: 1; /* has extended perm list (???) */
+ unsigned has_explicit_acl: 1;
+ unsigned has_needea: 1; /* ?? some EA has NEEDEA set
 I have no idea why this is interesting in a dir entry */
- u8 has_explicit_acl: 1;
- u8 has_xtd_perm: 1; /* has extended perm list (???)
*/ - u8 has_ea: 1; /* entry has EA */ - u8 last: 1; /* set on phony \377 entry */ - u8 down: 1; /* down pointer present (after name) */ - u8 has_acl: 1; - u8 first: 1; /* set on phony ^A^A (".") entry */ -#endif - -#ifdef __LITTLE_ENDIAN - u8 read_only: 1; /* dos attrib */ - u8 hidden: 1; /* dos attrib */ - u8 system: 1; /* dos attrib */ - u8 flag11: 1; /* would be volume label dos attrib */ - u8 directory: 1; /* dos attrib */ - u8 archive: 1; /* dos attrib */ - u8 not_8x3: 1; /* name is not 8.3 */ - u8 flag15: 1; -#else - u8 flag15: 1; - u8 not_8x3: 1; /* name is not 8.3 */ - u8 archive: 1; /* dos attrib */ - u8 directory: 1; /* dos attrib */ - u8 flag11: 1; /* would be volume label dos attrib */ - u8 system: 1; /* dos attrib */ - u8 hidden: 1; /* dos attrib */ - u8 read_only: 1; /* dos attrib */ -#endif - + unsigned read_only: 1; /* dos attrib */ + unsigned hidden: 1; /* dos attrib */ + unsigned system: 1; /* dos attrib */ + unsigned flag11: 1; /* would be volume label dos attrib */ + unsigned directory: 1; /* dos attrib */ + unsigned archive: 1; /* dos attrib */ + unsigned not_8x3: 1; /* name is not 8.3 */ + unsigned flag15: 1; fnode_secno fnode; /* fnode giving allocation info */ time32_t write_date; /* mtime */ - u32 file_size; /* file length, bytes */ + unsigned file_size; /* file length, bytes */ time32_t read_date; /* atime */ time32_t creation_date; /* ctime */ - u32 ea_size; /* total EA length, bytes */ - u8 no_of_acls; /* number of ACL's (low 3 bits) */ - u8 ix; /* code page index (of filename), see + unsigned ea_size; /* total EA length, bytes */ + unsigned char no_of_acls : 3; /* number of ACL's */ + unsigned char reserver : 5; + unsigned char ix; /* code page index (of filename), see struct code_page_data */ - u8 namelen, name[1]; /* file name */ + unsigned char namelen, name[1]; /* file name */ /* dnode_secno down; btree down pointer, if present, follows name on next word boundary, or maybe it precedes next dirent, which is on a word boundary. */ @@ -375,50 +318,38 @@ struct hpfs_dirent { struct bplus_leaf_node { - u32 file_secno; /* first file sector in extent */ - u32 length; /* length, sectors */ + unsigned file_secno; /* first file sector in extent */ + unsigned length; /* length, sectors */ secno disk_secno; /* first corresponding disk sector */ }; struct bplus_internal_node { - u32 file_secno; /* subtree maps sectors < this */ + unsigned file_secno; /* subtree maps sectors < this */ anode_secno down; /* pointer to subtree */ }; struct bplus_header { -#ifdef __LITTLE_ENDIAN - u8 hbff: 1; /* high bit of first free entry offset */ - u8 flag1234: 4; - u8 fnode_parent: 1; /* ? we're pointed to by an fnode, + unsigned hbff: 1; /* high bit of first free entry offset */ + unsigned flag1: 1; + unsigned flag2: 1; + unsigned flag3: 1; + unsigned flag4: 1; + unsigned fnode_parent: 1; /* ? we're pointed to by an fnode, the data btree or some ea or the main ea bootage pointer ea_secno */ /* also can get set in fnodes, which may be a chkdsk glitch or may mean this bit is irrelevant in fnodes, or this interpretation is all wet */ - u8 binary_search: 1; /* suggest binary search (unused) */ - u8 internal: 1; /* 1 -> (internal) tree of anodes - 0 -> (leaf) list of extents */ -#else - u8 internal: 1; /* 1 -> (internal) tree of anodes + unsigned binary_search: 1; /* suggest binary search (unused) */ + unsigned internal: 1; /* 1 -> (internal) tree of anodes 0 -> (leaf) list of extents */ - u8 binary_search: 1; /* suggest binary search (unused) */ - u8 fnode_parent: 1; /* ? 
we're pointed to by an fnode, - the data btree or some ea or the - main ea bootage pointer ea_secno */ - /* also can get set in fnodes, which - may be a chkdsk glitch or may mean - this bit is irrelevant in fnodes, - or this interpretation is all wet */ - u8 flag1234: 4; - u8 hbff: 1; /* high bit of first free entry offset */ -#endif - u8 fill[3]; - u8 n_free_nodes; /* free nodes in following array */ - u8 n_used_nodes; /* used nodes in following array */ - u16 first_free; /* offset from start of header to + unsigned char fill[3]; + unsigned char n_free_nodes; /* free nodes in following array */ + unsigned char n_used_nodes; /* used nodes in following array */ + unsigned short first_free; /* offset from start of header to first free node in array */ union { struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving @@ -438,38 +369,37 @@ struct bplus_header struct fnode { - u32 magic; /* f7e4 0aae */ - u32 zero1[2]; /* read history */ - u8 len, name[15]; /* true length, truncated name */ + unsigned magic; /* f7e4 0aae */ + unsigned zero1[2]; /* read history */ + unsigned char len, name[15]; /* true length, truncated name */ fnode_secno up; /* pointer to file's directory fnode */ + /*unsigned zero2[3];*/ secno acl_size_l; secno acl_secno; - u16 acl_size_s; - u8 acl_anode; - u8 zero2; /* history bit count */ - u32 ea_size_l; /* length of disk-resident ea's */ + unsigned short acl_size_s; + char acl_anode; + char zero2; /* history bit count */ + unsigned ea_size_l; /* length of disk-resident ea's */ secno ea_secno; /* first sector of disk-resident ea's*/ - u16 ea_size_s; /* length of fnode-resident ea's */ - -#ifdef __LITTLE_ENDIAN - u8 flag0: 1; - u8 ea_anode: 1; /* 1 -> ea_secno is an anode */ - u8 flag234567: 6; -#else - u8 flag234567: 6; - u8 ea_anode: 1; /* 1 -> ea_secno is an anode */ - u8 flag0: 1; -#endif - -#ifdef __LITTLE_ENDIAN - u8 dirflag: 1; /* 1 -> directory. first & only extent + unsigned short ea_size_s; /* length of fnode-resident ea's */ + + unsigned flag0: 1; + unsigned ea_anode: 1; /* 1 -> ea_secno is an anode */ + unsigned flag2: 1; + unsigned flag3: 1; + unsigned flag4: 1; + unsigned flag5: 1; + unsigned flag6: 1; + unsigned flag7: 1; + unsigned dirflag: 1; /* 1 -> directory. first & only extent points to dnode. */ - u8 flag9012345: 7; -#else - u8 flag9012345: 7; - u8 dirflag: 1; /* 1 -> directory. first & only extent - points to dnode. */ -#endif + unsigned flag9: 1; + unsigned flag10: 1; + unsigned flag11: 1; + unsigned flag12: 1; + unsigned flag13: 1; + unsigned flag14: 1; + unsigned flag15: 1; struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */ union { @@ -477,16 +407,17 @@ struct fnode struct bplus_internal_node internal[12]; } u; - u32 file_size; /* file length, bytes */ - u32 n_needea; /* number of EA's with NEEDEA set */ - u8 user_id[16]; /* unused */ - u16 ea_offs; /* offset from start of fnode + unsigned file_size; /* file length, bytes */ + unsigned n_needea; /* number of EA's with NEEDEA set */ + char user_id[16]; /* unused */ + unsigned short ea_offs; /* offset from start of fnode to first fnode-resident ea */ - u8 dasd_limit_treshhold; - u8 dasd_limit_delta; - u32 dasd_limit; - u32 dasd_usage; - u8 ea[316]; /* zero or more EA's, packed together + char dasd_limit_treshhold; + char dasd_limit_delta; + unsigned dasd_limit; + unsigned dasd_usage; + /*unsigned zero5[2];*/ + unsigned char ea[316]; /* zero or more EA's, packed together with no alignment padding. (Do not use this name, get here via fnode + ea_offs. I think.) 
*/ @@ -499,7 +430,7 @@ struct fnode struct anode { - u32 magic; /* 37e4 0aae */ + unsigned magic; /* 37e4 0aae */ anode_secno self; /* pointer to this anode */ secno up; /* parent anode or fnode */ @@ -509,7 +440,7 @@ struct anode struct bplus_internal_node internal[60]; } u; - u32 fill[3]; /* unused */ + unsigned fill[3]; /* unused */ }; @@ -530,31 +461,25 @@ struct anode struct extended_attribute { -#ifdef __LITTLE_ENDIAN - u8 indirect: 1; /* 1 -> value gives sector number + unsigned indirect: 1; /* 1 -> value gives sector number where real value starts */ - u8 anode: 1; /* 1 -> sector is an anode - that points to fragmented value */ - u8 flag23456: 5; - u8 needea: 1; /* required ea */ -#else - u8 needea: 1; /* required ea */ - u8 flag23456: 5; - u8 anode: 1; /* 1 -> sector is an anode + unsigned anode: 1; /* 1 -> sector is an anode that points to fragmented value */ - u8 indirect: 1; /* 1 -> value gives sector number - where real value starts */ -#endif - u8 namelen; /* length of name, bytes */ - u8 valuelen_lo; /* length of value, bytes */ - u8 valuelen_hi; /* length of value, bytes */ - u8 name[0]; + unsigned flag2: 1; + unsigned flag3: 1; + unsigned flag4: 1; + unsigned flag5: 1; + unsigned flag6: 1; + unsigned needea: 1; /* required ea */ + unsigned char namelen; /* length of name, bytes */ + unsigned short valuelen; /* length of value, bytes */ + unsigned char name[0]; /* - u8 name[namelen]; ascii attrib name - u8 nul; terminating '\0', not counted - u8 value[valuelen]; value, arbitrary + unsigned char name[namelen]; ascii attrib name + unsigned char nul; terminating '\0', not counted + unsigned char value[valuelen]; value, arbitrary if this.indirect, valuelen is 8 and the value is - u32 length; real length of value, bytes + unsigned length; real length of value, bytes secno secno; sector address where it starts if this.anode, the above sector number is the root of an anode tree which points to the value. 
diff --git a/trunk/fs/hpfs/hpfs_fn.h b/trunk/fs/hpfs/hpfs_fn.h index dd552f862c8f..c15adbca07ff 100644 --- a/trunk/fs/hpfs/hpfs_fn.h +++ b/trunk/fs/hpfs/hpfs_fn.h @@ -13,7 +13,6 @@ #include #include #include -#include #include "hpfs.h" @@ -52,16 +51,18 @@ struct hpfs_inode_info { unsigned i_disk_sec; /* (files) minimalist cache of alloc info */ unsigned i_n_secs; /* (files) minimalist cache of alloc info */ unsigned i_ea_size; /* size of extended attributes */ + unsigned i_conv : 2; /* (files) crlf->newline hackery */ unsigned i_ea_mode : 1; /* file's permission is stored in ea */ unsigned i_ea_uid : 1; /* file's uid is stored in ea */ unsigned i_ea_gid : 1; /* file's gid is stored in ea */ unsigned i_dirty : 1; + struct mutex i_mutex; + struct mutex i_parent_mutex; loff_t **i_rddir_off; struct inode vfs_inode; }; struct hpfs_sb_info { - struct mutex hpfs_mutex; /* global hpfs lock */ ino_t sb_root; /* inode number of root dir */ unsigned sb_fs_size; /* file system size, sectors */ unsigned sb_bitmaps; /* sector number of bitmap list */ @@ -73,6 +74,7 @@ struct hpfs_sb_info { uid_t sb_uid; /* uid from mount options */ gid_t sb_gid; /* gid from mount options */ umode_t sb_mode; /* mode from mount options */ + unsigned sb_conv : 2; /* crlf->newline hackery */ unsigned sb_eas : 2; /* eas: 0-ignore, 1-ro, 2-rw */ unsigned sb_err : 2; /* on errs: 0-cont, 1-ro, 2-panic */ unsigned sb_chk : 2; /* checks: 0-no, 1-normal, 2-strict */ @@ -85,9 +87,20 @@ struct hpfs_sb_info { unsigned *sb_bmp_dir; /* main bitmap directory */ unsigned sb_c_bitmap; /* current bitmap */ unsigned sb_max_fwd_alloc; /* max forwad allocation */ + struct mutex hpfs_creation_de; /* when creating dirents, nobody else + can alloc blocks */ + /*unsigned sb_mounting : 1;*/ int sb_timeshift; }; +/* + * conv= options + */ + +#define CONV_BINARY 0 /* no conversion */ +#define CONV_TEXT 1 /* crlf->newline */ +#define CONV_AUTO 2 /* decide based on file contents */ + /* Four 512-byte buffers and the 2k block obtained by concatenating them */ struct quad_buffer_head { @@ -100,7 +113,7 @@ struct quad_buffer_head { static inline dnode_secno de_down_pointer (struct hpfs_dirent *de) { CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n")); - return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4)); + return *(dnode_secno *) ((void *) de + de->length - 4); } /* The first dir entry in a dnode */ @@ -114,46 +127,41 @@ static inline struct hpfs_dirent *dnode_first_de (struct dnode *dnode) static inline struct hpfs_dirent *dnode_end_de (struct dnode *dnode) { - CHKCOND(le32_to_cpu(dnode->first_free)>=0x14 && le32_to_cpu(dnode->first_free)<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %x\n",(unsigned)le32_to_cpu(dnode->first_free))); - return (void *) dnode + le32_to_cpu(dnode->first_free); + CHKCOND(dnode->first_free>=0x14 && dnode->first_free<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %d\n",(int)dnode->first_free)); + return (void *) dnode + dnode->first_free; } /* The dir entry after dir entry de */ static inline struct hpfs_dirent *de_next_de (struct hpfs_dirent *de) { - CHKCOND(le16_to_cpu(de->length)>=0x20 && le16_to_cpu(de->length)<0x800,("HPFS: de_next_de: de->length = %x\n",(unsigned)le16_to_cpu(de->length))); - return (void *) de + le16_to_cpu(de->length); + CHKCOND(de->length>=0x20 && de->length<0x800,("HPFS: de_next_de: de->length = %d\n",(int)de->length)); + return (void *) de + de->length; } static inline struct extended_attribute *fnode_ea(struct fnode *fnode) { - return (struct 
extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s)); + return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s); } static inline struct extended_attribute *fnode_end_ea(struct fnode *fnode) { - return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s)); -} - -static unsigned ea_valuelen(struct extended_attribute *ea) -{ - return ea->valuelen_lo + 256 * ea->valuelen_hi; + return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s); } static inline struct extended_attribute *next_ea(struct extended_attribute *ea) { - return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea_valuelen(ea)); + return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea->valuelen); } static inline secno ea_sec(struct extended_attribute *ea) { - return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen))); + return *(secno *)((char *)ea + 9 + ea->namelen); } static inline secno ea_len(struct extended_attribute *ea) { - return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen))); + return *(secno *)((char *)ea + 5 + ea->namelen); } static inline char *ea_data(struct extended_attribute *ea) @@ -178,13 +186,13 @@ static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src) dst->not_8x3 = n; } -static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n) +static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n) { int i; if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n; - if (!((le32_to_cpu(bmp[(b & 0x3fff) >> 5]) >> (b & 0x1f)) & 1)) return 1; + if (!((bmp[(b & 0x3fff) >> 5] >> (b & 0x1f)) & 1)) return 1; for (i = 1; i < n; i++) - if (!((le32_to_cpu(bmp[((b+i) & 0x3fff) >> 5]) >> ((b+i) & 0x1f)) & 1)) + if (/*b+i < 0x4000 &&*/ !((bmp[((b+i) & 0x3fff) >> 5] >> ((b+i) & 0x1f)) & 1)) return i + 1; return 0; } @@ -192,12 +200,12 @@ static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n) /* alloc.c */ int hpfs_chk_sectors(struct super_block *, secno, int, char *); -secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int); +secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int, int); int hpfs_alloc_if_possible(struct super_block *, secno); void hpfs_free_sectors(struct super_block *, secno, unsigned); int hpfs_check_free_dnodes(struct super_block *, int); void hpfs_free_dnode(struct super_block *, secno); -struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *); +struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *, int); struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **); struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **); @@ -214,6 +222,8 @@ void hpfs_remove_fnode(struct super_block *, fnode_secno fno); /* buffer.c */ +void hpfs_lock_creation(struct super_block *); +void hpfs_unlock_creation(struct super_block *); void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int); void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **); void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int); @@ -237,7 +247,7 @@ void hpfs_del_pos(struct inode *, loff_t *); struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, const 
unsigned char *, unsigned, secno);
 int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned,
- struct hpfs_dirent *);
+ struct hpfs_dirent *, int);
 int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
 void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
 dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
@@ -293,6 +303,7 @@ int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned,
 const unsigned char *, unsigned, int);
 int hpfs_is_name_long(const unsigned char *, unsigned);
 void hpfs_adjust_length(const unsigned char *, unsigned *);
+void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned);
 /* namei.c */
@@ -335,26 +346,21 @@ static inline time32_t gmt_to_local(struct super_block *s, time_t t)
 /*
 * Locking:
 *
- * hpfs_lock() locks the whole filesystem. It must be taken
- * on any method called by the VFS.
+ * hpfs_lock() is a leftover from the big kernel lock.
+ * Right now, these functions are empty and only left
+ * for documentation purposes. The file system no longer
+ * works on SMP systems, so the lock is not needed
+ * any more.
 *
- * We don't do any per-file locking anymore, it is hard to
- * review and HPFS is not performance-sensitive anyway.
+ * If someone is interested in making it work again, this
+ * would be the place to start by adding a per-superblock
+ * mutex and fixing all the bugs and performance issues
+ * caused by that.
 */
 static inline void hpfs_lock(struct super_block *s)
 {
- struct hpfs_sb_info *sbi = hpfs_sb(s);
- mutex_lock(&sbi->hpfs_mutex);
 }
 static inline void hpfs_unlock(struct super_block *s)
 {
- struct hpfs_sb_info *sbi = hpfs_sb(s);
- mutex_unlock(&sbi->hpfs_mutex);
-}
-
-static inline void hpfs_lock_assert(struct super_block *s)
-{
- struct hpfs_sb_info *sbi = hpfs_sb(s);
- WARN_ON(!mutex_is_locked(&sbi->hpfs_mutex));
 }
diff --git a/trunk/fs/hpfs/inode.c b/trunk/fs/hpfs/inode.c
index 338cd8368451..87f1f787e767 100644
--- a/trunk/fs/hpfs/inode.c
+++ b/trunk/fs/hpfs/inode.c
@@ -17,6 +17,7 @@ void hpfs_init_inode(struct inode *i)
 i->i_uid = hpfs_sb(sb)->sb_uid;
 i->i_gid = hpfs_sb(sb)->sb_gid;
 i->i_mode = hpfs_sb(sb)->sb_mode;
+ hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv;
 i->i_size = -1;
 i->i_blocks = -1;
@@ -115,8 +116,8 @@ void hpfs_read_inode(struct inode *i)
 i->i_mode |= S_IFDIR;
 i->i_op = &hpfs_dir_iops;
 i->i_fop = &hpfs_dir_ops;
- hpfs_inode->i_parent_dir = le32_to_cpu(fnode->up);
- hpfs_inode->i_dno = le32_to_cpu(fnode->u.external[0].disk_secno);
+ hpfs_inode->i_parent_dir = fnode->up;
+ hpfs_inode->i_dno = fnode->u.external[0].disk_secno;
 if (hpfs_sb(sb)->sb_chk >= 2) {
 struct buffer_head *bh0;
 if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0);
@@ -132,7 +133,7 @@ void hpfs_read_inode(struct inode *i)
 i->i_op = &hpfs_file_iops;
 i->i_fop = &hpfs_file_ops;
 i->i_nlink = 1;
- i->i_size = le32_to_cpu(fnode->file_size);
+ i->i_size = fnode->file_size;
 i->i_blocks = ((i->i_size + 511) >> 9) + 1;
 i->i_data.a_ops = &hpfs_aops;
 hpfs_i(i)->mmu_private = i->i_size;
@@ -143,7 +144,7 @@ void hpfs_read_inode(struct inode *i)
 static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
 {
 struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
- /*if (le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) {
+ /*if (fnode->acl_size_l || fnode->acl_size_s) {
 Some unknown structures like ACL may be in fnode, we'd better not overwrite them
 hpfs_error(i->i_sb, "fnode %08x has some unknown
HPFS386 stuctures", i->i_ino); @@ -186,7 +187,9 @@ void hpfs_write_inode(struct inode *i) kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } + mutex_lock(&hpfs_inode->i_parent_mutex); if (!i->i_nlink) { + mutex_unlock(&hpfs_inode->i_parent_mutex); return; } parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); @@ -197,9 +200,14 @@ void hpfs_write_inode(struct inode *i) hpfs_read_inode(parent); unlock_new_inode(parent); } + mutex_lock(&hpfs_inode->i_mutex); hpfs_write_inode_nolock(i); + mutex_unlock(&hpfs_inode->i_mutex); iput(parent); + } else { + mark_inode_dirty(i); } + mutex_unlock(&hpfs_inode->i_parent_mutex); } void hpfs_write_inode_nolock(struct inode *i) @@ -218,30 +226,30 @@ void hpfs_write_inode_nolock(struct inode *i) } } else de = NULL; if (S_ISREG(i->i_mode)) { - fnode->file_size = cpu_to_le32(i->i_size); - if (de) de->file_size = cpu_to_le32(i->i_size); + fnode->file_size = i->i_size; + if (de) de->file_size = i->i_size; } else if (S_ISDIR(i->i_mode)) { - fnode->file_size = cpu_to_le32(0); - if (de) de->file_size = cpu_to_le32(0); + fnode->file_size = 0; + if (de) de->file_size = 0; } hpfs_write_inode_ea(i, fnode); if (de) { - de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec)); - de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec)); - de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec)); + de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec); + de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec); + de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec); de->read_only = !(i->i_mode & 0222); - de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size); + de->ea_size = hpfs_inode->i_ea_size; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); } if (S_ISDIR(i->i_mode)) { if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) { - de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec)); - de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec)); - de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec)); + de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec); + de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec); + de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec); de->read_only = !(i->i_mode & 0222); - de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0); - de->file_size = cpu_to_le32(0); + de->ea_size = /*hpfs_inode->i_ea_size*/0; + de->file_size = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); } else @@ -261,10 +269,6 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr) hpfs_lock(inode->i_sb); if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root) goto out_unlock; - if ((attr->ia_valid & ATTR_UID) && attr->ia_uid >= 0x10000) - goto out_unlock; - if ((attr->ia_valid & ATTR_GID) && attr->ia_gid >= 0x10000) - goto out_unlock; if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) goto out_unlock; @@ -280,6 +284,7 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr) } setattr_copy(inode, attr); + mark_inode_dirty(inode); hpfs_write_inode(inode); diff --git a/trunk/fs/hpfs/map.c b/trunk/fs/hpfs/map.c index a790821366a7..840d033ecee8 100644 --- a/trunk/fs/hpfs/map.c +++ b/trunk/fs/hpfs/map.c @@ -21,7 +21,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id); return NULL; } - sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]); + sec = hpfs_sb(s)->sb_bmp_dir[bmp_block]; if (!sec 
|| sec > hpfs_sb(s)->sb_fs_size-4) { hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id); return NULL; @@ -46,18 +46,18 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps) struct code_page_data *cpd; struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0); if (!cp) return NULL; - if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) { - printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", le32_to_cpu(cp->magic)); + if (cp->magic != CP_DIR_MAGIC) { + printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", cp->magic); brelse(bh); return NULL; } - if (!le32_to_cpu(cp->n_code_pages)) { + if (!cp->n_code_pages) { printk("HPFS: n_code_pages == 0\n"); brelse(bh); return NULL; } - cpds = le32_to_cpu(cp->array[0].code_page_data); - cpi = le16_to_cpu(cp->array[0].index); + cpds = cp->array[0].code_page_data; + cpi = cp->array[0].index; brelse(bh); if (cpi >= 3) { @@ -66,12 +66,12 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps) } if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL; - if (le16_to_cpu(cpd->offs[cpi]) > 0x178) { + if ((unsigned)cpd->offs[cpi] > 0x178) { printk("HPFS: Code page index out of sector\n"); brelse(bh); return NULL; } - ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6; + ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6; if (!(cp_table = kmalloc(256, GFP_KERNEL))) { printk("HPFS: out of memory for code page table\n"); brelse(bh); @@ -125,7 +125,7 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea if (hpfs_sb(s)->sb_chk) { struct extended_attribute *ea; struct extended_attribute *ea_end; - if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) { + if (fnode->magic != FNODE_MAGIC) { hpfs_error(s, "bad magic on fnode %08lx", (unsigned long)ino); goto bail; @@ -138,7 +138,7 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea (unsigned long)ino); goto bail; } - if (le16_to_cpu(fnode->btree.first_free) != + if (fnode->btree.first_free != 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) { hpfs_error(s, "bad first_free pointer in fnode %08lx", @@ -146,12 +146,12 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea goto bail; } } - if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 || - le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) { + if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 || + (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) { hpfs_error(s, "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x", (unsigned long)ino, - le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); + fnode->ea_offs, fnode->ea_size_s); goto bail; } ea = fnode_ea(fnode); @@ -178,20 +178,16 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL; if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD))) if (hpfs_sb(s)->sb_chk) { - if (le32_to_cpu(anode->magic) != ANODE_MAGIC) { + if (anode->magic != ANODE_MAGIC || anode->self != ano) { hpfs_error(s, "bad magic on anode %08x", ano); goto bail; } - if (le32_to_cpu(anode->self) != ano) { - hpfs_error(s, "self pointer invalid on anode %08x", ano); - goto bail; - } if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != (anode->btree.internal ? 
60 : 40)) { hpfs_error(s, "bad number of nodes in anode %08x", ano); goto bail; } - if (le16_to_cpu(anode->btree.first_free) != + if (anode->btree.first_free != 8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) { hpfs_error(s, "bad first_free pointer in anode %08x", ano); goto bail; @@ -223,26 +219,26 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno, unsigned p, pp = 0; unsigned char *d = (unsigned char *)dnode; int b = 0; - if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) { + if (dnode->magic != DNODE_MAGIC) { hpfs_error(s, "bad magic on dnode %08x", secno); goto bail; } - if (le32_to_cpu(dnode->self) != secno) - hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self)); + if (dnode->self != secno) + hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, dnode->self); /* Check dirents - bad dirents would cause infinite loops or shooting to memory */ - if (le32_to_cpu(dnode->first_free) > 2048) { - hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free)); + if (dnode->first_free > 2048/* || dnode->first_free < 84*/) { + hpfs_error(s, "dnode %08x has first_free == %08x", secno, dnode->first_free); goto bail; } - for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) { + for (p = 20; p < dnode->first_free; p += d[p] + (d[p+1] << 8)) { struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p); - if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) { + if (de->length > 292 || (de->length < 32) || (de->length & 3) || p + de->length > 2048) { hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } - if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) { - if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok; + if (((31 + de->namelen + de->down*4 + 3) & ~3) != de->length) { + if (((31 + de->namelen + de->down*4 + 3) & ~3) < de->length && s->s_flags & MS_RDONLY) goto ok; hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } @@ -255,7 +251,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno, pp = p; } - if (p != le32_to_cpu(dnode->first_free)) { + if (p != dnode->first_free) { hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno); goto bail; } @@ -281,7 +277,7 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino) if (!fnode) return 0; - dno = le32_to_cpu(fnode->u.external[0].disk_secno); + dno = fnode->u.external[0].disk_secno; brelse(bh); return dno; } diff --git a/trunk/fs/hpfs/name.c b/trunk/fs/hpfs/name.c index 9acdf338def0..f24736d7a439 100644 --- a/trunk/fs/hpfs/name.c +++ b/trunk/fs/hpfs/name.c @@ -8,6 +8,39 @@ #include "hpfs_fn.h" +static const char *text_postfix[]={ +".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF", +".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS", +".RC", ".TEX", ".TXT", ".Y", ""}; + +static const char *text_prefix[]={ +"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ", +"MAKEFILE", "READ.ME", "README", "TERMCAP", ""}; + +void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len) +{ + struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); + int i; + if (hpfs_inode->i_conv != CONV_AUTO) return; + for (i 
= 0; *text_postfix[i]; i++) { + int l = strlen(text_postfix[i]); + if (l <= len) + if (!hpfs_compare_names(inode->i_sb, text_postfix[i], l, name + len - l, l, 0)) + goto text; + } + for (i = 0; *text_prefix[i]; i++) { + int l = strlen(text_prefix[i]); + if (l <= len) + if (!hpfs_compare_names(inode->i_sb, text_prefix[i], l, name, l, 0)) + goto text; + } + hpfs_inode->i_conv = CONV_BINARY; + return; + text: + hpfs_inode->i_conv = CONV_TEXT; + return; +} + static inline int not_allowed_char(unsigned char c) { return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' || diff --git a/trunk/fs/hpfs/namei.c b/trunk/fs/hpfs/namei.c index 1f05839c27a7..d5f8c8a19023 100644 --- a/trunk/fs/hpfs/namei.c +++ b/trunk/fs/hpfs/namei.c @@ -29,7 +29,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); if (!fnode) goto bail; - dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0); + dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0, 1); if (!dnode) goto bail1; memset(&dee, 0, sizeof dee); @@ -37,8 +37,8 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (!(mode & 0222)) dee.read_only = 1; /*dee.archive = 0;*/ dee.hidden = name[0] == '.'; - dee.fnode = cpu_to_le32(fno); - dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); + dee.fnode = fno; + dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); result = new_inode(dir->i_sb); if (!result) goto bail2; @@ -46,7 +46,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) result->i_ino = fno; hpfs_i(result)->i_parent_dir = dir->i_ino; hpfs_i(result)->i_dno = dno; - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); result->i_ctime.tv_nsec = 0; result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; @@ -60,7 +60,8 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (dee.read_only) result->i_mode &= ~0222; - r = hpfs_add_dirent(dir, name, len, &dee); + mutex_lock(&hpfs_i(dir)->i_mutex); + r = hpfs_add_dirent(dir, name, len, &dee, 0); if (r == 1) goto bail3; if (r == -1) { @@ -69,21 +70,21 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); - fnode->up = cpu_to_le32(dir->i_ino); + fnode->up = dir->i_ino; fnode->dirflag = 1; fnode->btree.n_free_nodes = 7; fnode->btree.n_used_nodes = 1; - fnode->btree.first_free = cpu_to_le16(0x14); - fnode->u.external[0].disk_secno = cpu_to_le32(dno); - fnode->u.external[0].file_secno = cpu_to_le32(-1); + fnode->btree.first_free = 0x14; + fnode->u.external[0].disk_secno = dno; + fnode->u.external[0].file_secno = -1; dnode->root_dnode = 1; - dnode->up = cpu_to_le32(fno); + dnode->up = fno; de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0); - de->creation_date = de->write_date = de->read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); + de->creation_date = de->write_date = de->read_date = gmt_to_local(dir->i_sb, get_seconds()); if (!(mode & 0222)) de->read_only = 1; de->first = de->directory = 1; /*de->hidden = de->system = 0;*/ - de->fnode = cpu_to_le32(fno); + de->fnode = fno; mark_buffer_dirty(bh); brelse(bh); hpfs_mark_4buffers_dirty(&qbh0); @@ -100,9 +101,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); + mutex_unlock(&hpfs_i(dir)->i_mutex); hpfs_unlock(dir->i_sb); return 0; bail3: + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail2: hpfs_brelse4(&qbh0); @@ -137,8 +140,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc if (!(mode & 0222)) dee.read_only = 1; dee.archive = 1; dee.hidden = name[0] == '.'; - dee.fnode = cpu_to_le32(fno); - dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); + dee.fnode = fno; + dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); result = new_inode(dir->i_sb); if (!result) @@ -151,8 +154,9 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_op = &hpfs_file_iops; result->i_fop = &hpfs_file_ops; result->i_nlink = 1; + hpfs_decide_conv(result, name, len); hpfs_i(result)->i_parent_dir = dir->i_ino; - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); result->i_ctime.tv_nsec = 0; result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; @@ -164,7 +168,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = 0; - r = hpfs_add_dirent(dir, name, len, &dee); + mutex_lock(&hpfs_i(dir)->i_mutex); + r = hpfs_add_dirent(dir, name, len, &dee, 0); if (r == 1) goto bail2; if (r == -1) { @@ -173,7 +178,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); - fnode->up = cpu_to_le32(dir->i_ino); + fnode->up = dir->i_ino; mark_buffer_dirty(bh); brelse(bh); @@ -188,10 +193,12 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); + mutex_unlock(&hpfs_i(dir)->i_mutex); hpfs_unlock(dir->i_sb); return 0; bail2: + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -225,8 +232,8 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t if (!(mode & 0222)) dee.read_only = 1; dee.archive = 1; dee.hidden = name[0] == '.'; - dee.fnode = cpu_to_le32(fno); - dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); + dee.fnode = fno; + dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); result = new_inode(dir->i_sb); if (!result) @@ -235,7 +242,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t hpfs_init_inode(result); result->i_ino = fno; hpfs_i(result)->i_parent_dir = dir->i_ino; - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); result->i_ctime.tv_nsec = 0; result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; @@ -247,7 +254,8 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t result->i_blocks = 1; init_special_inode(result, mode, rdev); - r = hpfs_add_dirent(dir, name, len, &dee); + mutex_lock(&hpfs_i(dir)->i_mutex); + r = hpfs_add_dirent(dir, name, len, &dee, 0); if (r == 1) goto bail2; if (r == -1) { @@ -256,17 +264,19 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); - fnode->up = cpu_to_le32(dir->i_ino); + fnode->up = dir->i_ino; mark_buffer_dirty(bh); insert_inode_hash(result); hpfs_write_inode_nolock(result); d_instantiate(dentry, result); + mutex_unlock(&hpfs_i(dir)->i_mutex); brelse(bh); hpfs_unlock(dir->i_sb); return 0; bail2: + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -300,8 +310,8 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy memset(&dee, 0, sizeof dee); dee.archive = 1; dee.hidden = name[0] == '.'; - dee.fnode = cpu_to_le32(fno); - dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds())); + dee.fnode = fno; + dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds()); result = new_inode(dir->i_sb); if (!result) @@ -309,7 +319,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_ino = fno; hpfs_init_inode(result); hpfs_i(result)->i_parent_dir = dir->i_ino; - result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)); + result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); result->i_ctime.tv_nsec = 0; result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; @@ -323,7 +333,8 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_op = &page_symlink_inode_operations; result->i_data.a_ops = &hpfs_symlink_aops; - r = hpfs_add_dirent(dir, name, len, &dee); + mutex_lock(&hpfs_i(dir)->i_mutex); + r = hpfs_add_dirent(dir, name, len, &dee, 0); if (r == 1) goto bail2; if (r == -1) { @@ -332,7 +343,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy } fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); - fnode->up = cpu_to_le32(dir->i_ino); + fnode->up = dir->i_ino; hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink)); mark_buffer_dirty(bh); brelse(bh); @@ -341,9 +352,11 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy hpfs_write_inode_nolock(result); d_instantiate(dentry, result); + mutex_unlock(&hpfs_i(dir)->i_mutex); hpfs_unlock(dir->i_sb); return 0; bail2: + mutex_unlock(&hpfs_i(dir)->i_mutex); iput(result); bail1: brelse(bh); @@ -361,6 +374,7 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) struct hpfs_dirent *de; struct inode *inode = dentry->d_inode; dnode_secno dno; + fnode_secno fno; int r; int rep = 0; int err; @@ -368,6 +382,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) hpfs_lock(dir->i_sb); hpfs_adjust_length(name, &len); again: + mutex_lock(&hpfs_i(inode)->i_parent_mutex); + mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); if (!de) @@ -381,6 +397,7 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) if (de->directory) goto out1; + fno = de->fnode; r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); switch (r) { case 1: @@ -393,6 +410,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) if (rep++) break; + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); dentry_unhash(dentry); if (!d_unhashed(dentry)) { dput(dentry); @@ -426,6 +445,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); hpfs_unlock(dir->i_sb); return err; } @@ -438,12 +459,15 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) struct hpfs_dirent *de; struct inode *inode = dentry->d_inode; dnode_secno dno; + fnode_secno fno; int n_items = 0; int err; int r; hpfs_adjust_length(name, &len); hpfs_lock(dir->i_sb); + mutex_lock(&hpfs_i(inode)->i_parent_mutex); + mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); if (!de) @@ -462,6 +486,7 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) if (n_items) goto out1; + fno = de->fnode; r = hpfs_remove_dirent(dir, dno, de, &qbh, 1); switch (r) { case 1: @@ -480,6 +505,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: + mutex_unlock(&hpfs_i(dir)->i_mutex); + mutex_unlock(&hpfs_i(inode)->i_parent_mutex); hpfs_unlock(dir->i_sb); return err; } @@ -541,6 +568,12 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, hpfs_lock(i->i_sb); /* order doesn't matter, due to VFS exclusion */ + mutex_lock(&hpfs_i(i)->i_parent_mutex); + if (new_inode) + mutex_lock(&hpfs_i(new_inode)->i_parent_mutex); + mutex_lock(&hpfs_i(old_dir)->i_mutex); + if (new_dir != old_dir) + mutex_lock(&hpfs_i(new_dir)->i_mutex); /* Erm? Moving over the empty non-busy directory is perfectly legal */ if (new_inode && S_ISDIR(new_inode->i_mode)) { @@ -577,7 +610,9 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir == old_dir) hpfs_brelse4(&qbh); - if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de))) { + hpfs_lock_creation(i->i_sb); + if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de, 1))) { + hpfs_unlock_creation(i->i_sb); if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!"); err = r == 1 ? 
-ENOSPC : -EFSERROR; if (new_dir != old_dir) hpfs_brelse4(&qbh); @@ -586,17 +621,20 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir == old_dir) if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) { + hpfs_unlock_creation(i->i_sb); hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2"); err = -ENOENT; goto end1; } if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) { + hpfs_unlock_creation(i->i_sb); hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent"); err = r == 2 ? -ENOSPC : -EFSERROR; goto end1; } - + hpfs_unlock_creation(i->i_sb); + end: hpfs_i(i)->i_parent_dir = new_dir->i_ino; if (S_ISDIR(i->i_mode)) { @@ -604,14 +642,22 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, drop_nlink(old_dir); } if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) { - fnode->up = cpu_to_le32(new_dir->i_ino); + fnode->up = new_dir->i_ino; fnode->len = new_len; memcpy(fnode->name, new_name, new_len>15?15:new_len); if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len); mark_buffer_dirty(bh); brelse(bh); } + hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv; + hpfs_decide_conv(i, new_name, new_len); end1: + if (old_dir != new_dir) + mutex_unlock(&hpfs_i(new_dir)->i_mutex); + mutex_unlock(&hpfs_i(old_dir)->i_mutex); + mutex_unlock(&hpfs_i(i)->i_parent_mutex); + if (new_inode) + mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex); hpfs_unlock(i->i_sb); return err; } diff --git a/trunk/fs/hpfs/super.c b/trunk/fs/hpfs/super.c index 98580a3b5005..c89b40808587 100644 --- a/trunk/fs/hpfs/super.c +++ b/trunk/fs/hpfs/super.c @@ -18,16 +18,15 @@ /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ -static void mark_dirty(struct super_block *s, int remount) +static void mark_dirty(struct super_block *s) { - if (hpfs_sb(s)->sb_chkdsk && (remount || !(s->s_flags & MS_RDONLY))) { + if (hpfs_sb(s)->sb_chkdsk && !(s->s_flags & MS_RDONLY)) { struct buffer_head *bh; struct hpfs_spare_block *sb; if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { sb->dirty = 1; sb->old_wrote = 0; mark_buffer_dirty(bh); - sync_dirty_buffer(bh); brelse(bh); } } @@ -41,12 +40,10 @@ static void unmark_dirty(struct super_block *s) struct buffer_head *bh; struct hpfs_spare_block *sb; if (s->s_flags & MS_RDONLY) return; - sync_blockdev(s->s_bdev); if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error; sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error; mark_buffer_dirty(bh); - sync_dirty_buffer(bh); brelse(bh); } } @@ -66,13 +63,13 @@ void hpfs_error(struct super_block *s, const char *fmt, ...) 
if (!hpfs_sb(s)->sb_was_error) { if (hpfs_sb(s)->sb_err == 2) { printk("; crashing the system because you wanted it\n"); - mark_dirty(s, 0); + mark_dirty(s); panic("HPFS panic"); } else if (hpfs_sb(s)->sb_err == 1) { if (s->s_flags & MS_RDONLY) printk("; already mounted read-only\n"); else { printk("; remounting read-only\n"); - mark_dirty(s, 0); + mark_dirty(s); s->s_flags |= MS_RDONLY; } } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n"); @@ -105,12 +102,9 @@ static void hpfs_put_super(struct super_block *s) { struct hpfs_sb_info *sbi = hpfs_sb(s); - hpfs_lock(s); - unmark_dirty(s); - hpfs_unlock(s); - kfree(sbi->sb_cp_table); kfree(sbi->sb_bmp_dir); + unmark_dirty(s); s->s_fs_info = NULL; kfree(sbi); } @@ -135,7 +129,7 @@ static unsigned count_bitmaps(struct super_block *s) n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; count = 0; for (n = 0; n < n_bands; n++) - count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); + count += hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_bmp_dir[n]); return count; } @@ -194,6 +188,8 @@ static void init_once(void *foo) { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; + mutex_init(&ei->i_mutex); + mutex_init(&ei->i_parent_mutex); inode_init_once(&ei->vfs_inode); } @@ -222,6 +218,7 @@ static void destroy_inodecache(void) enum { Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis, + Opt_conv_binary, Opt_conv_text, Opt_conv_auto, Opt_check_none, Opt_check_normal, Opt_check_strict, Opt_err_cont, Opt_err_ro, Opt_err_panic, Opt_eas_no, Opt_eas_ro, Opt_eas_rw, @@ -236,6 +233,9 @@ static const match_table_t tokens = { {Opt_umask, "umask=%o"}, {Opt_case_lower, "case=lower"}, {Opt_case_asis, "case=asis"}, + {Opt_conv_binary, "conv=binary"}, + {Opt_conv_text, "conv=text"}, + {Opt_conv_auto, "conv=auto"}, {Opt_check_none, "check=none"}, {Opt_check_normal, "check=normal"}, {Opt_check_strict, "check=strict"}, @@ -253,7 +253,7 @@ static const match_table_t tokens = { }; static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask, - int *lowercase, int *eas, int *chk, int *errs, + int *lowercase, int *conv, int *eas, int *chk, int *errs, int *chkdsk, int *timeshift) { char *p; @@ -295,6 +295,15 @@ static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask, case Opt_case_asis: *lowercase = 0; break; + case Opt_conv_binary: + *conv = CONV_BINARY; + break; + case Opt_conv_text: + *conv = CONV_TEXT; + break; + case Opt_conv_auto: + *conv = CONV_AUTO; + break; case Opt_check_none: *chk = 0; break; @@ -361,6 +370,9 @@ HPFS filesystem options:\n\ umask=xxx set mode of files that don't have mode specified in eas\n\ case=lower lowercase all files\n\ case=asis do not lowercase files (default)\n\ + conv=binary do not convert CR/LF -> LF (default)\n\ + conv=auto convert only files with known text extensions\n\ + conv=text convert all files\n\ check=none no fs checks - kernel may crash on corrupted filesystem\n\ check=normal do some checks - it should not crash (default)\n\ check=strict do extra time-consuming checks, used for debugging\n\ @@ -382,7 +394,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) uid_t uid; gid_t gid; umode_t umask; - int lowercase, eas, chk, errs, chkdsk, timeshift; + int lowercase, conv, eas, chk, errs, chkdsk, timeshift; int o; struct hpfs_sb_info *sbi = hpfs_sb(s); char *new_opts = kstrdup(data, GFP_KERNEL); @@ -393,11 +405,11 @@ static int hpfs_remount_fs(struct super_block *s, int 
*flags, char *data) lock_super(s); uid = sbi->sb_uid; gid = sbi->sb_gid; umask = 0777 & ~sbi->sb_mode; - lowercase = sbi->sb_lowercase; + lowercase = sbi->sb_lowercase; conv = sbi->sb_conv; eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk; errs = sbi->sb_err; timeshift = sbi->sb_timeshift; - if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, + if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv, &eas, &chk, &errs, &chkdsk, ×hift))) { printk("HPFS: bad mount options.\n"); goto out_err; @@ -415,11 +427,11 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) sbi->sb_uid = uid; sbi->sb_gid = gid; sbi->sb_mode = 0777 & ~umask; - sbi->sb_lowercase = lowercase; + sbi->sb_lowercase = lowercase; sbi->sb_conv = conv; sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk; sbi->sb_err = errs; sbi->sb_timeshift = timeshift; - if (!(*flags & MS_RDONLY)) mark_dirty(s, 1); + if (!(*flags & MS_RDONLY)) mark_dirty(s); replace_mount_options(s, new_opts); @@ -459,7 +471,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) uid_t uid; gid_t gid; umode_t umask; - int lowercase, eas, chk, errs, chkdsk, timeshift; + int lowercase, conv, eas, chk, errs, chkdsk, timeshift; dnode_secno root_dno; struct hpfs_dirent *de = NULL; @@ -467,6 +479,11 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) int o; + if (num_possible_cpus() > 1) { + printk(KERN_ERR "HPFS is not SMP safe\n"); + return -EINVAL; + } + save_mount_options(s, options); sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); @@ -478,20 +495,20 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) sbi->sb_bmp_dir = NULL; sbi->sb_cp_table = NULL; - mutex_init(&sbi->hpfs_mutex); - hpfs_lock(s); + mutex_init(&sbi->hpfs_creation_de); uid = current_uid(); gid = current_gid(); umask = current_umask(); lowercase = 0; + conv = CONV_BINARY; eas = 2; chk = 1; errs = 1; chkdsk = 1; timeshift = 0; - if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, + if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, &conv, &eas, &chk, &errs, &chkdsk, ×hift))) { printk("HPFS: bad mount options.\n"); goto bail0; @@ -509,9 +526,9 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3; /* Check magics */ - if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC - ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC - || le32_to_cpu(spareblock->magic) != SP_MAGIC) { + if (/*bootblock->magic != BB_MAGIC + ||*/ superblock->magic != SB_MAGIC + || spareblock->magic != SP_MAGIC) { if (!silent) printk("HPFS: Bad magic ... 
probably not HPFS\n"); goto bail4; } @@ -532,18 +549,19 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) s->s_op = &hpfs_sops; s->s_d_op = &hpfs_dentry_operations; - sbi->sb_root = le32_to_cpu(superblock->root); - sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors); - sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps); - sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start); - sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band); - sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap); + sbi->sb_root = superblock->root; + sbi->sb_fs_size = superblock->n_sectors; + sbi->sb_bitmaps = superblock->bitmaps; + sbi->sb_dirband_start = superblock->dir_band_start; + sbi->sb_dirband_size = superblock->n_dir_band; + sbi->sb_dmap = superblock->dir_band_bitmap; sbi->sb_uid = uid; sbi->sb_gid = gid; sbi->sb_mode = 0777 & ~umask; sbi->sb_n_free = -1; sbi->sb_n_free_dnodes = -1; sbi->sb_lowercase = lowercase; + sbi->sb_conv = conv; sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk; @@ -555,7 +573,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) sbi->sb_max_fwd_alloc = 0xffffff; /* Load bitmap directory */ - if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps)))) + if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, superblock->bitmaps))) goto bail4; /* Check for general fs errors*/ @@ -573,20 +591,20 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) mark_buffer_dirty(bh2); } - if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) { + if (spareblock->hotfixes_used || spareblock->n_spares_used) { if (errs >= 2) { printk("HPFS: Hotfixes not supported here, try chkdsk\n"); - mark_dirty(s, 0); + mark_dirty(s); goto bail4; } hpfs_error(s, "hotfixes not supported here, try chkdsk"); if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by this driver...\n"); else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n"); } - if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) { + if (spareblock->n_dnode_spares != spareblock->n_dnode_spares_free) { if (errs >= 2) { printk("HPFS: Spare dnodes used, try chkdsk\n"); - mark_dirty(s, 0); + mark_dirty(s); goto bail4; } hpfs_error(s, "warning: spare dnodes used, try chkdsk"); @@ -594,26 +612,26 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) } if (chk) { unsigned a; - if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) || - le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) { + if (superblock->dir_band_end - superblock->dir_band_start + 1 != superblock->n_dir_band || + superblock->dir_band_end < superblock->dir_band_start || superblock->n_dir_band > 0x4000) { hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x", - le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band)); + superblock->dir_band_start, superblock->dir_band_end, superblock->n_dir_band); goto bail4; } a = sbi->sb_dirband_size; sbi->sb_dirband_size = 0; - if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") || - hpfs_chk_sectors(s, 
le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") || - hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) { - mark_dirty(s, 0); + if (hpfs_chk_sectors(s, superblock->dir_band_start, superblock->n_dir_band, "dir_band") || + hpfs_chk_sectors(s, superblock->dir_band_bitmap, 4, "dir_band_bitmap") || + hpfs_chk_sectors(s, superblock->bitmaps, 4, "bitmaps")) { + mark_dirty(s); goto bail4; } sbi->sb_dirband_size = a; } else printk("HPFS: You really don't want any checks? You are crazy...\n"); /* Load code page table */ - if (le32_to_cpu(spareblock->n_code_pages)) - if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir)))) + if (spareblock->n_code_pages) + if (!(sbi->sb_cp_table = hpfs_load_code_page(s, spareblock->code_page_dir))) printk("HPFS: Warning: code page support is disabled\n"); brelse(bh2); @@ -642,13 +660,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) if (!de) hpfs_error(s, "unable to find root dir"); else { - root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date)); + root->i_atime.tv_sec = local_to_gmt(s, de->read_date); root->i_atime.tv_nsec = 0; - root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date)); + root->i_mtime.tv_sec = local_to_gmt(s, de->write_date); root->i_mtime.tv_nsec = 0; - root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date)); + root->i_ctime.tv_sec = local_to_gmt(s, de->creation_date); root->i_ctime.tv_nsec = 0; - hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size); + hpfs_i(root)->i_ea_size = de->ea_size; hpfs_i(root)->i_parent_dir = root->i_ino; if (root->i_size == -1) root->i_size = 2048; @@ -656,7 +674,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) root->i_blocks = 5; hpfs_brelse4(&qbh); } - hpfs_unlock(s); return 0; bail4: brelse(bh2); @@ -664,7 +681,6 @@ bail3: brelse(bh1); bail2: brelse(bh0); bail1: bail0: - hpfs_unlock(s); kfree(sbi->sb_bmp_dir); kfree(sbi->sb_cp_table); s->s_fs_info = NULL; diff --git a/trunk/fs/logfs/super.c b/trunk/fs/logfs/super.c index ce03a182c771..33435e4b14d2 100644 --- a/trunk/fs/logfs/super.c +++ b/trunk/fs/logfs/super.c @@ -480,6 +480,10 @@ static int logfs_read_sb(struct super_block *sb, int read_only) !read_only) return -EIO; + mutex_init(&super->s_dirop_mutex); + mutex_init(&super->s_object_alias_mutex); + INIT_LIST_HEAD(&super->s_freeing_list); + ret = logfs_init_rw(sb); if (ret) return ret; @@ -597,10 +601,6 @@ static struct dentry *logfs_mount(struct file_system_type *type, int flags, if (!super) return ERR_PTR(-ENOMEM); - mutex_init(&super->s_dirop_mutex); - mutex_init(&super->s_object_alias_mutex); - INIT_LIST_HEAD(&super->s_freeing_list); - if (!devname) err = logfs_get_sb_bdev(super, type, devname); else if (strncmp(devname, "mtd", 3)) diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index e3c4f112ebf7..54fc993e3027 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -179,7 +179,7 @@ EXPORT_SYMBOL(putname); static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) { - unsigned int mode = inode->i_mode; + umode_t mode = inode->i_mode; mask &= MAY_READ | MAY_WRITE | MAY_EXEC; diff --git a/trunk/fs/nfs/namespace.c b/trunk/fs/nfs/namespace.c index 1f063bacd285..89fc160fd5b0 100644 --- a/trunk/fs/nfs/namespace.c +++ b/trunk/fs/nfs/namespace.c @@ -119,7 +119,7 @@ char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen) } #ifdef 
CONFIG_NFS_V4 -static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors) +static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors, struct inode *inode) { struct gss_api_mech *mech; struct xdr_netobj oid; @@ -166,7 +166,7 @@ static int nfs_negotiate_security(const struct dentry *parent, } flavors = page_address(page); ret = secinfo(parent->d_inode, &dentry->d_name, flavors); - *flavor = nfs_find_best_sec(flavors); + *flavor = nfs_find_best_sec(flavors, dentry->d_inode); put_page(page); } diff --git a/trunk/fs/nfs/nfs4_fs.h b/trunk/fs/nfs/nfs4_fs.h index c4a69833dd0d..e1c261ddd65d 100644 --- a/trunk/fs/nfs/nfs4_fs.h +++ b/trunk/fs/nfs/nfs4_fs.h @@ -47,7 +47,6 @@ enum nfs4_client_state { NFS4CLNT_LAYOUTRECALL, NFS4CLNT_SESSION_RESET, NFS4CLNT_RECALL_SLOT, - NFS4CLNT_LEASE_CONFIRM, }; enum nfs4_session_state { diff --git a/trunk/fs/nfs/nfs4filelayout.c b/trunk/fs/nfs/nfs4filelayout.c index be79dc9f386d..6f8192f4cfc7 100644 --- a/trunk/fs/nfs/nfs4filelayout.c +++ b/trunk/fs/nfs/nfs4filelayout.c @@ -117,8 +117,6 @@ static int filelayout_async_handle_error(struct rpc_task *task, case -EKEYEXPIRED: rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX); break; - case -NFS4ERR_RETRY_UNCACHED_REP: - break; default: dprintk("%s DS error. Retry through MDS %d\n", __func__, task->tk_status); @@ -418,8 +416,7 @@ static int filelayout_check_layout(struct pnfs_layout_hdr *lo, struct nfs4_filelayout_segment *fl, struct nfs4_layoutget_res *lgr, - struct nfs4_deviceid *id, - gfp_t gfp_flags) + struct nfs4_deviceid *id) { struct nfs4_file_layout_dsaddr *dsaddr; int status = -EINVAL; @@ -442,7 +439,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo, /* find and reference the deviceid */ dsaddr = nfs4_fl_find_get_deviceid(id); if (dsaddr == NULL) { - dsaddr = get_device_info(lo->plh_inode, id, gfp_flags); + dsaddr = get_device_info(lo->plh_inode, id); if (dsaddr == NULL) goto out; } @@ -503,8 +500,7 @@ static int filelayout_decode_layout(struct pnfs_layout_hdr *flo, struct nfs4_filelayout_segment *fl, struct nfs4_layoutget_res *lgr, - struct nfs4_deviceid *id, - gfp_t gfp_flags) + struct nfs4_deviceid *id) { struct xdr_stream stream; struct xdr_buf buf = { @@ -520,7 +516,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, dprintk("%s: set_layout_map Begin\n", __func__); - scratch = alloc_page(gfp_flags); + scratch = alloc_page(GFP_KERNEL); if (!scratch) return -ENOMEM; @@ -558,13 +554,13 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, goto out_err; fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), - gfp_flags); + GFP_KERNEL); if (!fl->fh_array) goto out_err; for (i = 0; i < fl->num_fh; i++) { /* Do we want to use a mempool here? 
*/ - fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags); + fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL); if (!fl->fh_array[i]) goto out_err_free; @@ -609,20 +605,19 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg) static struct pnfs_layout_segment * filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, - struct nfs4_layoutget_res *lgr, - gfp_t gfp_flags) + struct nfs4_layoutget_res *lgr) { struct nfs4_filelayout_segment *fl; int rc; struct nfs4_deviceid id; dprintk("--> %s\n", __func__); - fl = kzalloc(sizeof(*fl), gfp_flags); + fl = kzalloc(sizeof(*fl), GFP_KERNEL); if (!fl) return NULL; - rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); - if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { + rc = filelayout_decode_layout(layoutid, fl, lgr, &id); + if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) { _filelayout_free_lseg(fl); return NULL; } @@ -638,7 +633,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, int size = (fl->stripe_type == STRIPE_SPARSE) ? fl->dsaddr->ds_num : fl->dsaddr->stripe_count; - fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags); + fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL); if (!fl->commit_buckets) { filelayout_free_lseg(&fl->generic_hdr); return NULL; diff --git a/trunk/fs/nfs/nfs4filelayout.h b/trunk/fs/nfs/nfs4filelayout.h index 2b461d77b43a..7c44579f5832 100644 --- a/trunk/fs/nfs/nfs4filelayout.h +++ b/trunk/fs/nfs/nfs4filelayout.h @@ -104,6 +104,6 @@ extern struct nfs4_file_layout_dsaddr * nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id); extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); struct nfs4_file_layout_dsaddr * -get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags); +get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id); #endif /* FS_NFS_NFS4FILELAYOUT_H */ diff --git a/trunk/fs/nfs/nfs4filelayoutdev.c b/trunk/fs/nfs/nfs4filelayoutdev.c index db07c7af1395..de5350f2b249 100644 --- a/trunk/fs/nfs/nfs4filelayoutdev.c +++ b/trunk/fs/nfs/nfs4filelayoutdev.c @@ -225,11 +225,11 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) } static struct nfs4_pnfs_ds * -nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags) +nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port) { struct nfs4_pnfs_ds *tmp_ds, *ds; - ds = kzalloc(sizeof(*tmp_ds), gfp_flags); + ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL); if (!ds) goto out; @@ -261,7 +261,7 @@ nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags) * Currently only support ipv4, and one multi-path address. 
*/ static struct nfs4_pnfs_ds * -decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags) +decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode) { struct nfs4_pnfs_ds *ds = NULL; char *buf; @@ -303,7 +303,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_fla rlen); goto out_err; } - buf = kmalloc(rlen + 1, gfp_flags); + buf = kmalloc(rlen + 1, GFP_KERNEL); if (!buf) { dprintk("%s: Not enough memory\n", __func__); goto out_err; @@ -333,7 +333,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_fla sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]); port = htons((tmp[0] << 8) | (tmp[1])); - ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags); + ds = nfs4_pnfs_ds_add(inode, ip_addr, port); dprintk("%s: Decoded address and port %s\n", __func__, buf); out_free: kfree(buf); @@ -343,7 +343,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_fla /* Decode opaque device data and return the result */ static struct nfs4_file_layout_dsaddr* -decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) +decode_device(struct inode *ino, struct pnfs_device *pdev) { int i; u32 cnt, num; @@ -362,7 +362,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) struct page *scratch; /* set up xdr stream */ - scratch = alloc_page(gfp_flags); + scratch = alloc_page(GFP_KERNEL); if (!scratch) goto out_err; @@ -384,7 +384,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) } /* read stripe indices */ - stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags); + stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL); if (!stripe_indices) goto out_err_free_scratch; @@ -423,7 +423,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) dsaddr = kzalloc(sizeof(*dsaddr) + (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), - gfp_flags); + GFP_KERNEL); if (!dsaddr) goto out_err_free_stripe_indices; @@ -452,7 +452,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) for (j = 0; j < mp_count; j++) { if (j == 0) { dsaddr->ds_list[i] = decode_and_add_ds(&stream, - ino, gfp_flags); + ino); if (dsaddr->ds_list[i] == NULL) goto out_err_free_deviceid; } else { @@ -503,12 +503,12 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) * available devices. */ static struct nfs4_file_layout_dsaddr * -decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) +decode_and_add_device(struct inode *inode, struct pnfs_device *dev) { struct nfs4_file_layout_dsaddr *d, *new; long hash; - new = decode_device(inode, dev, gfp_flags); + new = decode_device(inode, dev); if (!new) { printk(KERN_WARNING "%s: Could not decode or add device\n", __func__); @@ -537,7 +537,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_fl * of available devices, and return it. 
*/ struct nfs4_file_layout_dsaddr * -get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags) +get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id) { struct pnfs_device *pdev = NULL; u32 max_resp_sz; @@ -556,17 +556,17 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla dprintk("%s inode %p max_resp_sz %u max_pages %d\n", __func__, inode, max_resp_sz, max_pages); - pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); + pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL); if (pdev == NULL) return NULL; - pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); + pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); if (pages == NULL) { kfree(pdev); return NULL; } for (i = 0; i < max_pages; i++) { - pages[i] = alloc_page(gfp_flags); + pages[i] = alloc_page(GFP_KERNEL); if (!pages[i]) goto out_free; } @@ -587,7 +587,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla * Found new device, need to decode it and then add it to the * list of known devices for this mountpoint. */ - dsaddr = decode_and_add_device(inode, pdev, gfp_flags); + dsaddr = decode_and_add_device(inode, pdev); out_free: for (i = 0; i < max_pages; i++) __free_page(pages[i]); diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index cf1b339c3937..9bf41eab3e46 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include #include @@ -300,7 +299,6 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc ret = nfs4_delay(server->client, &exception->timeout); if (ret != 0) break; - case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_OLD_STATEID: exception->retry = 1; break; @@ -445,8 +443,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * if (res->sr_status == 1) res->sr_status = NFS_OK; - /* don't increment the sequence number if the task wasn't sent */ - if (!RPC_WAS_SENT(task)) + /* -ERESTARTSYS can result in skipping nfs41_sequence_setup */ + if (!res->sr_slot) goto out; /* Check the SEQUENCE operation status */ @@ -2187,14 +2185,9 @@ static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs4_exception exception = { }; int err; do { - err = _nfs4_lookup_root(server, fhandle, info); - switch (err) { - case 0: - case -NFS4ERR_WRONGSEC: - break; - default: - err = nfs4_handle_exception(server, err, &exception); - } + err = nfs4_handle_exception(server, + _nfs4_lookup_root(server, fhandle, info), + &exception); } while (exception.retry); return err; } @@ -2215,47 +2208,25 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl return ret; } -static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, +/* + * get the file handle for the "/" directory on the server + */ +static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info) { int i, len, status = 0; - rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS]; + rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS + 2]; - len = gss_mech_list_pseudoflavors(&flav_array[0]); - flav_array[len] = RPC_AUTH_NULL; - len += 1; + flav_array[0] = RPC_AUTH_UNIX; + len = gss_mech_list_pseudoflavors(&flav_array[1]); + flav_array[1+len] = RPC_AUTH_NULL; + len += 2; for (i = 0; i < len; i++) { status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); - if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 
- continue; - break; + if (status != -EPERM) + break; } - /* - * -EACCESS could mean that the user doesn't have correct permissions - * to access the mount. It could also mean that we tried to mount - * with a gss auth flavor, but rpc.gssd isn't running. Either way, - * existing mount programs don't handle -EACCES very well so it should - * be mapped to -EPERM instead. - */ - if (status == -EACCES) - status = -EPERM; - return status; -} - -/* - * get the file handle for the "/" directory on the server - */ -static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, - struct nfs_fsinfo *info) -{ - int status = nfs4_lookup_root(server, fhandle, info); - if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR)) - /* - * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM - * by nfs4_map_errors() as this function exits. - */ - status = nfs4_find_root_sec(server, fhandle, info); if (status == 0) status = nfs4_server_capabilities(server, fhandle); if (status == 0) @@ -3696,7 +3667,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, rpc_delay(task, NFS4_POLL_RETRY_MAX); task->tk_status = 0; return -EAGAIN; - case -NFS4ERR_RETRY_UNCACHED_REP: case -NFS4ERR_OLD_STATEID: task->tk_status = 0; return -EAGAIN; @@ -3753,20 +3723,21 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, sizeof(setclientid.sc_uaddr), "%s.%u.%u", clp->cl_ipaddr, port >> 8, port & 255); - status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); + status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); if (status != -NFS4ERR_CLID_INUSE) break; - if (loop != 0) { - ++clp->cl_id_uniquifier; + if (signalled()) break; - } - ++loop; - ssleep(clp->cl_lease_time / HZ + 1); + if (loop++ & 1) + ssleep(clp->cl_lease_time / HZ + 1); + else + if (++clp->cl_id_uniquifier == 0) + break; } return status; } -int nfs4_proc_setclientid_confirm(struct nfs_client *clp, +static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct nfs4_setclientid_res *arg, struct rpc_cred *cred) { @@ -3781,7 +3752,7 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp, int status; now = jiffies; - status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); + status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); if (status == 0) { spin_lock(&clp->cl_lock); clp->cl_lease_time = fsinfo.lease_time * HZ; @@ -3791,6 +3762,26 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp, return status; } +int nfs4_proc_setclientid_confirm(struct nfs_client *clp, + struct nfs4_setclientid_res *arg, + struct rpc_cred *cred) +{ + long timeout = 0; + int err; + do { + err = _nfs4_proc_setclientid_confirm(clp, arg, cred); + switch (err) { + case 0: + return err; + case -NFS4ERR_RESOURCE: + /* The IBM lawyers misread another document! 
*/ + case -NFS4ERR_DELAY: + err = nfs4_delay(clp->cl_rpcclient, &timeout); + } + } while (err == 0); + return err; +} + struct nfs4_delegreturndata { struct nfs4_delegreturnargs args; struct nfs4_delegreturnres res; @@ -4795,7 +4786,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) init_utsname()->domainname, clp->cl_rpcclient->cl_auth->au_flavor); - status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); + status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); if (!status) status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); dprintk("<-- %s status= %d\n", __func__, status); @@ -4846,8 +4837,6 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); rpc_delay(task, NFS4_POLL_RETRY_MIN); task->tk_status = 0; - /* fall through */ - case -NFS4ERR_RETRY_UNCACHED_REP: nfs_restart_rpc(task, data->clp); return; } @@ -4880,8 +4869,7 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) .rpc_client = clp->cl_rpcclient, .rpc_message = &msg, .callback_ops = &nfs4_get_lease_time_ops, - .callback_data = &data, - .flags = RPC_TASK_TIMEOUT, + .callback_data = &data }; int status; @@ -5183,7 +5171,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp) nfs4_init_channel_attrs(&args); args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); - status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); + status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0); if (!status) /* Verify the session's negotiated channel_attrs values */ @@ -5206,10 +5194,20 @@ int nfs4_proc_create_session(struct nfs_client *clp) int status; unsigned *ptr; struct nfs4_session *session = clp->cl_session; + long timeout = 0; + int err; dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); - status = _nfs4_proc_create_session(clp); + do { + status = _nfs4_proc_create_session(clp); + if (status == -NFS4ERR_DELAY) { + err = nfs4_delay(clp->cl_rpcclient, &timeout); + if (err) + status = err; + } + } while (status == -NFS4ERR_DELAY); + if (status) goto out; @@ -5250,7 +5248,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session) msg.rpc_argp = session; msg.rpc_resp = NULL; msg.rpc_cred = NULL; - status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); + status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0); if (status) printk(KERN_WARNING @@ -5483,8 +5481,6 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf break; case -NFS4ERR_DELAY: rpc_delay(task, NFS4_POLL_RETRY_MAX); - /* fall through */ - case -NFS4ERR_RETRY_UNCACHED_REP: return -EAGAIN; default: nfs4_schedule_lease_recovery(clp); diff --git a/trunk/fs/nfs/nfs4state.c b/trunk/fs/nfs/nfs4state.c index 036f5adc9e1f..a6804f704d9d 100644 --- a/trunk/fs/nfs/nfs4state.c +++ b/trunk/fs/nfs/nfs4state.c @@ -64,15 +64,10 @@ static LIST_HEAD(nfs4_clientid_list); int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) { - struct nfs4_setclientid_res clid = { - .clientid = clp->cl_clientid, - .confirm = clp->cl_confirm, - }; + struct nfs4_setclientid_res clid; unsigned short port; int status; - if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) - goto do_confirm; port = nfs_callback_tcpport; if (clp->cl_addr.ss_family == AF_INET6) port = nfs_callback_tcpport6; @@ -80,14 +75,10 @@ int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid); 
if (status != 0) goto out; - clp->cl_clientid = clid.clientid; - clp->cl_confirm = clid.confirm; - set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); -do_confirm: status = nfs4_proc_setclientid_confirm(clp, &clid, cred); if (status != 0) goto out; - clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); + clp->cl_clientid = clid.clientid; nfs4_schedule_state_renewal(clp); out: return status; @@ -239,18 +230,13 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) { int status; - if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) - goto do_confirm; nfs4_begin_drain_session(clp); status = nfs4_proc_exchange_id(clp, cred); if (status != 0) goto out; - set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); -do_confirm: status = nfs4_proc_create_session(clp); if (status != 0) goto out; - clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); nfs41_setup_state_renewal(clp); nfs_mark_client_ready(clp, NFS_CS_READY); out: @@ -1598,23 +1584,20 @@ static int nfs4_recall_slot(struct nfs_client *clp) { return 0; } */ static void nfs4_set_lease_expired(struct nfs_client *clp, int status) { - switch (status) { - case -NFS4ERR_CLID_INUSE: - case -NFS4ERR_STALE_CLIENTID: - clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); - break; - case -NFS4ERR_DELAY: - case -ETIMEDOUT: - case -EAGAIN: - ssleep(1); - break; + if (nfs4_has_session(clp)) { + switch (status) { + case -NFS4ERR_DELAY: + case -NFS4ERR_CLID_INUSE: + case -EAGAIN: + break; - case -EKEYEXPIRED: - nfs4_warn_keyexpired(clp->cl_hostname); - case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery - * in nfs4_exchange_id */ - default: - return; + case -EKEYEXPIRED: + nfs4_warn_keyexpired(clp->cl_hostname); + case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery + * in nfs4_exchange_id */ + default: + return; + } } set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); } @@ -1624,7 +1607,7 @@ static void nfs4_state_manager(struct nfs_client *clp) int status = 0; /* Ensure exclusive access to NFSv4 state */ - do { + for(;;) { if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { /* We're going to have to re-establish a clientid */ status = nfs4_reclaim_lease(clp); @@ -1708,7 +1691,7 @@ static void nfs4_state_manager(struct nfs_client *clp) break; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) break; - } while (atomic_read(&clp->cl_count) > 1); + } return; out_error: printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s" diff --git a/trunk/fs/nfs/nfs4xdr.c b/trunk/fs/nfs/nfs4xdr.c index c3ccd2c46834..dddfb5795d7b 100644 --- a/trunk/fs/nfs/nfs4xdr.c +++ b/trunk/fs/nfs/nfs4xdr.c @@ -1452,25 +1452,26 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr) { - uint32_t attrs[2] = { - FATTR4_WORD0_RDATTR_ERROR, - FATTR4_WORD1_MOUNTED_ON_FILEID, - }; + uint32_t attrs[2] = {0, 0}; uint32_t dircount = readdir->count >> 1; __be32 *p; if (readdir->plus) { attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE| - FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE|FATTR4_WORD0_FILEID; + FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE; attrs[1] |= FATTR4_WORD1_MODE|FATTR4_WORD1_NUMLINKS|FATTR4_WORD1_OWNER| FATTR4_WORD1_OWNER_GROUP|FATTR4_WORD1_RAWDEV| FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS| FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; dircount >>= 1; } - /* Use mounted_on_fileid only if the server supports it */ - if (!(readdir->bitmask[1] & 
FATTR4_WORD1_MOUNTED_ON_FILEID)) - attrs[0] |= FATTR4_WORD0_FILEID; + attrs[0] |= FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID; + attrs[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; + /* Switch to mounted_on_fileid if the server supports it */ + if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) + attrs[0] &= ~FATTR4_WORD0_FILEID; + else + attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20); *p++ = cpu_to_be32(OP_READDIR); @@ -3139,7 +3140,7 @@ static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitma goto out_overflow; xdr_decode_hyper(p, fileid); bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; - ret = NFS_ATTR_FATTR_MOUNTED_ON_FILEID; + ret = NFS_ATTR_FATTR_FILEID; } dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid); return ret; @@ -4001,6 +4002,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, { int status; umode_t fmode = 0; + uint64_t fileid; uint32_t type; status = decode_attr_type(xdr, bitmap, &type); @@ -4099,10 +4101,13 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, goto xdr_error; fattr->valid |= status; - status = decode_attr_mounted_on_fileid(xdr, bitmap, &fattr->mounted_on_fileid); + status = decode_attr_mounted_on_fileid(xdr, bitmap, &fileid); if (status < 0) goto xdr_error; - fattr->valid |= status; + if (status != 0 && !(fattr->valid & status)) { + fattr->fileid = fileid; + fattr->valid |= status; + } xdr_error: dprintk("%s: xdr returned %d\n", __func__, -status); @@ -4833,21 +4838,17 @@ static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) struct nfs4_secinfo_flavor *sec_flavor; int status; __be32 *p; - int i, num_flavors; + int i; status = decode_op_hdr(xdr, OP_SECINFO); - if (status) - goto out; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; + res->flavors->num_flavors = be32_to_cpup(p); - res->flavors->num_flavors = 0; - num_flavors = be32_to_cpup(p); - - for (i = 0; i < num_flavors; i++) { + for (i = 0; i < res->flavors->num_flavors; i++) { sec_flavor = &res->flavors->flavors[i]; - if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE) + if ((char *)&sec_flavor[1] - (char *)res > PAGE_SIZE) break; p = xdr_inline_decode(xdr, 4); @@ -4856,15 +4857,13 @@ static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) sec_flavor->flavor = be32_to_cpup(p); if (sec_flavor->flavor == RPC_AUTH_GSS) { - status = decode_secinfo_gss(xdr, sec_flavor); - if (status) - goto out; + if (decode_secinfo_gss(xdr, sec_flavor)) + break; } - res->flavors->num_flavors++; } -out: - return status; + return 0; + out_overflow: print_overflow_msg(__func__, xdr); return -EIO; @@ -6409,9 +6408,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, entry->server, 1) < 0) goto out_overflow; - if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) - entry->ino = entry->fattr->mounted_on_fileid; - else if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) + if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) entry->ino = entry->fattr->fileid; entry->d_type = DT_UNKNOWN; diff --git a/trunk/fs/nfs/pnfs.c b/trunk/fs/nfs/pnfs.c index f57f5281a520..d9ab97269ce6 100644 --- a/trunk/fs/nfs/pnfs.c +++ b/trunk/fs/nfs/pnfs.c @@ -383,7 +383,6 @@ pnfs_destroy_all_layouts(struct nfs_client *clp) plh_layouts); dprintk("%s freeing layout for inode %lu\n", __func__, lo->plh_inode->i_ino); - list_del_init(&lo->plh_layouts); 
pnfs_destroy_layout(NFS_I(lo->plh_inode)); } } @@ -467,8 +466,7 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, static struct pnfs_layout_segment * send_layoutget(struct pnfs_layout_hdr *lo, struct nfs_open_context *ctx, - u32 iomode, - gfp_t gfp_flags) + u32 iomode) { struct inode *ino = lo->plh_inode; struct nfs_server *server = NFS_SERVER(ino); @@ -481,7 +479,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, dprintk("--> %s\n", __func__); BUG_ON(ctx == NULL); - lgp = kzalloc(sizeof(*lgp), gfp_flags); + lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); if (lgp == NULL) return NULL; @@ -489,12 +487,12 @@ send_layoutget(struct pnfs_layout_hdr *lo, max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; max_pages = max_resp_sz >> PAGE_SHIFT; - pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); + pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); if (!pages) goto out_err_free; for (i = 0; i < max_pages; i++) { - pages[i] = alloc_page(gfp_flags); + pages[i] = alloc_page(GFP_KERNEL); if (!pages[i]) goto out_err_free; } @@ -510,7 +508,6 @@ send_layoutget(struct pnfs_layout_hdr *lo, lgp->args.layout.pages = pages; lgp->args.layout.pglen = max_pages * PAGE_SIZE; lgp->lsegpp = &lseg; - lgp->gfp_flags = gfp_flags; /* Synchronously retrieve layout information from server and * store in lseg. @@ -668,11 +665,11 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, } static struct pnfs_layout_hdr * -alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags) +alloc_init_layout_hdr(struct inode *ino) { struct pnfs_layout_hdr *lo; - lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags); + lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); if (!lo) return NULL; atomic_set(&lo->plh_refcount, 1); @@ -684,7 +681,7 @@ alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags) } static struct pnfs_layout_hdr * -pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags) +pnfs_find_alloc_layout(struct inode *ino) { struct nfs_inode *nfsi = NFS_I(ino); struct pnfs_layout_hdr *new = NULL; @@ -699,7 +696,7 @@ pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags) return nfsi->layout; } spin_unlock(&ino->i_lock); - new = alloc_init_layout_hdr(ino, gfp_flags); + new = alloc_init_layout_hdr(ino); spin_lock(&ino->i_lock); if (likely(nfsi->layout == NULL)) /* Won the race? 
*/ @@ -759,8 +756,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode) struct pnfs_layout_segment * pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, - enum pnfs_iomode iomode, - gfp_t gfp_flags) + enum pnfs_iomode iomode) { struct nfs_inode *nfsi = NFS_I(ino); struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; @@ -771,7 +767,7 @@ pnfs_update_layout(struct inode *ino, if (!pnfs_enabled_sb(NFS_SERVER(ino))) return NULL; spin_lock(&ino->i_lock); - lo = pnfs_find_alloc_layout(ino, gfp_flags); + lo = pnfs_find_alloc_layout(ino); if (lo == NULL) { dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__); goto out_unlock; @@ -811,7 +807,7 @@ pnfs_update_layout(struct inode *ino, spin_unlock(&clp->cl_lock); } - lseg = send_layoutget(lo, ctx, iomode, gfp_flags); + lseg = send_layoutget(lo, ctx, iomode); if (!lseg && first) { spin_lock(&clp->cl_lock); list_del_init(&lo->plh_layouts); @@ -850,7 +846,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) goto out; } /* Inject layout blob into I/O device driver */ - lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); + lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); if (!lseg || IS_ERR(lseg)) { if (!lseg) status = -ENOMEM; @@ -903,8 +899,7 @@ static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio, /* This is first coelesce call for a series of nfs_pages */ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, prev->wb_context, - IOMODE_READ, - GFP_KERNEL); + IOMODE_READ); } return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); } @@ -926,8 +921,7 @@ static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio, /* This is first coelesce call for a series of nfs_pages */ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, prev->wb_context, - IOMODE_RW, - GFP_NOFS); + IOMODE_RW); } return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req); } @@ -1010,7 +1004,6 @@ pnfs_set_layoutcommit(struct nfs_write_data *wdata) { struct nfs_inode *nfsi = NFS_I(wdata->inode); loff_t end_pos = wdata->args.offset + wdata->res.count; - bool mark_as_dirty = false; spin_lock(&nfsi->vfs_inode.i_lock); if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) { @@ -1018,18 +1011,13 @@ pnfs_set_layoutcommit(struct nfs_write_data *wdata) get_lseg(wdata->lseg); wdata->lseg->pls_lc_cred = get_rpccred(wdata->args.context->state->owner->so_cred); - mark_as_dirty = true; + mark_inode_dirty_sync(wdata->inode); dprintk("%s: Set layoutcommit for inode %lu ", __func__, wdata->inode->i_ino); } if (end_pos > wdata->lseg->pls_end_pos) wdata->lseg->pls_end_pos = end_pos; spin_unlock(&nfsi->vfs_inode.i_lock); - - /* if pnfs_layoutcommit_inode() runs between inode locks, the next one - * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */ - if (mark_as_dirty) - mark_inode_dirty_sync(wdata->inode); } EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit); diff --git a/trunk/fs/nfs/pnfs.h b/trunk/fs/nfs/pnfs.h index 0c015bad9e7a..bc4827202e7a 100644 --- a/trunk/fs/nfs/pnfs.h +++ b/trunk/fs/nfs/pnfs.h @@ -70,7 +70,7 @@ struct pnfs_layoutdriver_type { const u32 id; const char *name; struct module *owner; - struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags); + struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr); void (*free_lseg) (struct pnfs_layout_segment *lseg); /* test for nfs page cache coalescing */ @@ -126,7 +126,7 @@ void get_layout_hdr(struct 
pnfs_layout_hdr *lo); void put_lseg(struct pnfs_layout_segment *lseg); struct pnfs_layout_segment * pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, - enum pnfs_iomode access_type, gfp_t gfp_flags); + enum pnfs_iomode access_type); void set_pnfs_layoutdriver(struct nfs_server *, u32 id); void unset_pnfs_layoutdriver(struct nfs_server *); enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *, @@ -245,7 +245,7 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg) static inline struct pnfs_layout_segment * pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, - enum pnfs_iomode access_type, gfp_t gfp_flags) + enum pnfs_iomode access_type) { return NULL; } diff --git a/trunk/fs/nfs/read.c b/trunk/fs/nfs/read.c index 2bcf0dc306a1..7cded2b12a05 100644 --- a/trunk/fs/nfs/read.c +++ b/trunk/fs/nfs/read.c @@ -288,7 +288,7 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc) atomic_set(&req->wb_complete, requests); BUG_ON(desc->pg_lseg != NULL); - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); ClearPageError(page); offset = 0; nbytes = desc->pg_count; @@ -351,7 +351,7 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc) } req = nfs_list_entry(data->pages.next); if ((!lseg) && list_is_singular(&data->pages)) - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL); + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ); ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count, 0, lseg); diff --git a/trunk/fs/nfs/super.c b/trunk/fs/nfs/super.c index e288f06d3fa7..2b8e9a5e366a 100644 --- a/trunk/fs/nfs/super.c +++ b/trunk/fs/nfs/super.c @@ -1004,7 +1004,6 @@ static int nfs_parse_security_flavors(char *value, return 0; } - mnt->flags |= NFS_MOUNT_SECFLAVOUR; mnt->auth_flavor_len = 1; return 1; } @@ -1977,15 +1976,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data) if (error < 0) goto out; - /* - * noac is a special case. It implies -o sync, but that's not - * necessarily reflected in the mtab options. do_remount_sb - * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the - * remount options, so we have to explicitly reset it. - */ - if (data->flags & NFS_MOUNT_NOAC) - *flags |= MS_SYNCHRONOUS; - /* compare new mount options with old ones */ error = nfs_compare_remount_data(nfss, data); out: @@ -2245,7 +2235,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type, if (!s->s_root) { /* initial superblock/root creation */ nfs_fill_super(s, data); - nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL); + nfs_fscache_get_super_cookie( + s, data ? 
data->fscache_uniq : NULL, NULL); } mntroot = nfs_get_root(s, mntfh, dev_name); diff --git a/trunk/fs/nfs/write.c b/trunk/fs/nfs/write.c index 49c715b4ac92..e4cbc11a74ab 100644 --- a/trunk/fs/nfs/write.c +++ b/trunk/fs/nfs/write.c @@ -680,6 +680,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, req = nfs_setup_write_request(ctx, page, offset, count); if (IS_ERR(req)) return PTR_ERR(req); + nfs_mark_request_dirty(req); /* Update file length */ nfs_grow_file(page, offset, count); nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); @@ -939,7 +940,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc) atomic_set(&req->wb_complete, requests); BUG_ON(desc->pg_lseg); - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); ClearPageError(page); offset = 0; nbytes = desc->pg_count; @@ -1013,7 +1014,7 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc) } req = nfs_list_entry(data->pages.next); if ((!lseg) && list_is_singular(&data->pages)) - lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS); + lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW); if ((desc->pg_ioflags & FLUSH_COND_STABLE) && (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) @@ -1417,7 +1418,8 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) task->tk_pid, task->tk_status); /* Call the NFS version-specific code */ - NFS_PROTO(data->inode)->commit_done(task, data); + if (NFS_PROTO(data->inode)->commit_done(task, data) != 0) + return; } void nfs_commit_release_pages(struct nfs_write_data *data) diff --git a/trunk/fs/nilfs2/alloc.c b/trunk/fs/nilfs2/alloc.c index f7684483785e..0a0a66d98cce 100644 --- a/trunk/fs/nilfs2/alloc.c +++ b/trunk/fs/nilfs2/alloc.c @@ -646,7 +646,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) unsigned long group, group_offset; int i, j, n, ret; - for (i = 0; i < nitems; i = j) { + for (i = 0; i < nitems; i += n) { group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); if (ret < 0) diff --git a/trunk/fs/ocfs2/cluster/heartbeat.c b/trunk/fs/ocfs2/cluster/heartbeat.c index 9a3e6bbff27b..643720209a98 100644 --- a/trunk/fs/ocfs2/cluster/heartbeat.c +++ b/trunk/fs/ocfs2/cluster/heartbeat.c @@ -539,41 +539,25 @@ static int o2hb_verify_crc(struct o2hb_region *reg, /* We want to make sure that nobody is heartbeating on top of us -- * this will help detect an invalid configuration. 
*/ -static void o2hb_check_last_timestamp(struct o2hb_region *reg) +static int o2hb_check_last_timestamp(struct o2hb_region *reg) { + int node_num, ret; struct o2hb_disk_slot *slot; struct o2hb_disk_heartbeat_block *hb_block; - char *errstr; - slot = ®->hr_slots[o2nm_this_node()]; - /* Don't check on our 1st timestamp */ - if (!slot->ds_last_time) - return; - - hb_block = slot->ds_raw_block; - if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && - le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && - hb_block->hb_node == slot->ds_node_num) - return; + node_num = o2nm_this_node(); -#define ERRSTR1 "Another node is heartbeating on device" -#define ERRSTR2 "Heartbeat generation mismatch on device" -#define ERRSTR3 "Heartbeat sequence mismatch on device" + ret = 1; + slot = ®->hr_slots[node_num]; + /* Don't check on our 1st timestamp */ + if (slot->ds_last_time) { + hb_block = slot->ds_raw_block; - if (hb_block->hb_node != slot->ds_node_num) - errstr = ERRSTR1; - else if (le64_to_cpu(hb_block->hb_generation) != - slot->ds_last_generation) - errstr = ERRSTR2; - else - errstr = ERRSTR3; + if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time) + ret = 0; + } - mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), " - "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name, - slot->ds_node_num, (unsigned long long)slot->ds_last_generation, - (unsigned long long)slot->ds_last_time, hb_block->hb_node, - (unsigned long long)le64_to_cpu(hb_block->hb_generation), - (unsigned long long)le64_to_cpu(hb_block->hb_seq)); + return ret; } static inline void o2hb_prepare_block(struct o2hb_region *reg, @@ -999,7 +983,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) /* With an up to date view of the slots, we can check that no * other node has been improperly configured to heartbeat in * our slot. */ - o2hb_check_last_timestamp(reg); + if (!o2hb_check_last_timestamp(reg)) + mlog(ML_ERROR, "Device \"%s\": another node is heartbeating " + "in our slot!\n", reg->hr_dev_name); /* fill in the proper info for our next heartbeat */ o2hb_prepare_block(reg, reg->hr_generation); @@ -1013,8 +999,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) } i = -1; - while((i = find_next_bit(configured_nodes, - O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { + while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { + change |= o2hb_check_slot(reg, ®->hr_slots[i]); } @@ -1704,7 +1690,6 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, struct file *filp = NULL; struct inode *inode = NULL; ssize_t ret = -EINVAL; - int live_threshold; if (reg->hr_bdev) goto out; @@ -1781,18 +1766,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, * A node is considered live after it has beat LIVE_THRESHOLD * times. We're not steady until we've given them a chance * _after_ our first read. - * The default threshold is bare minimum so as to limit the delay - * during mounts. For global heartbeat, the threshold doubled for the - * first region. 
*/ - live_threshold = O2HB_LIVE_THRESHOLD; - if (o2hb_global_heartbeat_active()) { - spin_lock(&o2hb_live_lock); - if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1) - live_threshold <<= 1; - spin_unlock(&o2hb_live_lock); - } - atomic_set(®->hr_steady_iterations, live_threshold + 1); + atomic_set(®->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1); hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", reg->hr_item.ci_name); diff --git a/trunk/fs/ocfs2/dir.c b/trunk/fs/ocfs2/dir.c index 8582e3f4f120..9fe5b8fd658f 100644 --- a/trunk/fs/ocfs2/dir.c +++ b/trunk/fs/ocfs2/dir.c @@ -2868,7 +2868,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, bytes = blocks_wanted << sb->s_blocksize_bits; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(dir); - struct ocfs2_alloc_context *data_ac = NULL; + struct ocfs2_alloc_context *data_ac; struct ocfs2_alloc_context *meta_ac = NULL; struct buffer_head *dirdata_bh = NULL; struct buffer_head *dx_root_bh = NULL; diff --git a/trunk/fs/ocfs2/dlm/dlmdomain.c b/trunk/fs/ocfs2/dlm/dlmdomain.c index 3b179d6cbde0..7540a492eaba 100644 --- a/trunk/fs/ocfs2/dlm/dlmdomain.c +++ b/trunk/fs/ocfs2/dlm/dlmdomain.c @@ -1614,8 +1614,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) spin_unlock(&dlm->spinlock); /* Support for global heartbeat and node info was added in 1.1 */ - if (dlm->dlm_locking_proto.pv_major > 1 || - dlm->dlm_locking_proto.pv_minor > 0) { + if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) { status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); if (status) { mlog_errno(status); diff --git a/trunk/fs/ocfs2/dlm/dlmmaster.c b/trunk/fs/ocfs2/dlm/dlmmaster.c index 84d166328cf7..fede57ed005f 100644 --- a/trunk/fs/ocfs2/dlm/dlmmaster.c +++ b/trunk/fs/ocfs2/dlm/dlmmaster.c @@ -2574,9 +2574,6 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, res->state &= ~DLM_LOCK_RES_MIGRATING; wake = 1; spin_unlock(&res->spinlock); - if (dlm_is_host_down(ret)) - dlm_wait_for_node_death(dlm, target, - DLM_NODE_DEATH_WAIT_MAX); goto leave; } diff --git a/trunk/fs/ocfs2/file.c b/trunk/fs/ocfs2/file.c index 89659d6dc206..41565ae52856 100644 --- a/trunk/fs/ocfs2/file.c +++ b/trunk/fs/ocfs2/file.c @@ -1607,9 +1607,6 @@ static void ocfs2_calc_trunc_pos(struct inode *inode, range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (le32_to_cpu(rec->e_cpos) >= trunc_start) { - /* - * remove an entire extent record. - */ *trunc_cpos = le32_to_cpu(rec->e_cpos); /* * Skip holes if any. @@ -1620,16 +1617,7 @@ static void ocfs2_calc_trunc_pos(struct inode *inode, *blkno = le64_to_cpu(rec->e_blkno); *trunc_end = le32_to_cpu(rec->e_cpos); } else if (range > trunc_start) { - /* - * remove a partial extent record, which means we're - * removing the last extent record. - */ *trunc_cpos = trunc_start; - /* - * skip hole if any. 
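The dlm_try_to_join_domain() hunk above gates dlm_send_nodeinfo() on the locking protocol being newer than 1.0 ("global heartbeat and node info was added in 1.1"). A tiny standalone sketch of that major/minor gate, written out explicitly; struct proto_version and the sample values are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical protocol version pair, analogous to pv_major/pv_minor. */
struct proto_version {
	unsigned char major;
	unsigned char minor;
};

/* True when the negotiated version is 1.1 or newer. */
static bool at_least_1_1(struct proto_version v)
{
	return v.major > 1 || (v.major == 1 && v.minor > 0);
}

int main(void)
{
	struct proto_version old = { 1, 0 }, new = { 1, 1 };

	printf("1.0 -> %d, 1.1 -> %d\n", at_least_1_1(old), at_least_1_1(new));
	return 0;
}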
- */ - if (range < *trunc_end) - *trunc_end = range; *trunc_len = *trunc_end - trunc_start; coff = trunc_start - le32_to_cpu(rec->e_cpos); *blkno = le64_to_cpu(rec->e_blkno) + diff --git a/trunk/fs/ocfs2/journal.c b/trunk/fs/ocfs2/journal.c index 295d56454e8b..b141a44605ca 100644 --- a/trunk/fs/ocfs2/journal.c +++ b/trunk/fs/ocfs2/journal.c @@ -1260,9 +1260,6 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) { struct ocfs2_journal *journal = osb->journal; - if (ocfs2_is_hard_readonly(osb)) - return; - /* No need to queue up our truncate_log as regular cleanup will catch * that */ ocfs2_queue_recovery_completion(journal, osb->slot_num, diff --git a/trunk/fs/ocfs2/ocfs2_fs.h b/trunk/fs/ocfs2/ocfs2_fs.h index 938387a10d5d..b68f87a83924 100644 --- a/trunk/fs/ocfs2/ocfs2_fs.h +++ b/trunk/fs/ocfs2/ocfs2_fs.h @@ -1019,7 +1019,7 @@ struct ocfs2_xattr_entry { __le16 xe_name_offset; /* byte offset from the 1st entry in the local xattr storage(inode, xattr block or xattr bucket). */ - __u8 xe_name_len; /* xattr name len, doesn't include prefix. */ + __u8 xe_name_len; /* xattr name len, does't include prefix. */ __u8 xe_type; /* the low 7 bits indicate the name prefix * type and the highest bit indicates whether * the EA is stored in the local storage. */ diff --git a/trunk/fs/partitions/efi.c b/trunk/fs/partitions/efi.c index 19d6750d1d6c..ac0ccb5026a2 100644 --- a/trunk/fs/partitions/efi.c +++ b/trunk/fs/partitions/efi.c @@ -348,12 +348,6 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba, goto fail; } - /* Check that sizeof_partition_entry has the correct value */ - if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) { - pr_debug("GUID Partitition Entry Size check failed.\n"); - goto fail; - } - if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) goto fail; diff --git a/trunk/fs/proc/task_mmu.c b/trunk/fs/proc/task_mmu.c index 318d8654989b..2e7addfd9803 100644 --- a/trunk/fs/proc/task_mmu.c +++ b/trunk/fs/proc/task_mmu.c @@ -214,7 +214,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) int flags = vma->vm_flags; unsigned long ino = 0; unsigned long long pgoff = 0; - unsigned long start, end; + unsigned long start; dev_t dev = 0; int len; @@ -227,15 +227,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (stack_guard_page_start(vma, start)) - start += PAGE_SIZE; - end = vma->vm_end; - if (stack_guard_page_end(vma, end)) - end -= PAGE_SIZE; + if (vma->vm_flags & VM_GROWSDOWN) + if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) + start += PAGE_SIZE; seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", start, - end, + vma->vm_end, flags & VM_READ ? 'r' : '-', flags & VM_WRITE ? 'w' : '-', flags & VM_EXEC ? 'x' : '-', diff --git a/trunk/fs/ubifs/log.c b/trunk/fs/ubifs/log.c index 40fa780ebea7..4d0cb1241460 100644 --- a/trunk/fs/ubifs/log.c +++ b/trunk/fs/ubifs/log.c @@ -174,6 +174,26 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud) spin_unlock(&c->buds_lock); } +/** + * ubifs_create_buds_lists - create journal head buds lists for remount rw. 
+ * @c: UBIFS file-system description object + */ +void ubifs_create_buds_lists(struct ubifs_info *c) +{ + struct rb_node *p; + + spin_lock(&c->buds_lock); + p = rb_first(&c->buds); + while (p) { + struct ubifs_bud *bud = rb_entry(p, struct ubifs_bud, rb); + struct ubifs_jhead *jhead = &c->jheads[bud->jhead]; + + list_add_tail(&bud->list, &jhead->buds_list); + p = rb_next(p); + } + spin_unlock(&c->buds_lock); +} + /** * ubifs_add_bud_to_log - add a new bud to the log. * @c: UBIFS file-system description object diff --git a/trunk/fs/ubifs/replay.c b/trunk/fs/ubifs/replay.c index d3d6d365bfc1..eed0fcff8d73 100644 --- a/trunk/fs/ubifs/replay.c +++ b/trunk/fs/ubifs/replay.c @@ -59,7 +59,6 @@ enum { * @new_size: truncation new size * @free: amount of free space in a bud * @dirty: amount of dirty space in a bud from padding and deletion nodes - * @jhead: journal head number of the bud * * UBIFS journal replay must compare node sequence numbers, which means it must * build a tree of node information to insert into the TNC. @@ -81,7 +80,6 @@ struct replay_entry { struct { int free; int dirty; - int jhead; }; }; }; @@ -161,11 +159,6 @@ static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r) err = PTR_ERR(lp); goto out; } - - /* Make sure the journal head points to the latest bud */ - err = ubifs_wbuf_seek_nolock(&c->jheads[r->jhead].wbuf, r->lnum, - c->leb_size - r->free, UBI_SHORTTERM); - out: ubifs_release_lprops(c); return err; @@ -634,6 +627,10 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, ubifs_assert(sleb->endpt - offs >= used); ubifs_assert(sleb->endpt % c->min_io_size == 0); + if (sleb->endpt + c->min_io_size <= c->leb_size && !c->ro_mount) + err = ubifs_wbuf_seek_nolock(&c->jheads[jhead].wbuf, lnum, + sleb->endpt, UBI_SHORTTERM); + *dirty = sleb->endpt - offs - used; *free = c->leb_size - sleb->endpt; @@ -656,14 +653,12 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, * @sqnum: sequence number * @free: amount of free space in bud * @dirty: amount of dirty space from padding and deletion nodes - * @jhead: journal head number for the bud * * This function inserts a reference node to the replay tree and returns zero * in case of success or a negative error code in case of failure. 
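The new ubifs_create_buds_lists() above walks every bud in the tree and appends it to the bud list of its journal head. A minimal userspace sketch of that distribution step, with a plain array standing in for the rb-tree; struct bud, struct jhead and the sample data are stand-ins, not the UBIFS types:

#include <stdio.h>

#define NR_JHEADS 3

/* Hypothetical bud: which journal head owns it, plus a list link. */
struct bud {
	int jhead;
	int lnum;
	struct bud *next;
};

/* Per-journal-head bud list; the tail pointer preserves append order. */
struct jhead {
	struct bud *head, *tail;
};

static void append_bud(struct jhead *jh, struct bud *b)
{
	b->next = NULL;
	if (jh->tail)
		jh->tail->next = b;
	else
		jh->head = b;
	jh->tail = b;
}

int main(void)
{
	struct bud buds[] = { {0, 10}, {1, 11}, {0, 12}, {2, 13} };
	struct jhead jheads[NR_JHEADS] = { { NULL, NULL } };
	size_t i;

	/* Distribute each bud onto the list owned by its journal head. */
	for (i = 0; i < sizeof(buds) / sizeof(buds[0]); i++)
		append_bud(&jheads[buds[i].jhead], &buds[i]);

	for (i = 0; i < NR_JHEADS; i++)
		for (struct bud *b = jheads[i].head; b; b = b->next)
			printf("jhead %zu: bud at LEB %d\n", i, b->lnum);
	return 0;
}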
*/ static int insert_ref_node(struct ubifs_info *c, int lnum, int offs, - unsigned long long sqnum, int free, int dirty, - int jhead) + unsigned long long sqnum, int free, int dirty) { struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL; struct replay_entry *r; @@ -693,7 +688,6 @@ static int insert_ref_node(struct ubifs_info *c, int lnum, int offs, r->flags = REPLAY_REF; r->free = free; r->dirty = dirty; - r->jhead = jhead; rb_link_node(&r->rb, parent, p); rb_insert_color(&r->rb, &c->replay_tree); @@ -718,7 +712,7 @@ static int replay_buds(struct ubifs_info *c) if (err) return err; err = insert_ref_node(c, b->bud->lnum, b->bud->start, b->sqnum, - free, dirty, b->bud->jhead); + free, dirty); if (err) return err; } diff --git a/trunk/fs/ubifs/super.c b/trunk/fs/ubifs/super.c index 04ad07f4fcc3..be6c7b008f38 100644 --- a/trunk/fs/ubifs/super.c +++ b/trunk/fs/ubifs/super.c @@ -1257,12 +1257,12 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; } - err = alloc_wbufs(c); - if (err) - goto out_cbuf; - sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); if (!c->ro_mount) { + err = alloc_wbufs(c); + if (err) + goto out_cbuf; + /* Create background thread */ c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); if (IS_ERR(c->bgt)) { @@ -1631,6 +1631,12 @@ static int ubifs_remount_rw(struct ubifs_info *c) if (err) goto out; + err = alloc_wbufs(c); + if (err) + goto out; + + ubifs_create_buds_lists(c); + /* Create background thread */ c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); if (IS_ERR(c->bgt)) { @@ -1738,6 +1744,7 @@ static void ubifs_remount_ro(struct ubifs_info *c) if (err) ubifs_ro_mode(c, err); + free_wbufs(c); vfree(c->orph_buf); c->orph_buf = NULL; kfree(c->write_reserve_buf); diff --git a/trunk/fs/xfs/linux-2.6/xfs_sync.c b/trunk/fs/xfs/linux-2.6/xfs_sync.c index 3e898a48122d..e4f9c1b0836c 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_sync.c +++ b/trunk/fs/xfs/linux-2.6/xfs_sync.c @@ -926,7 +926,6 @@ xfs_reclaim_inodes_ag( XFS_LOOKUP_BATCH, XFS_ICI_RECLAIM_TAG); if (!nr_found) { - done = 1; rcu_read_unlock(); break; } diff --git a/trunk/fs/xfs/xfs_trans_ail.c b/trunk/fs/xfs/xfs_trans_ail.c index 5fc2380092c8..acdb92f14d51 100644 --- a/trunk/fs/xfs/xfs_trans_ail.c +++ b/trunk/fs/xfs/xfs_trans_ail.c @@ -346,23 +346,20 @@ xfs_ail_delete( */ STATIC void xfs_ail_worker( - struct work_struct *work) + struct work_struct *work) { - struct xfs_ail *ailp = container_of(to_delayed_work(work), + struct xfs_ail *ailp = container_of(to_delayed_work(work), struct xfs_ail, xa_work); - xfs_mount_t *mp = ailp->xa_mount; + long tout; + xfs_lsn_t target = ailp->xa_target; + xfs_lsn_t lsn; + xfs_log_item_t *lip; + int flush_log, count, stuck; + xfs_mount_t *mp = ailp->xa_mount; struct xfs_ail_cursor *cur = &ailp->xa_cursors; - xfs_log_item_t *lip; - xfs_lsn_t lsn; - xfs_lsn_t target; - long tout = 10; - int flush_log = 0; - int stuck = 0; - int count = 0; - int push_xfsbufd = 0; + int push_xfsbufd = 0; spin_lock(&ailp->xa_lock); - target = ailp->xa_target; xfs_trans_ail_cursor_init(ailp, cur); lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); if (!lip || XFS_FORCED_SHUTDOWN(mp)) { @@ -371,7 +368,8 @@ xfs_ail_worker( */ xfs_trans_ail_cursor_done(ailp, cur); spin_unlock(&ailp->xa_lock); - goto out_done; + ailp->xa_last_pushed_lsn = 0; + return; } XFS_STATS_INC(xs_push_ail); @@ -388,7 +386,8 @@ xfs_ail_worker( * lots of contention on the AIL lists. 
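The xfs_ail_worker() body that continues below pushes log items while their LSN has not passed the push target, counting the ones that are "stuck" (the two sides of the hunk differ only on whether the comparison is < or <=). A minimal userspace sketch of that loop shape; lsn_cmp(), struct item and the sample AIL contents are made up:

#include <stdio.h>

typedef unsigned long long lsn_t;

/* Hypothetical three-way comparison, analogous to XFS_LSN_CMP(). */
static int lsn_cmp(lsn_t a, lsn_t b)
{
	return (a > b) - (a < b);
}

/* Hypothetical item: its LSN plus whether pushing it would block. */
struct item {
	lsn_t lsn;
	int locked;	/* non-zero: cannot push now, count it as stuck */
};

int main(void)
{
	struct item ail[] = { {10, 0}, {20, 1}, {30, 0}, {40, 0}, {90, 0} };
	size_t n = sizeof(ail) / sizeof(ail[0]);
	lsn_t target = 40;
	int count = 0, stuck = 0;
	size_t i;

	/* Push everything at or below the target; skip and count stuck items. */
	for (i = 0; i < n && lsn_cmp(ail[i].lsn, target) <= 0; i++) {
		if (ail[i].locked) {
			stuck++;
			continue;
		}
		count++;
		printf("pushed item with lsn %llu\n", ail[i].lsn);
	}

	printf("pushed %d, stuck %d, target %llu\n", count, stuck, target);
	return 0;
}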
*/ lsn = lip->li_lsn; - while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) { + flush_log = stuck = count = 0; + while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { int lock_result; /* * If we can lock the item without sleeping, unlock the AIL @@ -481,25 +480,21 @@ xfs_ail_worker( } /* assume we have more work to do in a short while */ -out_done: + tout = 10; if (!count) { /* We're past our target or empty, so idle */ ailp->xa_last_pushed_lsn = 0; /* - * We clear the XFS_AIL_PUSHING_BIT first before checking - * whether the target has changed. If the target has changed, - * this pushes the requeue race directly onto the result of the - * atomic test/set bit, so we are guaranteed that either the - * the pusher that changed the target or ourselves will requeue - * the work (but not both). + * Check for an updated push target before clearing the + * XFS_AIL_PUSHING_BIT. If the target changed, we've got more + * work to do. Wait a bit longer before starting that work. */ - clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); smp_rmb(); - if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || - test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) + if (ailp->xa_target == target) { + clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); return; - + } tout = 50; } else if (XFS_LSN_CMP(lsn, target) >= 0) { /* @@ -558,7 +553,7 @@ xfs_ail_push( * the XFS_AIL_PUSHING_BIT. */ smp_wmb(); - xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); + ailp->xa_target = threshold_lsn; if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); } diff --git a/trunk/include/asm-generic/vmlinux.lds.h b/trunk/include/asm-generic/vmlinux.lds.h index 077c00d94f6e..bd297a20ab98 100644 --- a/trunk/include/asm-generic/vmlinux.lds.h +++ b/trunk/include/asm-generic/vmlinux.lds.h @@ -170,10 +170,6 @@ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ - . = ALIGN(8); \ - VMLINUX_SYMBOL(__start___jump_table) = .; \ - *(__jump_table) \ - VMLINUX_SYMBOL(__stop___jump_table) = .; \ . 
= ALIGN(8); \ VMLINUX_SYMBOL(__start___verbose) = .; \ *(__verbose) \ @@ -232,6 +228,8 @@ \ BUG_TABLE \ \ + JUMP_TABLE \ + \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ @@ -276,70 +274,70 @@ /* Kernel symbol table: Normal symbols */ \ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab) = .; \ - *(SORT(___ksymtab+*)) \ + *(__ksymtab) \ VMLINUX_SYMBOL(__stop___ksymtab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ - *(SORT(___ksymtab_gpl+*)) \ + *(__ksymtab_gpl) \ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ - *(SORT(___ksymtab_unused+*)) \ + *(__ksymtab_unused) \ VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ - *(SORT(___ksymtab_unused_gpl+*)) \ + *(__ksymtab_unused_gpl) \ VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ - *(SORT(___ksymtab_gpl_future+*)) \ + *(__ksymtab_gpl_future) \ VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ } \ \ /* Kernel symbol table: Normal symbols */ \ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab) = .; \ - *(SORT(___kcrctab+*)) \ + *(__kcrctab) \ VMLINUX_SYMBOL(__stop___kcrctab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ - *(SORT(___kcrctab_gpl+*)) \ + *(__kcrctab_gpl) \ VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ - *(SORT(___kcrctab_unused+*)) \ + *(__kcrctab_unused) \ VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ - *(SORT(___kcrctab_unused_gpl+*)) \ + *(__kcrctab_unused_gpl) \ VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ - *(SORT(___kcrctab_gpl_future+*)) \ + *(__kcrctab_gpl_future) \ VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ } \ \ @@ -591,6 +589,14 @@ #define BUG_TABLE #endif +#define JUMP_TABLE \ + . = ALIGN(8); \ + __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start___jump_table) = .; \ + *(__jump_table) \ + VMLINUX_SYMBOL(__stop___jump_table) = .; \ + } + #ifdef CONFIG_PM_TRACE #define TRACEDATA \ . 
= ALIGN(4); \ diff --git a/trunk/include/drm/drm_fb_helper.h b/trunk/include/drm/drm_fb_helper.h index c99c3d3e7811..f22e7fe4b6db 100644 --- a/trunk/include/drm/drm_fb_helper.h +++ b/trunk/include/drm/drm_fb_helper.h @@ -118,7 +118,6 @@ int drm_fb_helper_setcolreg(unsigned regno, unsigned transp, struct fb_info *info); -bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper); void drm_fb_helper_restore(void); void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height); @@ -127,7 +126,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); -int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); +bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); int drm_fb_helper_debug_enter(struct fb_info *info); diff --git a/trunk/include/drm/drm_mm.h b/trunk/include/drm/drm_mm.h index 564b14aa7e16..c2f93a8ae2e1 100644 --- a/trunk/include/drm/drm_mm.h +++ b/trunk/include/drm/drm_mm.h @@ -86,7 +86,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm) } #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ &(mm)->head_node.node_list, \ - node_list) + node_list); #define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \ for (entry = (mm)->prev_scanned_node, \ next = entry ? list_entry(entry->node_list.next, \ diff --git a/trunk/include/drm/drm_pciids.h b/trunk/include/drm/drm_pciids.h index f04b2a3b0f49..816e30cbd968 100644 --- a/trunk/include/drm/drm_pciids.h +++ b/trunk/include/drm/drm_pciids.h @@ -155,7 +155,6 @@ {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ @@ -168,7 +167,6 @@ {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -201,7 +199,6 @@ {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ 
{0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ @@ -212,9 +209,7 @@ {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/trunk/include/drm/radeon_drm.h b/trunk/include/drm/radeon_drm.h index 787f7b6fd622..3bce1a4fc305 100644 --- a/trunk/include/drm/radeon_drm.h +++ b/trunk/include/drm/radeon_drm.h @@ -909,8 +909,6 @@ struct drm_radeon_cs { #define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */ #define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */ #define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */ -#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */ -#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */ struct drm_radeon_info { uint32_t request; diff --git a/trunk/include/linux/bootmem.h b/trunk/include/linux/bootmem.h index 01eca1794e14..b8613e806aa9 100644 --- a/trunk/include/linux/bootmem.h +++ b/trunk/include/linux/bootmem.h @@ -111,8 +111,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_node_nopanic(pgdat, x) \ - __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node_nopanic(pgdat, x) \ diff --git a/trunk/include/linux/bsearch.h b/trunk/include/linux/bsearch.h deleted file mode 100644 index 90b1aa867224..000000000000 --- a/trunk/include/linux/bsearch.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _LINUX_BSEARCH_H -#define _LINUX_BSEARCH_H - -#include - -void *bsearch(const void *key, const void *base, size_t num, size_t size, - int (*cmp)(const void *key, const void *elt)); - -#endif /* _LINUX_BSEARCH_H */ diff --git a/trunk/include/linux/capability.h b/trunk/include/linux/capability.h index d4675af963fa..16ee8b49a200 100644 --- a/trunk/include/linux/capability.h +++ b/trunk/include/linux/capability.h @@ -546,7 +546,18 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap); extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool task_ns_capable(struct task_struct *t, int cap); -extern bool nsown_capable(int cap); + +/** + * nsown_capable - Check superior capability to one's own user_ns + * @cap: The capability in question + * + * Return true if the current task has the given superior capability + * targeted at its own user namespace. 
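The deleted include/linux/bsearch.h above declared a generic bsearch(key, base, num, size, cmp). The C library ships the same-shaped helper in <stdlib.h>; a short userspace usage sketch:

#include <stdio.h>
#include <stdlib.h>

/* Comparator: first argument is the key, second the array element. */
static int cmp_int(const void *key, const void *elt)
{
	int k = *(const int *)key;
	int e = *(const int *)elt;

	return (k > e) - (k < e);
}

int main(void)
{
	int sorted[] = { 2, 4, 8, 16, 32, 64 };
	int key = 16;
	int *hit;

	/* Same calling convention as the removed kernel helper. */
	hit = bsearch(&key, sorted, sizeof(sorted) / sizeof(sorted[0]),
		      sizeof(sorted[0]), cmp_int);

	printf("%d %s\n", key, hit ? "found" : "not found");
	return 0;
}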
+ */ +static inline bool nsown_capable(int cap) +{ + return ns_capable(current_user_ns(), cap); +} /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/trunk/include/linux/clockchips.h b/trunk/include/linux/clockchips.h index d6733e27af34..fc53492b6ad7 100644 --- a/trunk/include/linux/clockchips.h +++ b/trunk/include/linux/clockchips.h @@ -56,52 +56,46 @@ enum clock_event_nofitiers { /** * struct clock_event_device - clock event device descriptor - * @event_handler: Assigned by the framework to be called by the low - * level handler of the event source - * @set_next_event: set next event function - * @next_event: local storage for the next event in oneshot mode + * @name: ptr to clock event name + * @features: features * @max_delta_ns: maximum delta value in ns * @min_delta_ns: minimum delta value in ns * @mult: nanosecond to cycles multiplier * @shift: nanoseconds to cycles divisor (power of two) - * @mode: operating mode assigned by the management code - * @features: features - * @retries: number of forced programming retries - * @set_mode: set mode function - * @broadcast: function to broadcast events - * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration - * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration - * @name: ptr to clock event name * @rating: variable to rate clock event devices * @irq: IRQ number (only for non CPU local devices) * @cpumask: cpumask to indicate for which CPUs this device works + * @set_next_event: set next event function + * @set_mode: set mode function + * @event_handler: Assigned by the framework to be called by the low + * level handler of the event source + * @broadcast: function to broadcast events * @list: list head for the management code + * @mode: operating mode assigned by the management code + * @next_event: local storage for the next event in oneshot mode + * @retries: number of forced programming retries */ struct clock_event_device { - void (*event_handler)(struct clock_event_device *); - int (*set_next_event)(unsigned long evt, - struct clock_event_device *); - ktime_t next_event; + const char *name; + unsigned int features; u64 max_delta_ns; u64 min_delta_ns; u32 mult; u32 shift; - enum clock_event_mode mode; - unsigned int features; - unsigned long retries; - - void (*broadcast)(const struct cpumask *mask); - void (*set_mode)(enum clock_event_mode mode, - struct clock_event_device *); - unsigned long min_delta_ticks; - unsigned long max_delta_ticks; - - const char *name; int rating; int irq; const struct cpumask *cpumask; + int (*set_next_event)(unsigned long evt, + struct clock_event_device *); + void (*set_mode)(enum clock_event_mode mode, + struct clock_event_device *); + void (*event_handler)(struct clock_event_device *); + void (*broadcast)(const struct cpumask *mask); struct list_head list; -} ____cacheline_aligned; + enum clock_event_mode mode; + ktime_t next_event; + unsigned long retries; +}; /* * Calculate a multiplication factor for scaled math, which is used to convert @@ -128,12 +122,6 @@ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt); extern void clockevents_register_device(struct clock_event_device *dev); -extern void clockevents_config_and_register(struct clock_event_device *dev, - u32 freq, unsigned long min_delta, - unsigned long max_delta); - -extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); - 
extern void clockevents_exchange_device(struct clock_event_device *old, struct clock_event_device *new); extern void clockevents_set_mode(struct clock_event_device *dev, diff --git a/trunk/include/linux/clocksource.h b/trunk/include/linux/clocksource.h index c918fbd33ee5..c37b21ad5a3b 100644 --- a/trunk/include/linux/clocksource.h +++ b/trunk/include/linux/clocksource.h @@ -159,38 +159,42 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, */ struct clocksource { /* - * Hotpath data, fits in a single cache line when the - * clocksource itself is cacheline aligned. + * First part of structure is read mostly */ + char *name; + struct list_head list; + int rating; cycle_t (*read)(struct clocksource *cs); - cycle_t cycle_last; + int (*enable)(struct clocksource *cs); + void (*disable)(struct clocksource *cs); cycle_t mask; u32 mult; u32 shift; u64 max_idle_ns; - + unsigned long flags; + cycle_t (*vread)(void); + void (*suspend)(struct clocksource *cs); + void (*resume)(struct clocksource *cs); #ifdef CONFIG_IA64 void *fsys_mmio; /* used by fsyscall asm code */ #define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr)) #else #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) #endif - const char *name; - struct list_head list; - int rating; - cycle_t (*vread)(void); - int (*enable)(struct clocksource *cs); - void (*disable)(struct clocksource *cs); - unsigned long flags; - void (*suspend)(struct clocksource *cs); - void (*resume)(struct clocksource *cs); + + /* + * Second part is written at each timer interrupt + * Keep it in a different cache line to dirty no + * more than one cache line. + */ + cycle_t cycle_last ____cacheline_aligned_in_smp; #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* Watchdog related data, used by the framework */ struct list_head wd_list; cycle_t wd_last; #endif -} ____cacheline_aligned; +}; /* * Clock source flags bits:: @@ -337,6 +341,4 @@ static inline void update_vsyscall_tz(void) extern void timekeeping_notify(struct clocksource *clock); -extern int clocksource_i8253_init(void); - #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/trunk/include/linux/cpufreq.h b/trunk/include/linux/cpufreq.h index 11be48e0d168..9343dd3de858 100644 --- a/trunk/include/linux/cpufreq.h +++ b/trunk/include/linux/cpufreq.h @@ -3,7 +3,7 @@ * * Copyright (C) 2001 Russell King * (C) 2002 - 2003 Dominik Brodowski - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -56,9 +56,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb, #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) -/* Frequency values here are CPU kHz so that hardware which doesn't run - * with some frequencies can complain without having to guess what per - * cent / per mille means. +/* Frequency values here are CPU kHz so that hardware which doesn't run + * with some frequencies can complain without having to guess what per + * cent / per mille means. * Maximum transition latency is in nanoseconds - if it's unknown, * CPUFREQ_ETERNAL shall be used. 
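The clockchips.h and clocksource.h hunks above revolve around a mult/shift pair: a cycle count becomes nanoseconds as ns = (cycles * mult) >> shift, with mult derived from the device rate (that is what the "Calculate a multiplication factor for scaled math" comment refers to). A small userspace sketch of the arithmetic; the 1 MHz rate and the shift of 20 are arbitrary example values:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* mult/shift pair so that ns = (cycles * mult) >> shift for a given rate. */
struct cyc2ns_factor {
	uint32_t mult;
	uint32_t shift;
};

static struct cyc2ns_factor calc_mult_shift(uint64_t freq_hz, uint32_t shift)
{
	struct cyc2ns_factor f;

	f.shift = shift;
	/* Round to nearest to keep the long-term conversion error small. */
	f.mult = (uint32_t)(((NSEC_PER_SEC << shift) + freq_hz / 2) / freq_hz);
	return f;
}

static uint64_t cyc2ns(uint64_t cycles, struct cyc2ns_factor f)
{
	/* cycles * mult must stay within 64 bits for the result to be valid. */
	return (cycles * f.mult) >> f.shift;
}

int main(void)
{
	/* Hypothetical 1 MHz free-running counter, shift chosen as 20. */
	struct cyc2ns_factor f = calc_mult_shift(1000000, 20);

	printf("mult=%u shift=%u\n", (unsigned)f.mult, (unsigned)f.shift);
	printf("123 cycles -> %llu ns\n",
	       (unsigned long long)cyc2ns(123, f));
	return 0;
}

Keeping cycles * mult inside 64 bits is also one reason the clocksource carries a max_idle_ns bound.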
*/ @@ -72,15 +72,13 @@ extern struct kobject *cpufreq_global_kobject; struct cpufreq_cpuinfo { unsigned int max_freq; unsigned int min_freq; - - /* in 10^(-9) s = nanoseconds */ - unsigned int transition_latency; + unsigned int transition_latency; /* in 10^(-9) s = nanoseconds */ }; struct cpufreq_real_policy { unsigned int min; /* in kHz */ unsigned int max; /* in kHz */ - unsigned int policy; /* see above */ + unsigned int policy; /* see above */ struct cpufreq_governor *governor; /* see below */ }; @@ -96,7 +94,7 @@ struct cpufreq_policy { unsigned int max; /* in kHz */ unsigned int cur; /* in kHz, only needed if cpufreq * governors are used */ - unsigned int policy; /* see above */ + unsigned int policy; /* see above */ struct cpufreq_governor *governor; /* see below */ struct work_struct update; /* if update_policy() needs to be @@ -169,11 +167,11 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu struct cpufreq_governor { char name[CPUFREQ_NAME_LEN]; - int (*governor) (struct cpufreq_policy *policy, + int (*governor) (struct cpufreq_policy *policy, unsigned int event); ssize_t (*show_setspeed) (struct cpufreq_policy *policy, char *buf); - int (*store_setspeed) (struct cpufreq_policy *policy, + int (*store_setspeed) (struct cpufreq_policy *policy, unsigned int freq); unsigned int max_transition_latency; /* HW must be able to switch to next freq faster than this value in nano secs or we @@ -182,8 +180,7 @@ struct cpufreq_governor { struct module *owner; }; -/* - * Pass a target to the cpufreq driver. +/* pass a target to the cpufreq driver */ extern int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, @@ -240,9 +237,9 @@ struct cpufreq_driver { /* flags */ -#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if +#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if * all ->init() calls failed */ -#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel +#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel * "constants" aren't affected by * frequency transitions */ #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed @@ -255,7 +252,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); -static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) +static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) { if (policy->min < min) policy->min = min; @@ -389,15 +386,34 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, /* the following 3 funtions are for cpufreq core use only */ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); -void cpufreq_cpu_put(struct cpufreq_policy *data); +void cpufreq_cpu_put (struct cpufreq_policy *data); /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; -void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, +void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu); void cpufreq_frequency_table_put_attr(unsigned int cpu); +/********************************************************************* + * UNIFIED DEBUG HELPERS * + *********************************************************************/ + +#define 
CPUFREQ_DEBUG_CORE 1 +#define CPUFREQ_DEBUG_DRIVER 2 +#define CPUFREQ_DEBUG_GOVERNOR 4 + +#ifdef CONFIG_CPU_FREQ_DEBUG + +extern void cpufreq_debug_printk(unsigned int type, const char *prefix, + const char *fmt, ...); + +#else + +#define cpufreq_debug_printk(msg...) do { } while(0) + +#endif /* CONFIG_CPU_FREQ_DEBUG */ + #endif /* _LINUX_CPUFREQ_H */ diff --git a/trunk/include/linux/cred.h b/trunk/include/linux/cred.h index be16b61283cc..9aeeb0ba2003 100644 --- a/trunk/include/linux/cred.h +++ b/trunk/include/linux/cred.h @@ -146,7 +146,6 @@ struct cred { void *security; /* subjective LSM security */ #endif struct user_struct *user; /* real user ID subscription */ - struct user_namespace *user_ns; /* cached user->user_ns */ struct group_info *group_info; /* supplementary groups for euid/fsgid */ struct rcu_head rcu; /* RCU deletion hook */ }; @@ -355,15 +354,10 @@ static inline void put_cred(const struct cred *_cred) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) +#define _current_user_ns() (current_cred_xxx(user)->user_ns) #define current_security() (current_cred_xxx(security)) -#ifdef CONFIG_USER_NS -#define current_user_ns() (current_cred_xxx(user_ns)) -#else -extern struct user_namespace init_user_ns; -#define current_user_ns() (&init_user_ns) -#endif - +extern struct user_namespace *current_user_ns(void); #define current_uid_gid(_uid, _gid) \ do { \ diff --git a/trunk/include/linux/device.h b/trunk/include/linux/device.h index 0d7535000821..ab8dfc095709 100644 --- a/trunk/include/linux/device.h +++ b/trunk/include/linux/device.h @@ -442,6 +442,7 @@ struct device { struct dev_archdata archdata; struct device_node *of_node; /* associated device tree node */ + const struct of_device_id *of_match; /* matching of_device_id from driver */ dev_t devt; /* dev_t, creates the sysfs "dev" */ @@ -632,6 +633,13 @@ static inline int devtmpfs_mount(const char *mountpoint) { return 0; } /* drivers/base/power/shutdown.c */ extern void device_shutdown(void); +#ifndef CONFIG_ARCH_NO_SYSDEV_OPS +/* drivers/base/sys.c */ +extern void sysdev_shutdown(void); +#else +static inline void sysdev_shutdown(void) { } +#endif + /* debugging and troubleshooting/diagnostic helpers. */ extern const char *dev_driver_string(const struct device *dev); diff --git a/trunk/include/linux/dynamic_debug.h b/trunk/include/linux/dynamic_debug.h index e747ecd48e1c..0c9653f11c18 100644 --- a/trunk/include/linux/dynamic_debug.h +++ b/trunk/include/linux/dynamic_debug.h @@ -1,6 +1,8 @@ #ifndef _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H +#include + /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They * use independent hash functions, to reduce the chance of false positives. 
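The cpufreq.h hunk above brings back the CPUFREQ_DEBUG_CORE/DRIVER/GOVERNOR type bits and a cpufreq_debug_printk() that collapses to an empty do { } while (0) when CONFIG_CPU_FREQ_DEBUG is off. A minimal userspace sketch of that compile-time-filtered debug macro; ENABLE_DEBUG, dbg_printk() and enabled_types are invented names:

#include <stdio.h>

#define DEBUG_CORE	1
#define DEBUG_DRIVER	2

/* Flip this to 0 and every call site compiles down to nothing. */
#define ENABLE_DEBUG	1

static unsigned int enabled_types = DEBUG_CORE;

#if ENABLE_DEBUG
/* The ## before __VA_ARGS__ is the GNU/clang extension kernel code relies on. */
#define dbg_printk(type, fmt, ...)					\
	do {								\
		if ((type) & enabled_types)				\
			fprintf(stderr, "debug: " fmt, ##__VA_ARGS__);	\
	} while (0)
#else
#define dbg_printk(type, fmt, ...)	do { } while (0)
#endif

int main(void)
{
	dbg_printk(DEBUG_CORE, "core message %d\n", 42);	/* printed */
	dbg_printk(DEBUG_DRIVER, "driver message\n");		/* filtered */
	return 0;
}

The empty do { } while (0) form keeps the no-op variant safe to use as a single statement after an if without a brace.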
diff --git a/trunk/include/linux/fb.h b/trunk/include/linux/fb.h index 6a8274877171..df728c1c29ed 100644 --- a/trunk/include/linux/fb.h +++ b/trunk/include/linux/fb.h @@ -832,7 +832,6 @@ struct fb_tile_ops { #define FBINFO_CAN_FORCE_OUTPUT 0x200000 struct fb_info { - atomic_t count; int node; int flags; struct mutex lock; /* Lock for open/release/ioctl funcs */ diff --git a/trunk/include/linux/flex_array.h b/trunk/include/linux/flex_array.h index ebeb2f3ad068..70e4efabe0fb 100644 --- a/trunk/include/linux/flex_array.h +++ b/trunk/include/linux/flex_array.h @@ -61,7 +61,7 @@ struct flex_array { struct flex_array *flex_array_alloc(int element_size, unsigned int total, gfp_t flags); int flex_array_prealloc(struct flex_array *fa, unsigned int start, - unsigned int nr_elements, gfp_t flags); + unsigned int end, gfp_t flags); void flex_array_free(struct flex_array *fa); void flex_array_free_parts(struct flex_array *fa); int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h index cdf9495df204..dbd860af0804 100644 --- a/trunk/include/linux/fs.h +++ b/trunk/include/linux/fs.h @@ -358,6 +358,7 @@ struct inodes_stat_t { #define FS_EXTENT_FL 0x00080000 /* Extents */ #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_COW_FL 0x02000000 /* Cow file */ #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h index 9d88e1cb5dbb..ca29e03c1fac 100644 --- a/trunk/include/linux/ftrace.h +++ b/trunk/include/linux/ftrace.h @@ -29,22 +29,9 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); -struct ftrace_hash; - -enum { - FTRACE_OPS_FL_ENABLED = 1 << 0, - FTRACE_OPS_FL_GLOBAL = 1 << 1, - FTRACE_OPS_FL_DYNAMIC = 1 << 2, -}; - struct ftrace_ops { - ftrace_func_t func; - struct ftrace_ops *next; - unsigned long flags; -#ifdef CONFIG_DYNAMIC_FTRACE - struct ftrace_hash *notrace_hash; - struct ftrace_hash *filter_hash; -#endif + ftrace_func_t func; + struct ftrace_ops *next; }; extern int function_trace_stop; @@ -159,13 +146,14 @@ extern void unregister_ftrace_function_probe_all(char *glob); extern int ftrace_text_reserved(void *start, void *end); enum { - FTRACE_FL_ENABLED = (1 << 30), - FTRACE_FL_FREE = (1 << 31), + FTRACE_FL_FREE = (1 << 0), + FTRACE_FL_FAILED = (1 << 1), + FTRACE_FL_FILTER = (1 << 2), + FTRACE_FL_ENABLED = (1 << 3), + FTRACE_FL_NOTRACE = (1 << 4), + FTRACE_FL_CONVERTED = (1 << 5), }; -#define FTRACE_FL_MASK (0x3UL << 30) -#define FTRACE_REF_MAX ((1 << 30) - 1) - struct dyn_ftrace { union { unsigned long ip; /* address of mcount call-site */ @@ -179,12 +167,7 @@ struct dyn_ftrace { }; int ftrace_force_update(void); -void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, - int len, int reset); -void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, - int len, int reset); -void ftrace_set_global_filter(unsigned char *buf, int len, int reset); -void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); +void ftrace_set_filter(unsigned char *buf, int len, int reset); int register_ftrace_command(struct ftrace_func_command *cmd); int unregister_ftrace_command(struct ftrace_func_command *cmd); diff --git a/trunk/include/linux/ftrace_event.h b/trunk/include/linux/ftrace_event.h index 
b5a550a39a70..22b32af1b5ec 100644 --- a/trunk/include/linux/ftrace_event.h +++ b/trunk/include/linux/ftrace_event.h @@ -37,7 +37,6 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; - int padding; }; #define FTRACE_MAX_EVENT \ diff --git a/trunk/include/linux/gfp.h b/trunk/include/linux/gfp.h index 56d8fc87fbbc..bfb8f934521e 100644 --- a/trunk/include/linux/gfp.h +++ b/trunk/include/linux/gfp.h @@ -353,8 +353,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask); void *alloc_pages_exact(size_t size, gfp_t gfp_mask); void free_pages_exact(void *virt, size_t size); -/* This is different from alloc_pages_exact_node !!! */ -void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask), 0) diff --git a/trunk/include/linux/huge_mm.h b/trunk/include/linux/huge_mm.h index 8847c8c29791..df29c8fde36b 100644 --- a/trunk/include/linux/huge_mm.h +++ b/trunk/include/linux/huge_mm.h @@ -117,7 +117,7 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long end, long adjust_next) { - if (!vma->anon_vma || vma->vm_ops) + if (!vma->anon_vma || vma->vm_ops || vma->vm_file) return; __vma_adjust_trans_huge(vma, start, end, adjust_next); } diff --git a/trunk/include/linux/init.h b/trunk/include/linux/init.h index 9146f39cdddf..577671c55153 100644 --- a/trunk/include/linux/init.h +++ b/trunk/include/linux/init.h @@ -79,29 +79,29 @@ #define __exitused __used #endif -#define __exit __section(.exit.text) __exitused __cold notrace +#define __exit __section(.exit.text) __exitused __cold /* Used for HOTPLUG */ -#define __devinit __section(.devinit.text) __cold notrace +#define __devinit __section(.devinit.text) __cold #define __devinitdata __section(.devinit.data) #define __devinitconst __section(.devinit.rodata) -#define __devexit __section(.devexit.text) __exitused __cold notrace +#define __devexit __section(.devexit.text) __exitused __cold #define __devexitdata __section(.devexit.data) #define __devexitconst __section(.devexit.rodata) /* Used for HOTPLUG_CPU */ -#define __cpuinit __section(.cpuinit.text) __cold notrace +#define __cpuinit __section(.cpuinit.text) __cold #define __cpuinitdata __section(.cpuinit.data) #define __cpuinitconst __section(.cpuinit.rodata) -#define __cpuexit __section(.cpuexit.text) __exitused __cold notrace +#define __cpuexit __section(.cpuexit.text) __exitused __cold #define __cpuexitdata __section(.cpuexit.data) #define __cpuexitconst __section(.cpuexit.rodata) /* Used for MEMORY_HOTPLUG */ -#define __meminit __section(.meminit.text) __cold notrace +#define __meminit __section(.meminit.text) __cold #define __meminitdata __section(.meminit.data) #define __meminitconst __section(.meminit.rodata) -#define __memexit __section(.memexit.text) __exitused __cold notrace +#define __memexit __section(.memexit.text) __exitused __cold #define __memexitdata __section(.memexit.data) #define __memexitconst __section(.memexit.rodata) diff --git a/trunk/include/linux/init_task.h b/trunk/include/linux/init_task.h index 689496bb6654..caa151fbebb7 100644 --- a/trunk/include/linux/init_task.h +++ b/trunk/include/linux/init_task.h @@ -134,6 +134,7 @@ extern struct cred init_cred; .stack = &init_thread_info, \ .usage = ATOMIC_INIT(2), \ .flags = PF_KTHREAD, \ + .lock_depth = -1, \ .prio = MAX_PRIO-20, \ .static_prio = MAX_PRIO-20, \ .normal_prio = MAX_PRIO-20, \ diff --git a/trunk/include/linux/irq.h b/trunk/include/linux/irq.h index 8b4538446636..09a308072f56 100644 --- 
a/trunk/include/linux/irq.h +++ b/trunk/include/linux/irq.h @@ -53,13 +53,12 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); * Bits which can be modified via irq_set/clear/modify_status_flags() * IRQ_LEVEL - Interrupt is level type. Will be also * updated in the code when the above trigger - * bits are modified via irq_set_irq_type() + * bits are modified via set_irq_type() * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect * it from affinity setting * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing * IRQ_NOREQUEST - Interrupt cannot be requested via * request_irq() - * IRQ_NOTHREAD - Interrupt cannot be threaded * IRQ_NOAUTOEN - Interrupt is not automatically enabled in * request/setup_irq() * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) @@ -86,7 +85,6 @@ enum { IRQ_NO_BALANCING = (1 << 13), IRQ_MOVE_PCNTXT = (1 << 14), IRQ_NESTED_THREAD = (1 << 15), - IRQ_NOTHREAD = (1 << 16), }; #define IRQF_MODIFY_MASK \ @@ -263,6 +261,23 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) * struct irq_chip - hardware interrupt chip descriptor * * @name: name for /proc/interrupts + * @startup: deprecated, replaced by irq_startup + * @shutdown: deprecated, replaced by irq_shutdown + * @enable: deprecated, replaced by irq_enable + * @disable: deprecated, replaced by irq_disable + * @ack: deprecated, replaced by irq_ack + * @mask: deprecated, replaced by irq_mask + * @mask_ack: deprecated, replaced by irq_mask_ack + * @unmask: deprecated, replaced by irq_unmask + * @eoi: deprecated, replaced by irq_eoi + * @end: deprecated, will go away with __do_IRQ() + * @set_affinity: deprecated, replaced by irq_set_affinity + * @retrigger: deprecated, replaced by irq_retrigger + * @set_type: deprecated, replaced by irq_set_type + * @set_wake: deprecated, replaced by irq_wake + * @bus_lock: deprecated, replaced by irq_bus_lock + * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock + * * @irq_startup: start up the interrupt (defaults to ->enable if NULL) * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) @@ -280,9 +295,6 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips * @irq_cpu_online: configure an interrupt source for a secondary CPU * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU - * @irq_suspend: function called from core code on suspend once per chip - * @irq_resume: function called from core code on resume once per chip - * @irq_pm_shutdown: function called from core code on shutdown once per chip * @irq_print_chip: optional to print special chip info in show_interrupts * @flags: chip specific flags * @@ -312,10 +324,6 @@ struct irq_chip { void (*irq_cpu_online)(struct irq_data *data); void (*irq_cpu_offline)(struct irq_data *data); - void (*irq_suspend)(struct irq_data *data); - void (*irq_resume)(struct irq_data *data); - void (*irq_pm_shutdown)(struct irq_data *data); - void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); unsigned long flags; @@ -431,7 +439,7 @@ irq_set_handler(unsigned int irq, irq_flow_handler_t handle) /* * Set a highlevel chained flow handler for a given IRQ. 
* (a chained handler is automatically enabled and set to - * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) + * IRQ_NOREQUEST and IRQ_NOPROBE) */ static inline void irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) @@ -461,16 +469,6 @@ static inline void irq_set_probe(unsigned int irq) irq_modify_status(irq, IRQ_NOPROBE, 0); } -static inline void irq_set_nothread(unsigned int irq) -{ - irq_modify_status(irq, 0, IRQ_NOTHREAD); -} - -static inline void irq_set_thread(unsigned int irq) -{ - irq_modify_status(irq, IRQ_NOTHREAD, 0); -} - static inline void irq_set_nested_thread(unsigned int irq, bool nest) { if (nest) @@ -575,145 +573,6 @@ static inline int irq_reserve_irq(unsigned int irq) return irq_reserve_irqs(irq, 1); } -#ifndef irq_reg_writel -# define irq_reg_writel(val, addr) writel(val, addr) -#endif -#ifndef irq_reg_readl -# define irq_reg_readl(addr) readl(addr) -#endif - -/** - * struct irq_chip_regs - register offsets for struct irq_gci - * @enable: Enable register offset to reg_base - * @disable: Disable register offset to reg_base - * @mask: Mask register offset to reg_base - * @ack: Ack register offset to reg_base - * @eoi: Eoi register offset to reg_base - * @type: Type configuration register offset to reg_base - * @polarity: Polarity configuration register offset to reg_base - */ -struct irq_chip_regs { - unsigned long enable; - unsigned long disable; - unsigned long mask; - unsigned long ack; - unsigned long eoi; - unsigned long type; - unsigned long polarity; -}; - -/** - * struct irq_chip_type - Generic interrupt chip instance for a flow type - * @chip: The real interrupt chip which provides the callbacks - * @regs: Register offsets for this chip - * @handler: Flow handler associated with this chip - * @type: Chip can handle these flow types - * - * A irq_generic_chip can have several instances of irq_chip_type when - * it requires different functions and register offsets for different - * flow types. - */ -struct irq_chip_type { - struct irq_chip chip; - struct irq_chip_regs regs; - irq_flow_handler_t handler; - u32 type; -}; - -/** - * struct irq_chip_generic - Generic irq chip data structure - * @lock: Lock to protect register and cache data access - * @reg_base: Register base address (virtual) - * @irq_base: Interrupt base nr for this chip - * @irq_cnt: Number of interrupts handled by this chip - * @mask_cache: Cached mask register - * @type_cache: Cached type register - * @polarity_cache: Cached polarity register - * @wake_enabled: Interrupt can wakeup from suspend - * @wake_active: Interrupt is marked as an wakeup from suspend source - * @num_ct: Number of available irq_chip_type instances (usually 1) - * @private: Private data for non generic chip callbacks - * @list: List head for keeping track of instances - * @chip_types: Array of interrupt irq_chip_types - * - * Note, that irq_chip_generic can have multiple irq_chip_type - * implementations which can be associated to a particular irq line of - * an irq_chip_generic instance. That allows to share and protect - * state in an irq_chip_generic instance when we need to implement - * different flow mechanisms (level/edge) for it. 
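The irq_chip_generic structure quoted just below ends in a zero-length chip_types[0] member, and irq_alloc_generic_chip() sizes one allocation for num_ct trailing irq_chip_type entries. A minimal userspace sketch of that flexible-array allocation pattern; struct chip_type, struct generic_chip and alloc_generic_chip() are stand-ins, not the kernel types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-flow-type data, standing in for struct irq_chip_type. */
struct chip_type {
	unsigned int type;
};

/* Container with a trailing flexible array, like chip_types[0] below. */
struct generic_chip {
	unsigned int irq_base;
	unsigned int num_ct;
	struct chip_type chip_types[];	/* C99 spelling of the [0] idiom */
};

static struct generic_chip *alloc_generic_chip(unsigned int irq_base,
					       unsigned int num_ct)
{
	struct generic_chip *gc;

	/* One allocation covers the header plus num_ct trailing entries. */
	gc = calloc(1, sizeof(*gc) + num_ct * sizeof(gc->chip_types[0]));
	if (!gc)
		return NULL;
	gc->irq_base = irq_base;
	gc->num_ct = num_ct;
	return gc;
}

int main(void)
{
	struct generic_chip *gc = alloc_generic_chip(32, 2);

	if (!gc)
		return 1;
	gc->chip_types[0].type = 0x1;
	gc->chip_types[1].type = 0x4;
	printf("irq_base=%u num_ct=%u first type=%#x\n",
	       gc->irq_base, gc->num_ct, gc->chip_types[0].type);
	free(gc);
	return 0;
}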
- */ -struct irq_chip_generic { - raw_spinlock_t lock; - void __iomem *reg_base; - unsigned int irq_base; - unsigned int irq_cnt; - u32 mask_cache; - u32 type_cache; - u32 polarity_cache; - u32 wake_enabled; - u32 wake_active; - unsigned int num_ct; - void *private; - struct list_head list; - struct irq_chip_type chip_types[0]; -}; - -/** - * enum irq_gc_flags - Initialization flags for generic irq chips - * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg - * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for - * irq chips which need to call irq_set_wake() on - * the parent irq. Usually GPIO implementations - */ -enum irq_gc_flags { - IRQ_GC_INIT_MASK_CACHE = 1 << 0, - IRQ_GC_INIT_NESTED_LOCK = 1 << 1, -}; - -/* Generic chip callback functions */ -void irq_gc_noop(struct irq_data *d); -void irq_gc_mask_disable_reg(struct irq_data *d); -void irq_gc_mask_set_bit(struct irq_data *d); -void irq_gc_mask_clr_bit(struct irq_data *d); -void irq_gc_unmask_enable_reg(struct irq_data *d); -void irq_gc_ack(struct irq_data *d); -void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); -void irq_gc_eoi(struct irq_data *d); -int irq_gc_set_wake(struct irq_data *d, unsigned int on); - -/* Setup functions for irq_chip_generic */ -struct irq_chip_generic * -irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, - void __iomem *reg_base, irq_flow_handler_t handler); -void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, - enum irq_gc_flags flags, unsigned int clr, - unsigned int set); -int irq_setup_alt_chip(struct irq_data *d, unsigned int type); -void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, - unsigned int clr, unsigned int set); - -static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) -{ - return container_of(d->chip, struct irq_chip_type, chip); -} - -#define IRQ_MSK(n) (u32)((n) < 32 ? 
((1 << (n)) - 1) : UINT_MAX) - -#ifdef CONFIG_SMP -static inline void irq_gc_lock(struct irq_chip_generic *gc) -{ - raw_spin_lock(&gc->lock); -} - -static inline void irq_gc_unlock(struct irq_chip_generic *gc) -{ - raw_spin_unlock(&gc->lock); -} -#else -static inline void irq_gc_lock(struct irq_chip_generic *gc) { } -static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } -#endif - #endif /* CONFIG_GENERIC_HARDIRQS */ #endif /* !CONFIG_S390 */ diff --git a/trunk/include/linux/irqdesc.h b/trunk/include/linux/irqdesc.h index 2d921b35212c..a082905b5ebe 100644 --- a/trunk/include/linux/irqdesc.h +++ b/trunk/include/linux/irqdesc.h @@ -16,18 +16,16 @@ struct timer_rand_state; * @irq_data: per irq and chip data passed down to chip functions * @timer_rand_state: pointer to timer rand state struct * @kstat_irqs: irq stats per cpu - * @handle_irq: highlevel irq-events handler - * @preflow_handler: handler called before the flow handler (currently used by sparc) + * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] * @action: the irq action chain * @status: status information * @core_internal_state__do_not_mess_with_it: core internal status information * @depth: disable-depth, for nested irq_disable() calls - * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers + * @wake_depth: enable depth, for multiple set_irq_wake() callers * @irq_count: stats field to detect stalled irqs * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP - * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes * @pending_mask: pending rebalanced interrupts * @threads_oneshot: bitfield to handle shared oneshot threads @@ -111,7 +109,10 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de desc->handle_irq(irq, desc); } -int generic_handle_irq(unsigned int irq); +static inline void generic_handle_irq(unsigned int irq) +{ + generic_handle_irq_desc(irq, irq_to_desc(irq)); +} /* Test to see if a driver has successfully requested an irq */ static inline int irq_has_action(unsigned int irq) diff --git a/trunk/include/linux/jump_label.h b/trunk/include/linux/jump_label.h index 83e745f3ead7..7880f18e4b86 100644 --- a/trunk/include/linux/jump_label.h +++ b/trunk/include/linux/jump_label.h @@ -1,43 +1,20 @@ #ifndef _LINUX_JUMP_LABEL_H #define _LINUX_JUMP_LABEL_H -#include -#include - #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) - -struct jump_label_key { - atomic_t enabled; - struct jump_entry *entries; -#ifdef CONFIG_MODULES - struct jump_label_mod *next; -#endif -}; - # include # define HAVE_JUMP_LABEL #endif enum jump_label_type { - JUMP_LABEL_DISABLE = 0, JUMP_LABEL_ENABLE, + JUMP_LABEL_DISABLE }; struct module; #ifdef HAVE_JUMP_LABEL -#ifdef CONFIG_MODULES -#define JUMP_LABEL_INIT {{ 0 }, NULL, NULL} -#else -#define JUMP_LABEL_INIT {{ 0 }, NULL} -#endif - -static __always_inline bool static_branch(struct jump_label_key *key) -{ - return arch_static_branch(key); -} - extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; @@ -46,37 +23,37 @@ extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_text_poke_early(jump_label_t addr); -extern int jump_label_text_reserved(void *start, void *end); -extern void jump_label_inc(struct jump_label_key *key); 
-extern void jump_label_dec(struct jump_label_key *key); -extern bool jump_label_enabled(struct jump_label_key *key); +extern void jump_label_update(unsigned long key, enum jump_label_type type); extern void jump_label_apply_nops(struct module *mod); +extern int jump_label_text_reserved(void *start, void *end); -#else +#define jump_label_enable(key) \ + jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); -#include +#define jump_label_disable(key) \ + jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); -#define JUMP_LABEL_INIT {ATOMIC_INIT(0)} +#else -struct jump_label_key { - atomic_t enabled; -}; +#define JUMP_LABEL(key, label) \ +do { \ + if (unlikely(*key)) \ + goto label; \ +} while (0) -static __always_inline bool static_branch(struct jump_label_key *key) -{ - if (unlikely(atomic_read(&key->enabled))) - return true; - return false; -} +#define jump_label_enable(cond_var) \ +do { \ + *(cond_var) = 1; \ +} while (0) -static inline void jump_label_inc(struct jump_label_key *key) -{ - atomic_inc(&key->enabled); -} +#define jump_label_disable(cond_var) \ +do { \ + *(cond_var) = 0; \ +} while (0) -static inline void jump_label_dec(struct jump_label_key *key) +static inline int jump_label_apply_nops(struct module *mod) { - atomic_dec(&key->enabled); + return 0; } static inline int jump_label_text_reserved(void *start, void *end) @@ -87,16 +64,16 @@ static inline int jump_label_text_reserved(void *start, void *end) static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} -static inline bool jump_label_enabled(struct jump_label_key *key) -{ - return !!atomic_read(&key->enabled); -} - -static inline int jump_label_apply_nops(struct module *mod) -{ - return 0; -} - #endif +#define COND_STMT(key, stmt) \ +do { \ + __label__ jl_enabled; \ + JUMP_LABEL(key, jl_enabled); \ + if (0) { \ +jl_enabled: \ + stmt; \ + } \ +} while (0) + #endif diff --git a/trunk/include/linux/jump_label_ref.h b/trunk/include/linux/jump_label_ref.h new file mode 100644 index 000000000000..e5d012ad92c6 --- /dev/null +++ b/trunk/include/linux/jump_label_ref.h @@ -0,0 +1,44 @@ +#ifndef _LINUX_JUMP_LABEL_REF_H +#define _LINUX_JUMP_LABEL_REF_H + +#include +#include + +#ifdef HAVE_JUMP_LABEL + +static inline void jump_label_inc(atomic_t *key) +{ + if (atomic_add_return(1, key) == 1) + jump_label_enable(key); +} + +static inline void jump_label_dec(atomic_t *key) +{ + if (atomic_dec_and_test(key)) + jump_label_disable(key); +} + +#else /* !HAVE_JUMP_LABEL */ + +static inline void jump_label_inc(atomic_t *key) +{ + atomic_inc(key); +} + +static inline void jump_label_dec(atomic_t *key) +{ + atomic_dec(key); +} + +#undef JUMP_LABEL +#define JUMP_LABEL(key, label) \ +do { \ + if (unlikely(__builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(key), atomic_t *), \ + atomic_read((atomic_t *)(key)), *(key)))) \ + goto label; \ +} while (0) + +#endif /* HAVE_JUMP_LABEL */ + +#endif /* _LINUX_JUMP_LABEL_REF_H */ diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index f37ba716ef8b..00cec4dc0ae2 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -283,7 +283,6 @@ extern char *get_options(const char *str, int nints, int *ints); extern unsigned long long memparse(const char *ptr, char **retptr); extern int core_kernel_text(unsigned long addr); -extern int core_kernel_data(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void 
*ptr); diff --git a/trunk/include/linux/kmod.h b/trunk/include/linux/kmod.h index 310231823852..6efd7a78de6a 100644 --- a/trunk/include/linux/kmod.h +++ b/trunk/include/linux/kmod.h @@ -113,6 +113,5 @@ extern void usermodehelper_init(void); extern int usermodehelper_disable(void); extern void usermodehelper_enable(void); -extern bool usermodehelper_is_disabled(void); #endif /* __LINUX_KMOD_H__ */ diff --git a/trunk/include/linux/list.h b/trunk/include/linux/list.h index cc6d2aa6b415..3a54266a1e85 100644 --- a/trunk/include/linux/list.h +++ b/trunk/include/linux/list.h @@ -4,7 +4,7 @@ #include #include #include -#include +#include /* * Simple doubly linked list implementation. @@ -367,15 +367,18 @@ static inline void list_splice_tail_init(struct list_head *list, * @head: the head for your list. */ #define list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); pos = pos->next) + for (pos = (head)->next; prefetch(pos->next), pos != (head); \ + pos = pos->next) /** * __list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. * - * This variant doesn't differ from list_for_each() any more. - * We don't do prefetching in either case. + * This variant differs from list_for_each() in that it's the + * simplest possible list iteration code, no prefetching is done. + * Use this for code that knows the list to be very short (empty + * or 1 entry) most of the time. */ #define __list_for_each(pos, head) \ for (pos = (head)->next; pos != (head); pos = pos->next) @@ -386,7 +389,8 @@ static inline void list_splice_tail_init(struct list_head *list, * @head: the head for your list. */ #define list_for_each_prev(pos, head) \ - for (pos = (head)->prev; pos != (head); pos = pos->prev) + for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ + pos = pos->prev) /** * list_for_each_safe - iterate over a list safe against removal of list entry @@ -406,7 +410,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_prev_safe(pos, n, head) \ for (pos = (head)->prev, n = pos->prev; \ - pos != (head); \ + prefetch(pos->prev), pos != (head); \ pos = n, n = pos->prev) /** @@ -417,7 +421,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry(pos, head, member) \ for (pos = list_entry((head)->next, typeof(*pos), member); \ - &pos->member != (head); \ + prefetch(pos->member.next), &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** @@ -428,7 +432,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_entry((head)->prev, typeof(*pos), member); \ - &pos->member != (head); \ + prefetch(pos->member.prev), &pos->member != (head); \ pos = list_entry(pos->member.prev, typeof(*pos), member)) /** @@ -453,7 +457,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ + prefetch(pos->member.next), &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** @@ -467,7 +471,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue_reverse(pos, head, member) \ for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ - &pos->member != (head); \ + prefetch(pos->member.prev), &pos->member != 
(head); \ pos = list_entry(pos->member.prev, typeof(*pos), member)) /** @@ -479,7 +483,7 @@ static inline void list_splice_tail_init(struct list_head *list, * Iterate over list of given type, continuing from current position. */ #define list_for_each_entry_from(pos, head, member) \ - for (; &pos->member != (head); \ + for (; prefetch(pos->member.next), &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) /** @@ -660,7 +664,8 @@ static inline void hlist_move_list(struct hlist_head *old, #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ - for (pos = (head)->first; pos ; pos = pos->next) + for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ + pos = pos->next) #define hlist_for_each_safe(pos, n, head) \ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ @@ -675,7 +680,7 @@ static inline void hlist_move_list(struct hlist_head *old, */ #define hlist_for_each_entry(tpos, pos, head, member) \ for (pos = (head)->first; \ - pos && \ + pos && ({ prefetch(pos->next); 1;}) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) @@ -687,7 +692,7 @@ static inline void hlist_move_list(struct hlist_head *old, */ #define hlist_for_each_entry_continue(tpos, pos, member) \ for (pos = (pos)->next; \ - pos && \ + pos && ({ prefetch(pos->next); 1;}) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) @@ -698,7 +703,7 @@ static inline void hlist_move_list(struct hlist_head *old, * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from(tpos, pos, member) \ - for (; pos && \ + for (; pos && ({ prefetch(pos->next); 1;}) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ pos = pos->next) diff --git a/trunk/include/linux/mfd/wm831x/pdata.h b/trunk/include/linux/mfd/wm831x/pdata.h index 632d1567a1b6..afe4db49402d 100644 --- a/trunk/include/linux/mfd/wm831x/pdata.h +++ b/trunk/include/linux/mfd/wm831x/pdata.h @@ -81,9 +81,7 @@ struct wm831x_touch_pdata { int rpu; /** Pen down sensitivity resistor divider */ int pressure; /** Report pressure (boolean) */ unsigned int data_irq; /** Touch data ready IRQ */ - int data_irqf; /** IRQ flags for data ready IRQ */ unsigned int pd_irq; /** Touch pendown detect IRQ */ - int pd_irqf; /** IRQ flags for pen down IRQ */ }; enum wm831x_watchdog_action { diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h index 6507dde38b16..692dbae6ffa7 100644 --- a/trunk/include/linux/mm.h +++ b/trunk/include/linux/mm.h @@ -137,8 +137,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ) /* - * Special vmas that are non-mergable, non-mlock()able. - * Note: mm/huge_memory.c VM_NO_THP depends on this definition. + * special vmas that are non-mergable, non-mlock()able */ #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) @@ -1011,33 +1010,11 @@ int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); /* Is the vma a continuation of the stack vma above it? 
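As an illustrative aside (not part of the patch): the list.h changes above put prefetch() hints back inside the iteration macros, but call sites are written exactly as before. A minimal hypothetical example of the consumer side; struct foo and foo_list are made-up names:

#include <linux/list.h>

struct foo {
	int value;
	struct list_head node;
};

static LIST_HEAD(foo_list);

static int foo_sum(void)
{
	struct foo *f;
	int sum = 0;

	/* with the hunk above, each iteration also prefetches the next node */
	list_for_each_entry(f, &foo_list, node)
		sum += f->value;

	return sum;
}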
*/ -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) +static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) { return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); } -static inline int stack_guard_page_start(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_growsdown(vma->vm_prev, addr); -} - -/* Is the vma a continuation of the stack vma below it? */ -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); -} - -static inline int stack_guard_page_end(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSUP) && - (vma->vm_end == addr) && - !vma_growsup(vma->vm_next, addr); -} - extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len); diff --git a/trunk/include/linux/module.h b/trunk/include/linux/module.h index d9ca2d5dc6d0..5de42043dff0 100644 --- a/trunk/include/linux/module.h +++ b/trunk/include/linux/module.h @@ -64,9 +64,6 @@ struct module_version_attribute { const char *version; } __attribute__ ((__aligned__(sizeof(void *)))); -extern ssize_t __modver_version_show(struct module_attribute *, - struct module *, char *); - struct module_kobject { struct kobject kobj; @@ -175,7 +172,12 @@ extern struct module __this_module; #define MODULE_VERSION(_version) MODULE_INFO(version, _version) #else #define MODULE_VERSION(_version) \ - static struct module_version_attribute ___modver_attr = { \ + extern ssize_t __modver_version_show(struct module_attribute *, \ + struct module *, char *); \ + static struct module_version_attribute __modver_version_attr \ + __used \ + __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \ + = { \ .mattr = { \ .attr = { \ .name = "version", \ @@ -185,10 +187,7 @@ extern struct module __this_module; }, \ .module_name = KBUILD_MODNAME, \ .version = _version, \ - }; \ - static const struct module_version_attribute \ - __used __attribute__ ((__section__ ("__modver"))) \ - * __moduleparam_const __modver_attr = &___modver_attr + } #endif /* Optional firmware file (or files) needed by the module @@ -224,7 +223,7 @@ struct module_use { extern void *__crc_##sym __attribute__((weak)); \ static const unsigned long __kcrctab_##sym \ __used \ - __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ + __attribute__((section("__kcrctab" sec), unused)) \ = (unsigned long) &__crc_##sym; #else #define __CRC_SYMBOL(sym, sec) @@ -239,7 +238,7 @@ struct module_use { = MODULE_SYMBOL_PREFIX #sym; \ static const struct kernel_symbol __ksymtab_##sym \ __used \ - __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ + __attribute__((section("__ksymtab" sec), unused)) \ = { (unsigned long)&sym, __kstrtab_##sym } #define EXPORT_SYMBOL(sym) \ @@ -368,35 +367,34 @@ struct module struct module_notes_attrs *notes_attrs; #endif - /* The command line arguments (may be mangled). People like - keeping pointers to this stuff */ - char *args; - #ifdef CONFIG_SMP /* Per-cpu data. */ void __percpu *percpu; unsigned int percpu_size; #endif + /* The command line arguments (may be mangled). 
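As an illustrative aside (not part of the patch): the module.h hunk above only changes how MODULE_VERSION() lays out its attribute in the __modver section; module authors keep writing the usual boilerplate. A hedged sketch with hypothetical "demo" names:

#include <linux/init.h>
#include <linux/module.h>

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("demo module");
MODULE_VERSION("1.0");		/* recorded via the __modver machinery above */

static int __init demo_init(void)
{
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);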
People like + keeping pointers to this stuff */ + char *args; #ifdef CONFIG_TRACEPOINTS - unsigned int num_tracepoints; struct tracepoint * const *tracepoints_ptrs; + unsigned int num_tracepoints; #endif #ifdef HAVE_JUMP_LABEL struct jump_entry *jump_entries; unsigned int num_jump_entries; #endif #ifdef CONFIG_TRACING - unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; + unsigned int num_trace_bprintk_fmt; #endif #ifdef CONFIG_EVENT_TRACING struct ftrace_event_call **trace_events; unsigned int num_trace_events; #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD - unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; + unsigned int num_ftrace_callsites; #endif #ifdef CONFIG_MODULE_UNLOAD @@ -477,9 +475,8 @@ const struct kernel_symbol *find_symbol(const char *name, bool warn); /* Walk the exported symbol table */ -bool each_symbol_section(bool (*fn)(const struct symsearch *arr, - struct module *owner, - void *data), void *data); +bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, + unsigned int symnum, void *data), void *data); /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ diff --git a/trunk/include/linux/moduleparam.h b/trunk/include/linux/moduleparam.h index ddaae98c53f9..07b41951e3fa 100644 --- a/trunk/include/linux/moduleparam.h +++ b/trunk/include/linux/moduleparam.h @@ -67,9 +67,9 @@ struct kparam_string { struct kparam_array { unsigned int max; - unsigned int elemsize; unsigned int *num; const struct kernel_param_ops *ops; + unsigned int elemsize; void *elem; }; @@ -371,9 +371,8 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp); */ #define module_param_array_named(name, array, type, nump, perm) \ static const struct kparam_array __param_arr_##name \ - = { .max = ARRAY_SIZE(array), .num = nump, \ - .ops = &param_ops_##type, \ - .elemsize = sizeof(array[0]), .elem = array }; \ + = { ARRAY_SIZE(array), nump, &param_ops_##type, \ + sizeof(array[0]), array }; \ __module_param_call(MODULE_PARAM_PREFIX, name, \ &param_array_ops, \ .arr = &__param_arr_##name, \ diff --git a/trunk/include/linux/mutex.h b/trunk/include/linux/mutex.h index c75471db576e..94b48bd40dd7 100644 --- a/trunk/include/linux/mutex.h +++ b/trunk/include/linux/mutex.h @@ -51,7 +51,7 @@ struct mutex { spinlock_t wait_lock; struct list_head wait_list; #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) - struct task_struct *owner; + struct thread_info *owner; #endif #ifdef CONFIG_DEBUG_MUTEXES const char *name; diff --git a/trunk/include/linux/nfs_fs_sb.h b/trunk/include/linux/nfs_fs_sb.h index 87694ca86914..216cea5db0aa 100644 --- a/trunk/include/linux/nfs_fs_sb.h +++ b/trunk/include/linux/nfs_fs_sb.h @@ -47,7 +47,6 @@ struct nfs_client { #ifdef CONFIG_NFS_V4 u64 cl_clientid; /* constant */ - nfs4_verifier cl_confirm; /* Clientid verifier */ unsigned long cl_state; spinlock_t cl_lock; diff --git a/trunk/include/linux/nfs_xdr.h b/trunk/include/linux/nfs_xdr.h index 7e371f7df9c4..78b101e487ea 100644 --- a/trunk/include/linux/nfs_xdr.h +++ b/trunk/include/linux/nfs_xdr.h @@ -50,7 +50,6 @@ struct nfs_fattr { } du; struct nfs_fsid fsid; __u64 fileid; - __u64 mounted_on_fileid; struct timespec atime; struct timespec mtime; struct timespec ctime; @@ -84,7 +83,6 @@ struct nfs_fattr { #define NFS_ATTR_FATTR_PRECHANGE (1U << 18) #define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */ #define NFS_ATTR_FATTR_MOUNTPOINT (1U << 20) /* Treat as mountpoint */ -#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U <<
21) #define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ | NFS_ATTR_FATTR_MODE \ @@ -233,7 +231,6 @@ struct nfs4_layoutget { struct nfs4_layoutget_args args; struct nfs4_layoutget_res res; struct pnfs_layout_segment **lsegpp; - gfp_t gfp_flags; }; struct nfs4_getdeviceinfo_args { diff --git a/trunk/include/linux/of_device.h b/trunk/include/linux/of_device.h index ae5638480ef2..8bfe6c1d4365 100644 --- a/trunk/include/linux/of_device.h +++ b/trunk/include/linux/of_device.h @@ -21,7 +21,8 @@ extern void of_device_make_bus_id(struct device *dev); static inline int of_driver_match_device(struct device *dev, const struct device_driver *drv) { - return of_match_device(drv->of_match_table, dev) != NULL; + dev->of_match = of_match_device(drv->of_match_table, dev); + return dev->of_match != NULL; } extern struct platform_device *of_dev_get(struct platform_device *dev); @@ -57,11 +58,6 @@ static inline int of_device_uevent(struct device *dev, static inline void of_device_node_put(struct device *dev) { } -static inline const struct of_device_id *of_match_device( - const struct of_device_id *matches, const struct device *dev) -{ - return NULL; -} #endif /* CONFIG_OF_DEVICE */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/trunk/include/linux/pci-ats.h b/trunk/include/linux/pci-ats.h deleted file mode 100644 index 655824fa4c76..000000000000 --- a/trunk/include/linux/pci-ats.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef LINUX_PCI_ATS_H -#define LINUX_PCI_ATS_H - -/* Address Translation Service */ -struct pci_ats { - int pos; /* capability position */ - int stu; /* Smallest Translation Unit */ - int qdep; /* Invalidate Queue Depth */ - int ref_cnt; /* Physical Function reference count */ - unsigned int is_enabled:1; /* Enable bit is set */ -}; - -#ifdef CONFIG_PCI_IOV - -extern int pci_enable_ats(struct pci_dev *dev, int ps); -extern void pci_disable_ats(struct pci_dev *dev); -extern int pci_ats_queue_depth(struct pci_dev *dev); -/** - * pci_ats_enabled - query the ATS status - * @dev: the PCI device - * - * Returns 1 if ATS capability is enabled, or 0 if not. 
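As an illustrative aside (not part of the patch): the moduleparam.h hunk above reorders struct kparam_array and returns its initializer to positional form, but code declaring array parameters is unaffected. A hypothetical consumer:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int thresholds[4] = { 10, 20, 30, 40 };
static unsigned int nr_thresholds = 4;

/* expands to a struct kparam_array plus a __module_param_call() */
module_param_array(thresholds, int, &nr_thresholds, 0444);
MODULE_PARM_DESC(thresholds, "per-channel thresholds (hypothetical)");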
- */ -static inline int pci_ats_enabled(struct pci_dev *dev) -{ - return dev->ats && dev->ats->is_enabled; -} - -#else /* CONFIG_PCI_IOV */ - -static inline int pci_enable_ats(struct pci_dev *dev, int ps) -{ - return -ENODEV; -} - -static inline void pci_disable_ats(struct pci_dev *dev) -{ -} - -static inline int pci_ats_queue_depth(struct pci_dev *dev) -{ - return -ENODEV; -} - -static inline int pci_ats_enabled(struct pci_dev *dev) -{ - return 0; -} - -#endif /* CONFIG_PCI_IOV */ - -#endif /* LINUX_PCI_ATS_H*/ diff --git a/trunk/include/linux/pci_ids.h b/trunk/include/linux/pci_ids.h index 8abe8d78c4bf..4e2c9150a785 100644 --- a/trunk/include/linux/pci_ids.h +++ b/trunk/include/linux/pci_ids.h @@ -2477,12 +2477,15 @@ #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f +#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 @@ -2693,6 +2696,7 @@ #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 diff --git a/trunk/include/linux/percpu.h b/trunk/include/linux/percpu.h index 8b97308e65df..3a5c4449fd36 100644 --- a/trunk/include/linux/percpu.h +++ b/trunk/include/linux/percpu.h @@ -948,7 +948,7 @@ do { \ irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) # endif # define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) + __pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) #endif #endif /* __LINUX_PERCPU_H */ diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h index 3412684ce5d5..ee9f1e782800 100644 --- a/trunk/include/linux/perf_event.h +++ b/trunk/include/linux/perf_event.h @@ -2,8 +2,8 @@ * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner - * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes. 
* @@ -52,8 +52,6 @@ enum perf_hw_id { PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, PERF_COUNT_HW_MAX, /* non-ABI */ }; @@ -470,9 +468,9 @@ enum perf_callchain_context { PERF_CONTEXT_MAX = (__u64)-4095, }; -#define PERF_FLAG_FD_NO_GROUP (1U << 0) -#define PERF_FLAG_FD_OUTPUT (1U << 1) -#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) +#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ #ifdef __KERNEL__ /* @@ -486,9 +484,9 @@ enum perf_callchain_context { #endif struct perf_guest_info_callbacks { - int (*is_in_guest)(void); - int (*is_user_mode)(void); - unsigned long (*get_guest_ip)(void); + int (*is_in_guest) (void); + int (*is_user_mode) (void); + unsigned long (*get_guest_ip) (void); }; #ifdef CONFIG_HAVE_HW_BREAKPOINT @@ -507,7 +505,7 @@ struct perf_guest_info_callbacks { #include #include #include -#include +#include #include #include @@ -654,19 +652,19 @@ struct pmu { * Start the transaction, after this ->add() doesn't need to * do schedulability tests. */ - void (*start_txn) (struct pmu *pmu); /* optional */ + void (*start_txn) (struct pmu *pmu); /* optional */ /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. */ - int (*commit_txn) (struct pmu *pmu); /* optional */ + int (*commit_txn) (struct pmu *pmu); /* optional */ /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. */ - void (*cancel_txn) (struct pmu *pmu); /* optional */ + void (*cancel_txn) (struct pmu *pmu); /* optional */ }; /** @@ -714,15 +712,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int, struct pt_regs *regs); enum perf_group_flag { - PERF_GROUP_SOFTWARE = 0x1, + PERF_GROUP_SOFTWARE = 0x1, }; -#define SWEVENT_HLIST_BITS 8 -#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) +#define SWEVENT_HLIST_BITS 8 +#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) struct swevent_hlist { - struct hlist_head heads[SWEVENT_HLIST_SIZE]; - struct rcu_head rcu_head; + struct hlist_head heads[SWEVENT_HLIST_SIZE]; + struct rcu_head rcu_head; }; #define PERF_ATTACH_CONTEXT 0x01 @@ -735,13 +733,13 @@ struct swevent_hlist { * This is a per-cpu dynamically allocated data structure. */ struct perf_cgroup_info { - u64 time; - u64 timestamp; + u64 time; + u64 timestamp; }; struct perf_cgroup { - struct cgroup_subsys_state css; - struct perf_cgroup_info *info; /* timing info, one per cpu */ + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; /* timing info, one per cpu */ }; #endif @@ -925,7 +923,7 @@ struct perf_event_context { /* * Number of contexts where an event can trigger: - * task, softirq, hardirq, nmi. + * task, softirq, hardirq, nmi. 
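As an illustrative aside (not part of the patch): the ->start_txn()/->commit_txn()/->cancel_txn() hooks above (this hunk only re-aligns their comments) let the core add a whole event group speculatively. A simplified, hypothetical sketch of the consuming pattern, loosely modelled on group_sched_in(); error handling is abbreviated:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/perf_event.h>

static int demo_schedule_group(struct pmu *pmu, struct perf_event *group)
{
	struct perf_event *sibling;

	pmu->start_txn(pmu);			/* defer schedulability checks */

	if (pmu->add(group, PERF_EF_START))
		goto fail;

	list_for_each_entry(sibling, &group->sibling_list, group_entry) {
		if (sibling->pmu->add(sibling, PERF_EF_START))
			goto fail;
	}

	if (!pmu->commit_txn(pmu))		/* 0 means the whole group fits */
		return 0;
fail:
	pmu->cancel_txn(pmu);			/* roll back the partial add */
	return -EAGAIN;
}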
*/ #define PERF_NR_CONTEXTS 4 @@ -1003,7 +1001,8 @@ struct perf_sample_data { struct perf_raw_record *raw; }; -static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) +static inline +void perf_sample_data_init(struct perf_sample_data *data, u64 addr) { data->addr = addr; data->raw = NULL; @@ -1035,12 +1034,13 @@ static inline int is_software_event(struct perf_event *event) return event->pmu->task_ctx_nr == perf_sw_context; } -extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; +extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); #ifndef perf_arch_fetch_caller_regs -static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } +static inline void +perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } #endif /* @@ -1063,24 +1063,26 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { struct pt_regs hot_regs; - if (static_branch(&perf_swevent_enabled[event_id])) { - if (!regs) { - perf_fetch_caller_regs(&hot_regs); - regs = &hot_regs; - } - __perf_sw_event(event_id, nr, nmi, regs, addr); + JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); + return; + +have_event: + if (!regs) { + perf_fetch_caller_regs(&hot_regs); + regs = &hot_regs; } + __perf_sw_event(event_id, nr, nmi, regs, addr); } -extern struct jump_label_key perf_sched_events; +extern atomic_t perf_sched_events; static inline void perf_event_task_sched_in(struct task_struct *task) { - if (static_branch(&perf_sched_events)) - __perf_event_task_sched_in(task); + COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); } -static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) +static inline +void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); @@ -1098,10 +1100,14 @@ extern void perf_event_fork(struct task_struct *tsk); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); -extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); -extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); +extern void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs); +extern void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs); + -static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) +static inline void +perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) { if (entry->nr < PERF_MAX_STACK_DEPTH) entry->ip[entry->nr++] = ip; @@ -1137,9 +1143,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record, extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags -# define perf_misc_flags(regs) \ - (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) -# define perf_instruction_pointer(regs) instruction_pointer(regs) +#define perf_misc_flags(regs) (user_mode(regs) ? 
PERF_RECORD_MISC_USER : \ + PERF_RECORD_MISC_KERNEL) +#define perf_instruction_pointer(regs) instruction_pointer(regs) #endif extern int perf_output_begin(struct perf_output_handle *handle, @@ -1174,9 +1180,9 @@ static inline void perf_bp_event(struct perf_event *event, void *data) { } static inline int perf_register_guest_info_callbacks -(struct perf_guest_info_callbacks *callbacks) { return 0; } +(struct perf_guest_info_callbacks *callbacks) { return 0; } static inline int perf_unregister_guest_info_callbacks -(struct perf_guest_info_callbacks *callbacks) { return 0; } +(struct perf_guest_info_callbacks *callbacks) { return 0; } static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_comm(struct task_struct *tsk) { } @@ -1189,22 +1195,23 @@ static inline void perf_event_disable(struct perf_event *event) { } static inline void perf_event_task_tick(void) { } #endif -#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) +#define perf_output_put(handle, x) \ + perf_output_copy((handle), &(x), sizeof(x)) /* * This has to have a higher priority than migration_notifier in sched.c. */ -#define perf_cpu_notifier(fn) \ -do { \ - static struct notifier_block fn##_nb __cpuinitdata = \ - { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ - fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ - (void *)(unsigned long)smp_processor_id()); \ - fn(&fn##_nb, (unsigned long)CPU_STARTING, \ - (void *)(unsigned long)smp_processor_id()); \ - fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ - (void *)(unsigned long)smp_processor_id()); \ - register_cpu_notifier(&fn##_nb); \ +#define perf_cpu_notifier(fn) \ +do { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ + fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_STARTING, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ + (void *)(unsigned long)smp_processor_id()); \ + register_cpu_notifier(&fn##_nb); \ } while (0) #endif /* __KERNEL__ */ diff --git a/trunk/include/linux/platform_device.h b/trunk/include/linux/platform_device.h index ede1a80e3358..744942c95fec 100644 --- a/trunk/include/linux/platform_device.h +++ b/trunk/include/linux/platform_device.h @@ -150,6 +150,9 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr struct resource *res, unsigned int n_res, const void *data, size_t size); +extern const struct dev_pm_ops * platform_bus_get_pm_ops(void); +extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm); + /* early platform driver interface */ struct early_platform_driver { const char *class_str; @@ -202,64 +205,4 @@ static inline char *early_platform_driver_setup_func(void) \ } #endif /* MODULE */ -#ifdef CONFIG_PM_SLEEP -extern int platform_pm_prepare(struct device *dev); -extern void platform_pm_complete(struct device *dev); -#else -#define platform_pm_prepare NULL -#define platform_pm_complete NULL -#endif - -#ifdef CONFIG_SUSPEND -extern int platform_pm_suspend(struct device *dev); -extern int platform_pm_suspend_noirq(struct device *dev); -extern int platform_pm_resume(struct device *dev); -extern int platform_pm_resume_noirq(struct device *dev); -#else -#define platform_pm_suspend NULL -#define platform_pm_resume NULL -#define platform_pm_suspend_noirq NULL -#define platform_pm_resume_noirq NULL -#endif - -#ifdef CONFIG_HIBERNATE_CALLBACKS -extern int 
platform_pm_freeze(struct device *dev); -extern int platform_pm_freeze_noirq(struct device *dev); -extern int platform_pm_thaw(struct device *dev); -extern int platform_pm_thaw_noirq(struct device *dev); -extern int platform_pm_poweroff(struct device *dev); -extern int platform_pm_poweroff_noirq(struct device *dev); -extern int platform_pm_restore(struct device *dev); -extern int platform_pm_restore_noirq(struct device *dev); -#else -#define platform_pm_freeze NULL -#define platform_pm_thaw NULL -#define platform_pm_poweroff NULL -#define platform_pm_restore NULL -#define platform_pm_freeze_noirq NULL -#define platform_pm_thaw_noirq NULL -#define platform_pm_poweroff_noirq NULL -#define platform_pm_restore_noirq NULL -#endif - -#ifdef CONFIG_PM_SLEEP -#define USE_PLATFORM_PM_SLEEP_OPS \ - .prepare = platform_pm_prepare, \ - .complete = platform_pm_complete, \ - .suspend = platform_pm_suspend, \ - .resume = platform_pm_resume, \ - .freeze = platform_pm_freeze, \ - .thaw = platform_pm_thaw, \ - .poweroff = platform_pm_poweroff, \ - .restore = platform_pm_restore, \ - .suspend_noirq = platform_pm_suspend_noirq, \ - .resume_noirq = platform_pm_resume_noirq, \ - .freeze_noirq = platform_pm_freeze_noirq, \ - .thaw_noirq = platform_pm_thaw_noirq, \ - .poweroff_noirq = platform_pm_poweroff_noirq, \ - .restore_noirq = platform_pm_restore_noirq, -#else -#define USE_PLATFORM_PM_SLEEP_OPS -#endif - #endif /* _PLATFORM_DEVICE_H_ */ diff --git a/trunk/include/linux/pm.h b/trunk/include/linux/pm.h index 3160648ccdda..512e09177e57 100644 --- a/trunk/include/linux/pm.h +++ b/trunk/include/linux/pm.h @@ -460,7 +460,6 @@ struct dev_pm_info { unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; - void *subsys_data; /* Owned by the subsystem. 
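As an illustrative aside (not part of the patch): platform_bus_get_pm_ops()/platform_bus_set_pm_ops(), re-added in the platform_device.h hunk above, let an architecture install its own dev_pm_ops for every platform device (the OMAP1 and SH-Mobile runtime-PM code elsewhere in this patch relies on this). A heavily simplified, hypothetical sketch:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static struct dev_pm_ops demo_platform_pm_ops;

static int __init demo_pm_init(void)
{
	/* start from the bus defaults, then override the runtime hooks */
	demo_platform_pm_ops = *platform_bus_get_pm_ops();
	demo_platform_pm_ops.runtime_suspend = pm_generic_runtime_suspend;
	demo_platform_pm_ops.runtime_resume = pm_generic_runtime_resume;

	platform_bus_set_pm_ops(&demo_platform_pm_ops);
	return 0;
}
core_initcall(demo_pm_init);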
*/ #endif }; @@ -530,17 +529,21 @@ struct dev_power_domain { */ #ifdef CONFIG_PM_SLEEP +#ifndef CONFIG_ARCH_NO_SYSDEV_OPS +extern int sysdev_suspend(pm_message_t state); +extern int sysdev_resume(void); +#else +static inline int sysdev_suspend(pm_message_t state) { return 0; } +static inline int sysdev_resume(void) { return 0; } +#endif + extern void device_pm_lock(void); extern void dpm_resume_noirq(pm_message_t state); extern void dpm_resume_end(pm_message_t state); -extern void dpm_resume(pm_message_t state); -extern void dpm_complete(pm_message_t state); extern void device_pm_unlock(void); extern int dpm_suspend_noirq(pm_message_t state); extern int dpm_suspend_start(pm_message_t state); -extern int dpm_suspend(pm_message_t state); -extern int dpm_prepare(pm_message_t state); extern void __suspend_report_result(const char *function, void *fn, int ret); @@ -550,16 +553,6 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); } while (0) extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); - -extern int pm_generic_prepare(struct device *dev); -extern int pm_generic_suspend(struct device *dev); -extern int pm_generic_resume(struct device *dev); -extern int pm_generic_freeze(struct device *dev); -extern int pm_generic_thaw(struct device *dev); -extern int pm_generic_restore(struct device *dev); -extern int pm_generic_poweroff(struct device *dev); -extern void pm_generic_complete(struct device *dev); - #else /* !CONFIG_PM_SLEEP */ #define device_pm_lock() do {} while (0) @@ -576,15 +569,6 @@ static inline int device_pm_wait_for_dev(struct device *a, struct device *b) { return 0; } - -#define pm_generic_prepare NULL -#define pm_generic_suspend NULL -#define pm_generic_resume NULL -#define pm_generic_freeze NULL -#define pm_generic_thaw NULL -#define pm_generic_restore NULL -#define pm_generic_poweroff NULL -#define pm_generic_complete NULL #endif /* !CONFIG_PM_SLEEP */ /* How to reorder dpm_list after device_move() */ @@ -595,4 +579,11 @@ enum dpm_order { DPM_ORDER_DEV_LAST, }; +extern int pm_generic_suspend(struct device *dev); +extern int pm_generic_resume(struct device *dev); +extern int pm_generic_freeze(struct device *dev); +extern int pm_generic_thaw(struct device *dev); +extern int pm_generic_restore(struct device *dev); +extern int pm_generic_poweroff(struct device *dev); + #endif /* _LINUX_PM_H */ diff --git a/trunk/include/linux/pm_runtime.h b/trunk/include/linux/pm_runtime.h index 878cf84baeb1..8de9aa6e7def 100644 --- a/trunk/include/linux/pm_runtime.h +++ b/trunk/include/linux/pm_runtime.h @@ -245,46 +245,4 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) __pm_runtime_use_autosuspend(dev, false); } -struct pm_clk_notifier_block { - struct notifier_block nb; - struct dev_power_domain *pwr_domain; - char *con_ids[]; -}; - -#ifdef CONFIG_PM_RUNTIME_CLK -extern int pm_runtime_clk_init(struct device *dev); -extern void pm_runtime_clk_destroy(struct device *dev); -extern int pm_runtime_clk_add(struct device *dev, const char *con_id); -extern void pm_runtime_clk_remove(struct device *dev, const char *con_id); -extern int pm_runtime_clk_suspend(struct device *dev); -extern int pm_runtime_clk_resume(struct device *dev); -#else -static inline int pm_runtime_clk_init(struct device *dev) -{ - return -EINVAL; -} -static inline void pm_runtime_clk_destroy(struct device *dev) -{ -} -static inline int pm_runtime_clk_add(struct device *dev, const char *con_id) -{ - return -EINVAL; -} -static inline void 
pm_runtime_clk_remove(struct device *dev, const char *con_id) -{ -} -#define pm_runtime_clock_suspend NULL -#define pm_runtime_clock_resume NULL -#endif - -#ifdef CONFIG_HAVE_CLK -extern void pm_runtime_clk_add_notifier(struct bus_type *bus, - struct pm_clk_notifier_block *clknb); -#else -static inline void pm_runtime_clk_add_notifier(struct bus_type *bus, - struct pm_clk_notifier_block *clknb) -{ -} -#endif - #endif diff --git a/trunk/include/linux/proc_fs.h b/trunk/include/linux/proc_fs.h index eaf4350c0f90..838c1149251a 100644 --- a/trunk/include/linux/proc_fs.h +++ b/trunk/include/linux/proc_fs.h @@ -208,8 +208,6 @@ static inline struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent,const char *dest) {return NULL;} static inline struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) {return NULL;} -static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, - mode_t mode, struct proc_dir_entry *parent) { return NULL; } static inline struct proc_dir_entry *create_proc_read_entry(const char *name, mode_t mode, struct proc_dir_entry *base, diff --git a/trunk/include/linux/ptrace.h b/trunk/include/linux/ptrace.h index 9178d5cc0b01..a1147e5dd245 100644 --- a/trunk/include/linux/ptrace.h +++ b/trunk/include/linux/ptrace.h @@ -189,10 +189,6 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) child->ptrace = current->ptrace; __ptrace_link(child, current->parent); } - -#ifdef CONFIG_HAVE_HW_BREAKPOINT - atomic_set(&child->ptrace_bp_refcnt, 1); -#endif } /** @@ -354,13 +350,6 @@ extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); -#ifdef CONFIG_HAVE_HW_BREAKPOINT -extern int ptrace_get_breakpoints(struct task_struct *tsk); -extern void ptrace_put_breakpoints(struct task_struct *tsk); -#else -static inline void ptrace_put_breakpoints(struct task_struct *tsk) { } -#endif /* CONFIG_HAVE_HW_BREAKPOINT */ - -#endif /* __KERNEL */ +#endif #endif diff --git a/trunk/include/linux/rculist.h b/trunk/include/linux/rculist.h index e3beb315517a..2dea94fc4402 100644 --- a/trunk/include/linux/rculist.h +++ b/trunk/include/linux/rculist.h @@ -253,7 +253,7 @@ static inline void list_splice_init_rcu(struct list_head *list, */ #define list_for_each_entry_rcu(pos, head, member) \ for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ - &pos->member != (head); \ + prefetch(pos->member.next), &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) @@ -270,7 +270,7 @@ static inline void list_splice_init_rcu(struct list_head *list, */ #define list_for_each_continue_rcu(pos, head) \ for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ - (pos) != (head); \ + prefetch((pos)->next), (pos) != (head); \ (pos) = rcu_dereference_raw(list_next_rcu(pos))) /** @@ -284,7 +284,7 @@ static inline void list_splice_init_rcu(struct list_head *list, */ #define list_for_each_entry_continue_rcu(pos, head, member) \ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ + prefetch(pos->member.next), &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** @@ -427,7 +427,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, #define __hlist_for_each_rcu(pos, head) \ for (pos = rcu_dereference(hlist_first_rcu(head)); \ - pos; \ + pos && ({ prefetch(pos->next); 1; }); \ pos = 
rcu_dereference(hlist_next_rcu(pos))) /** @@ -443,7 +443,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ - pos && \ + pos && ({ prefetch(pos->next); 1; }) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_next_rcu(pos))) @@ -460,7 +460,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \ for (pos = rcu_dereference_bh((head)->first); \ - pos && \ + pos && ({ prefetch(pos->next); 1; }) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_bh(pos->next)) @@ -472,7 +472,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_continue_rcu(tpos, pos, member) \ for (pos = rcu_dereference((pos)->next); \ - pos && \ + pos && ({ prefetch(pos->next); 1; }) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference(pos->next)) @@ -484,7 +484,7 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \ for (pos = rcu_dereference_bh((pos)->next); \ - pos && \ + pos && ({ prefetch(pos->next); 1; }) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_bh(pos->next)) diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index 12211e1666e2..18d63cea2848 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout); extern signed long schedule_timeout_killable(signed long timeout); extern signed long schedule_timeout_uninterruptible(signed long timeout); asmlinkage void schedule(void); -extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); +extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); struct nsproxy; struct user_namespace; @@ -731,6 +731,10 @@ struct sched_info { /* timestamps */ unsigned long long last_arrival,/* when we last ran on a cpu */ last_queued; /* when we were last queued to run */ +#ifdef CONFIG_SCHEDSTATS + /* BKL stats */ + unsigned int bkl_count; +#endif }; #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ @@ -864,7 +868,6 @@ static inline int sd_power_saving_flags(void) struct sched_group { struct sched_group *next; /* Must be a circular list */ - atomic_t ref; /* * CPU power of this group, SCHED_LOAD_SCALE being max power for a @@ -879,6 +882,9 @@ struct sched_group { * NOTE: this field is variable length. (Allocated dynamically * by attaching extra space to the end of the structure, * depending on how many CPUs the kernel has booted up with) + * + * It is also be embedded into static data structures at build + * time. 
(See 'struct static_sched_group' in kernel/sched.c) */ unsigned long cpumask[0]; }; @@ -888,6 +894,17 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) return to_cpumask(sg->cpumask); } +enum sched_domain_level { + SD_LV_NONE = 0, + SD_LV_SIBLING, + SD_LV_MC, + SD_LV_BOOK, + SD_LV_CPU, + SD_LV_NODE, + SD_LV_ALLNODES, + SD_LV_MAX +}; + struct sched_domain_attr { int relax_domain_level; }; @@ -896,8 +913,6 @@ struct sched_domain_attr { .relax_domain_level = -1, \ } -extern int sched_domain_level_max; - struct sched_domain { /* These fields must be setup */ struct sched_domain *parent; /* top domain must be null terminated */ @@ -915,7 +930,7 @@ struct sched_domain { unsigned int forkexec_idx; unsigned int smt_gain; int flags; /* See SD_* */ - int level; + enum sched_domain_level level; /* Runtime fields. */ unsigned long last_balance; /* init to jiffies. units in jiffies */ @@ -958,10 +973,6 @@ struct sched_domain { #ifdef CONFIG_SCHED_DEBUG char *name; #endif - union { - void *private; /* used during construction */ - struct rcu_head rcu; /* used during destruction */ - }; unsigned int span_weight; /* @@ -970,6 +981,9 @@ struct sched_domain { * NOTE: this field is variable length. (Allocated dynamically * by attaching extra space to the end of the structure, * depending on how many CPUs the kernel has booted up with) + * + * It is also be embedded into static data structures at build + * time. (See 'struct static_sched_domain' in kernel/sched.c) */ unsigned long span[0]; }; @@ -1034,12 +1048,8 @@ struct sched_domain; #define WF_FORK 0x02 /* child wakeup after fork */ #define ENQUEUE_WAKEUP 1 -#define ENQUEUE_HEAD 2 -#ifdef CONFIG_SMP -#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ -#else -#define ENQUEUE_WAKING 0 -#endif +#define ENQUEUE_WAKING 2 +#define ENQUEUE_HEAD 4 #define DEQUEUE_SLEEP 1 @@ -1057,11 +1067,12 @@ struct sched_class { void (*put_prev_task) (struct rq *rq, struct task_struct *p); #ifdef CONFIG_SMP - int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); + int (*select_task_rq)(struct rq *rq, struct task_struct *p, + int sd_flag, int flags); void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); void (*post_schedule) (struct rq *this_rq); - void (*task_waking) (struct task_struct *task); + void (*task_waking) (struct rq *this_rq, struct task_struct *task); void (*task_woken) (struct rq *this_rq, struct task_struct *task); void (*set_cpus_allowed)(struct task_struct *p, @@ -1186,11 +1197,13 @@ struct task_struct { unsigned int flags; /* per process flags, defined below */ unsigned int ptrace; + int lock_depth; /* BKL lock depth */ + #ifdef CONFIG_SMP - struct task_struct *wake_entry; - int on_cpu; +#ifdef __ARCH_WANT_UNLOCKED_CTXSW + int oncpu; +#endif #endif - int on_rq; int prio, static_prio, normal_prio; unsigned int rt_priority; @@ -1261,7 +1274,6 @@ struct task_struct { /* Revert to default priority/policy when forking */ unsigned sched_reset_on_fork:1; - unsigned sched_contributes_to_load:1; pid_t pid; pid_t tgid; @@ -1525,9 +1537,6 @@ struct task_struct { unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ } memcg_batch; #endif -#ifdef CONFIG_HAVE_HW_BREAKPOINT - atomic_t ptrace_bp_refcnt; -#endif }; /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ @@ -2051,13 +2060,14 @@ extern void xtime_update(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); -extern void wake_up_new_task(struct task_struct *tsk); +extern void wake_up_new_task(struct task_struct *tsk, + unsigned long clone_flags); #ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); #else static inline void kick_process(struct task_struct *tsk) { } #endif -extern void sched_fork(struct task_struct *p); +extern void sched_fork(struct task_struct *p, int clone_flags); extern void sched_dead(struct task_struct *p); extern void proc_caches_init(void); @@ -2182,10 +2192,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from); extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -void scheduler_ipi(void); extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else -static inline void scheduler_ipi(void) { } static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) { diff --git a/trunk/include/linux/seqlock.h b/trunk/include/linux/seqlock.h index 06d69648fc86..e98cd2e57194 100644 --- a/trunk/include/linux/seqlock.h +++ b/trunk/include/linux/seqlock.h @@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl) unsigned ret; repeat: - ret = ACCESS_ONCE(sl->sequence); + ret = sl->sequence; + smp_rmb(); if (unlikely(ret & 1)) { cpu_relax(); goto repeat; } - smp_rmb(); return ret; } diff --git a/trunk/include/linux/ssb/ssb.h b/trunk/include/linux/ssb/ssb.h index 045f72ab5dfd..9659eff52ca2 100644 --- a/trunk/include/linux/ssb/ssb.h +++ b/trunk/include/linux/ssb/ssb.h @@ -404,9 +404,7 @@ extern bool ssb_is_sprom_available(struct ssb_bus *bus); /* Set a fallback SPROM. * See kdoc at the function definition for complete documentation. */ -extern int ssb_arch_register_fallback_sprom( - int (*sprom_callback)(struct ssb_bus *bus, - struct ssb_sprom *out)); +extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom); /* Suspend a SSB bus. * Call this from the parent bus suspend routine. 
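As an illustrative aside (not part of the patch): the seqlock.h hunk above moves the smp_rmb() inside read_seqbegin(), but the reader convention is unchanged: sample the sequence, read the data, retry if a writer got in the way. A minimal hypothetical reader/writer pair:

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(demo_lock);
static u64 demo_counter;

static void demo_update(u64 val)
{
	write_seqlock(&demo_lock);	/* sequence becomes odd */
	demo_counter = val;
	write_sequnlock(&demo_lock);	/* sequence becomes even again */
}

static u64 demo_read(void)
{
	u64 val;
	unsigned int seq;

	do {
		seq = read_seqbegin(&demo_lock);
		val = demo_counter;
	} while (read_seqretry(&demo_lock, seq));

	return val;
}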
*/ diff --git a/trunk/include/linux/string.h b/trunk/include/linux/string.h index a176db2f2c85..a716ee2a8adb 100644 --- a/trunk/include/linux/string.h +++ b/trunk/include/linux/string.h @@ -123,7 +123,6 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); -extern int strtobool(const char *s, bool *res); #ifdef CONFIG_BINARY_PRINTF int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); diff --git a/trunk/include/linux/sunrpc/sched.h b/trunk/include/linux/sunrpc/sched.h index f73c482ec9c6..d81db8012c63 100644 --- a/trunk/include/linux/sunrpc/sched.h +++ b/trunk/include/linux/sunrpc/sched.h @@ -127,16 +127,13 @@ struct rpc_task_setup { #define RPC_TASK_KILLED 0x0100 /* task was killed */ #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ -#define RPC_TASK_SENT 0x0800 /* message was sent */ -#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) #define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) #define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) -#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) +#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) -#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) #define RPC_TASK_RUNNING 0 #define RPC_TASK_QUEUED 1 diff --git a/trunk/include/linux/sysdev.h b/trunk/include/linux/sysdev.h index d35e783a598c..dfb078db8ebb 100644 --- a/trunk/include/linux/sysdev.h +++ b/trunk/include/linux/sysdev.h @@ -34,6 +34,12 @@ struct sysdev_class { struct list_head drivers; struct sysdev_class_attribute **attrs; struct kset kset; +#ifndef CONFIG_ARCH_NO_SYSDEV_OPS + /* Default operations for these types of devices */ + int (*shutdown)(struct sys_device *); + int (*suspend)(struct sys_device *, pm_message_t state); + int (*resume)(struct sys_device *); +#endif }; struct sysdev_class_attribute { @@ -71,6 +77,11 @@ struct sysdev_driver { struct list_head entry; int (*add)(struct sys_device *); int (*remove)(struct sys_device *); +#ifndef CONFIG_ARCH_NO_SYSDEV_OPS + int (*shutdown)(struct sys_device *); + int (*suspend)(struct sys_device *, pm_message_t state); + int (*resume)(struct sys_device *); +#endif }; diff --git a/trunk/include/linux/time.h b/trunk/include/linux/time.h index 454a26205787..4ea5a75fcacd 100644 --- a/trunk/include/linux/time.h +++ b/trunk/include/linux/time.h @@ -126,6 +126,7 @@ struct timespec __current_kernel_time(void); /* does not take xtime_lock */ struct timespec get_monotonic_coarse(void); void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, struct timespec *wtom, struct timespec *sleep); +void timekeeping_inject_sleeptime(struct timespec *delta); #define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) diff --git a/trunk/include/linux/tracepoint.h b/trunk/include/linux/tracepoint.h index d530a4460a0b..97c84a58efb8 100644 --- a/trunk/include/linux/tracepoint.h +++ b/trunk/include/linux/tracepoint.h @@ -29,7 +29,7 @@ struct tracepoint_func { struct tracepoint { const char *name; /* Tracepoint name */ - struct jump_label_key key; + int state; /* State. 
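As an illustrative aside (not part of the patch): timekeeping_inject_sleeptime(), declared in the time.h hunk above, credits time spent suspended back to the system clock. A hypothetical sketch of a suspend/resume pair that measures the gap with some always-running clock; demo_read_persistent_clock() is a made-up helper:

#include <linux/time.h>

static struct timespec demo_suspend_ts;

/* hypothetical helper: reads an RTC or other clock that runs during suspend */
extern void demo_read_persistent_clock(struct timespec *ts);

static void demo_on_suspend(void)
{
	demo_read_persistent_clock(&demo_suspend_ts);
}

static void demo_on_resume(void)
{
	struct timespec now, delta;

	demo_read_persistent_clock(&now);
	delta = timespec_sub(now, demo_suspend_ts);
	timekeeping_inject_sleeptime(&delta);	/* account the slept interval */
}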
*/ void (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; @@ -146,7 +146,9 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ - if (static_branch(&__tracepoint_##name.key)) \ + JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ + return; \ +do_trace: \ __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ @@ -174,14 +176,14 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, * structures, so we create an array of pointers that will be used for iteration * on the tracepoints. */ -#define DEFINE_TRACE_FN(name, reg, unreg) \ - static const char __tpstrtab_##name[] \ - __attribute__((section("__tracepoints_strings"))) = #name; \ - struct tracepoint __tracepoint_##name \ - __attribute__((section("__tracepoints"))) = \ - { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\ - static struct tracepoint * const __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ +#define DEFINE_TRACE_FN(name, reg, unreg) \ + static const char __tpstrtab_##name[] \ + __attribute__((section("__tracepoints_strings"))) = #name; \ + struct tracepoint __tracepoint_##name \ + __attribute__((section("__tracepoints"))) = \ + { __tpstrtab_##name, 0, reg, unreg, NULL }; \ + static struct tracepoint * const __tracepoint_ptr_##name __used \ + __attribute__((section("__tracepoints_ptrs"))) = \ &__tracepoint_##name; #define DEFINE_TRACE(name) \ diff --git a/trunk/include/linux/usb/usbnet.h b/trunk/include/linux/usb/usbnet.h index 605b0aa8d852..0e1855079fbb 100644 --- a/trunk/include/linux/usb/usbnet.h +++ b/trunk/include/linux/usb/usbnet.h @@ -68,7 +68,6 @@ struct usbnet { # define EVENT_RX_PAUSED 5 # define EVENT_DEV_WAKING 6 # define EVENT_DEV_ASLEEP 7 -# define EVENT_DEV_OPEN 8 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) diff --git a/trunk/include/linux/v4l2-mediabus.h b/trunk/include/linux/v4l2-mediabus.h index de5c15921025..7054a7a8065e 100644 --- a/trunk/include/linux/v4l2-mediabus.h +++ b/trunk/include/linux/v4l2-mediabus.h @@ -47,7 +47,7 @@ enum v4l2_mbus_pixelcode { V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007, V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008, - /* YUV (including grey) - next is 0x2014 */ + /* YUV (including grey) - next is 0x2013 */ V4L2_MBUS_FMT_Y8_1X8 = 0x2001, V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002, V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003, @@ -60,7 +60,6 @@ enum v4l2_mbus_pixelcode { V4L2_MBUS_FMT_Y10_1X10 = 0x200a, V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b, V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c, - V4L2_MBUS_FMT_Y12_1X12 = 0x2013, V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f, V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010, V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011, @@ -68,11 +67,9 @@ enum v4l2_mbus_pixelcode { V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d, V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e, - /* Bayer - next is 0x3015 */ + /* Bayer - next is 0x3013 */ V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001, - V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013, V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002, - V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009, diff --git a/trunk/include/linux/videodev2.h b/trunk/include/linux/videodev2.h index be82c8ead1af..aa6c393b7ae9 100644 --- a/trunk/include/linux/videodev2.h +++ b/trunk/include/linux/videodev2.h @@ -308,7 +308,6 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */ 
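As an illustrative aside (not part of the patch): the tracepoint.h hunk above swaps struct tracepoint back to an int state guarded by JUMP_LABEL(), but the declare/define/call convention for tracepoint users is untouched. A bare-bones hypothetical tracepoint in the style of Documentation/trace/tracepoints.txt:

/* in a header */
#include <linux/tracepoint.h>

DECLARE_TRACE(demo_event,
	TP_PROTO(int value),
	TP_ARGS(value));

/* in exactly one .c file */
DEFINE_TRACE(demo_event);

/* at the instrumentation site; a no-op until a probe is attached
 * with register_trace_demo_event(probe, data) */
static void demo_do_work(int value)
{
	trace_demo_event(value);
}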
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */ #define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ -#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */ #define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ /* Palette formats */ diff --git a/trunk/include/media/v4l2-device.h b/trunk/include/media/v4l2-device.h index d61febfb1668..bd102cf509ac 100644 --- a/trunk/include/media/v4l2-device.h +++ b/trunk/include/media/v4l2-device.h @@ -163,7 +163,7 @@ v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev); ({ \ struct v4l2_subdev *__sd; \ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \ - f , ##args); \ + f, args...); \ }) /* Call the specified callback for all subdevs matching grp_id (if 0, then diff --git a/trunk/include/net/inet_ecn.h b/trunk/include/net/inet_ecn.h index 2fa8d1341a0a..88bdd010d65d 100644 --- a/trunk/include/net/inet_ecn.h +++ b/trunk/include/net/inet_ecn.h @@ -38,19 +38,9 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) return outer; } -static inline void INET_ECN_xmit(struct sock *sk) -{ - inet_sk(sk)->tos |= INET_ECN_ECT_0; - if (inet6_sk(sk) != NULL) - inet6_sk(sk)->tclass |= INET_ECN_ECT_0; -} - -static inline void INET_ECN_dontxmit(struct sock *sk) -{ - inet_sk(sk)->tos &= ~INET_ECN_MASK; - if (inet6_sk(sk) != NULL) - inet6_sk(sk)->tclass &= ~INET_ECN_MASK; -} +#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0) +#define INET_ECN_dontxmit(sk) \ + do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0) #define IP6_ECN_flow_init(label) do { \ (label) &= ~htonl(INET_ECN_MASK << 20); \ diff --git a/trunk/include/net/ip_vs.h b/trunk/include/net/ip_vs.h index 86aefed6140b..d516f00c8e0f 100644 --- a/trunk/include/net/ip_vs.h +++ b/trunk/include/net/ip_vs.h @@ -791,7 +791,6 @@ struct ip_vs_app { /* IPVS in network namespace */ struct netns_ipvs { int gen; /* Generation */ - int enable; /* enable like nf_hooks do */ /* * Hash table: for real service lookups */ @@ -1090,22 +1089,6 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) atomic_inc(&ctl_cp->n_control); } -/* - * IPVS netns init & cleanup functions - */ -extern int __ip_vs_estimator_init(struct net *net); -extern int __ip_vs_control_init(struct net *net); -extern int __ip_vs_protocol_init(struct net *net); -extern int __ip_vs_app_init(struct net *net); -extern int __ip_vs_conn_init(struct net *net); -extern int __ip_vs_sync_init(struct net *net); -extern void __ip_vs_conn_cleanup(struct net *net); -extern void __ip_vs_app_cleanup(struct net *net); -extern void __ip_vs_protocol_cleanup(struct net *net); -extern void __ip_vs_control_cleanup(struct net *net); -extern void __ip_vs_estimator_cleanup(struct net *net); -extern void __ip_vs_sync_cleanup(struct net *net); -extern void __ip_vs_service_cleanup(struct net *net); /* * IPVS application functions diff --git a/trunk/include/net/llc_pdu.h b/trunk/include/net/llc_pdu.h index f57e7d46a453..75b8e2968c9b 100644 --- a/trunk/include/net/llc_pdu.h +++ b/trunk/include/net/llc_pdu.h @@ -199,7 +199,7 @@ struct llc_pdu_sn { u8 ssap; u8 ctrl_1; u8 ctrl_2; -} __packed; +}; static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) { @@ -211,7 +211,7 @@ struct llc_pdu_un { u8 dsap; u8 ssap; u8 ctrl_1; -} __packed; +}; static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) { @@ -359,7 +359,7 @@ struct llc_xid_info { u8 fmt_id; /* always 0x81 for LLC */ u8 type; /* different if 
NULL/non-NULL LSAP */ u8 rw; /* sender receive window */ -} __packed; +}; /** * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID @@ -415,7 +415,7 @@ struct llc_frmr_info { u8 curr_ssv; /* current send state variable val */ u8 curr_rsv; /* current receive state variable */ u8 ind_bits; /* indicator bits set with macro */ -} __packed; +}; extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type); extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value); diff --git a/trunk/include/net/xfrm.h b/trunk/include/net/xfrm.h index 20afeaa39395..6ae4bc5ce8a7 100644 --- a/trunk/include/net/xfrm.h +++ b/trunk/include/net/xfrm.h @@ -324,7 +324,6 @@ struct xfrm_state_afinfo { int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); int (*output)(struct sk_buff *skb); - int (*output_finish)(struct sk_buff *skb); int (*extract_input)(struct xfrm_state *x, struct sk_buff *skb); int (*extract_output)(struct xfrm_state *x, @@ -1455,7 +1454,6 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm4_output(struct sk_buff *skb); -extern int xfrm4_output_finish(struct sk_buff *skb); extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); extern int xfrm6_extract_header(struct sk_buff *skb); @@ -1472,7 +1470,6 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr); extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm6_output(struct sk_buff *skb); -extern int xfrm6_output_finish(struct sk_buff *skb); extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr); diff --git a/trunk/include/rdma/iw_cm.h b/trunk/include/rdma/iw_cm.h index 2d0191c90f9e..cbb822e8d791 100644 --- a/trunk/include/rdma/iw_cm.h +++ b/trunk/include/rdma/iw_cm.h @@ -46,9 +46,18 @@ enum iw_cm_event_type { IW_CM_EVENT_CLOSE /* close complete */ }; +enum iw_cm_event_status { + IW_CM_EVENT_STATUS_OK = 0, /* request successful */ + IW_CM_EVENT_STATUS_ACCEPTED = 0, /* connect request accepted */ + IW_CM_EVENT_STATUS_REJECTED, /* connect request rejected */ + IW_CM_EVENT_STATUS_TIMEOUT, /* the operation timed out */ + IW_CM_EVENT_STATUS_RESET, /* reset from remote peer */ + IW_CM_EVENT_STATUS_EINVAL, /* asynchronous failure for bad parm */ +}; + struct iw_cm_event { enum iw_cm_event_type event; - int status; + enum iw_cm_event_status status; struct sockaddr_in local_addr; struct sockaddr_in remote_addr; void *private_data; diff --git a/trunk/include/rdma/rdma_cm.h b/trunk/include/rdma/rdma_cm.h index 169f7a53fb0c..4fae90304648 100644 --- a/trunk/include/rdma/rdma_cm.h +++ b/trunk/include/rdma/rdma_cm.h @@ -329,14 +329,4 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr); */ void rdma_set_service_type(struct rdma_cm_id *id, int tos); -/** - * rdma_set_reuseaddr - Allow the reuse of local addresses when binding - * the rdma_cm_id. - * @id: Communication identifier to configure. - * @reuse: Value indicating if the bound address is reusable. - * - * Reuse must be set before an address is bound to the id. 
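As an illustrative aside (not part of the patch): dropping __packed from the LLC PDU structs above is layout-neutral only because every member is a single u8, so no padding is inserted either way. A build-time check documenting that assumption could look like this (sizes follow the struct definitions above):

#include <linux/kernel.h>
#include <net/llc_pdu.h>

static inline void demo_llc_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct llc_pdu_un) != 3);	/* dsap, ssap, ctrl_1 */
	BUILD_BUG_ON(sizeof(struct llc_pdu_sn) != 4);	/* ... plus ctrl_2 */
}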
- */ -int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse); - #endif /* RDMA_CM_H */ diff --git a/trunk/include/rdma/rdma_user_cm.h b/trunk/include/rdma/rdma_user_cm.h index fc82c1896f75..1d165022c02d 100644 --- a/trunk/include/rdma/rdma_user_cm.h +++ b/trunk/include/rdma/rdma_user_cm.h @@ -221,9 +221,8 @@ enum { /* Option details */ enum { - RDMA_OPTION_ID_TOS = 0, - RDMA_OPTION_ID_REUSEADDR = 1, - RDMA_OPTION_IB_PATH = 1 + RDMA_OPTION_ID_TOS = 0, + RDMA_OPTION_IB_PATH = 1 }; struct rdma_ucm_set_option { diff --git a/trunk/include/scsi/scsi_device.h b/trunk/include/scsi/scsi_device.h index dd82e02ddde3..2d3ec5094685 100644 --- a/trunk/include/scsi/scsi_device.h +++ b/trunk/include/scsi/scsi_device.h @@ -169,7 +169,6 @@ struct scsi_device { sdev_dev; struct execute_work ew; /* used to get process context on put */ - struct work_struct requeue_work; struct scsi_dh_data *scsi_dh_data; enum scsi_device_state sdev_state; diff --git a/trunk/include/trace/events/gfpflags.h b/trunk/include/trace/events/gfpflags.h index 9fe3a36646e9..e3615c093741 100644 --- a/trunk/include/trace/events/gfpflags.h +++ b/trunk/include/trace/events/gfpflags.h @@ -10,7 +10,6 @@ */ #define show_gfp_flags(flags) \ (flags) ? __print_flags(flags, "|", \ - {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \ {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ {(unsigned long)GFP_USER, "GFP_USER"}, \ @@ -33,9 +32,6 @@ {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ - {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ - {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ - {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ - {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \ + {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ ) : "GFP_NOWAIT" diff --git a/trunk/include/xen/events.h b/trunk/include/xen/events.h index 9af21e19545a..f1b87ad48ac7 100644 --- a/trunk/include/xen/events.h +++ b/trunk/include/xen/events.h @@ -85,8 +85,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); /* Bind an PSI pirq to an irq. */ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, - int pirq, int vector, const char *name, - domid_t domid); + int pirq, int vector, const char *name); #endif /* De-allocates the above mentioned physical interrupt. */ @@ -95,10 +94,4 @@ int xen_destroy_irq(int irq); /* Return irq from pirq */ int xen_irq_from_pirq(unsigned pirq); -/* Return the pirq allocated to the irq. */ -int xen_pirq_from_irq(unsigned irq); - -/* Determine whether to ignore this IRQ if it is passed to a guest. */ -int xen_test_irq_shared(int irq); - #endif /* _XEN_EVENTS_H */ diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig index af958ad26d60..56240e724d9a 100644 --- a/trunk/init/Kconfig +++ b/trunk/init/Kconfig @@ -827,11 +827,6 @@ config SCHED_AUTOGROUP desktop applications. Task group autogeneration is currently based upon task session. -config SCHED_TTWU_QUEUE - bool - depends on !SPARC32 - default y - config MM_OWNER bool @@ -929,6 +924,14 @@ menuconfig EXPERT environments which can tolerate a "non-standard" kernel. Only use this if you really know what you are doing. +config EMBEDDED + bool "Embedded system" + select EXPERT + help + This option should be enabled if compiling the kernel for + an embedded system so certain expert options are available + for configuration. 
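
For illustration of the show_gfp_flags() hunk a few files above, which trims entries from the flag-name table used when tracing allocations: the pattern is simply walking a bit/name table and joining the names of the set bits. Below is a minimal, stand-alone userspace sketch of that decoding; the flag values and names are invented placeholders, not the kernel's GFP_* definitions, and this is not the ftrace __print_flags() implementation.

#include <stdio.h>

/* Hypothetical flag bits, standing in for GFP_* values. */
#define F_WAIT    (1u << 0)
#define F_IO      (1u << 1)
#define F_FS      (1u << 2)
#define F_MOVABLE (1u << 3)

static const struct { unsigned long bit; const char *name; } flag_names[] = {
	{ F_WAIT, "F_WAIT" }, { F_IO, "F_IO" },
	{ F_FS, "F_FS" }, { F_MOVABLE, "F_MOVABLE" },
};

/* Print a mask as "NAME|NAME|...", or "NONE" when no listed bit is set. */
static void print_flags(unsigned long flags)
{
	int printed = 0;
	size_t i;

	for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++) {
		if (flags & flag_names[i].bit) {
			printf("%s%s", printed ? "|" : "", flag_names[i].name);
			printed = 1;
		}
	}
	puts(printed ? "" : "NONE");
}

int main(void)
{
	print_flags(F_WAIT | F_FS);	/* prints F_WAIT|F_FS */
	return 0;
}
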
+ config UID16 bool "Enable 16-bit UID system calls" if EXPERT depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) @@ -1101,14 +1104,6 @@ config AIO by some high performance threaded applications. Disabling this option saves about 7k. -config EMBEDDED - bool "Embedded system" - select EXPERT - help - This option should be enabled if compiling the kernel for - an embedded system so certain expert options are available - for configuration. - config HAVE_PERF_EVENTS bool help diff --git a/trunk/init/main.c b/trunk/init/main.c index 48df882d51d2..4a9479ef4540 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -580,8 +580,8 @@ asmlinkage void __init start_kernel(void) #endif page_cgroup_init(); enable_debug_pagealloc(); - debug_objects_mem_init(); kmemleak_init(); + debug_objects_mem_init(); setup_per_cpu_pageset(); numa_policy_init(); if (late_time_init) diff --git a/trunk/kernel/Makefile b/trunk/kernel/Makefile index e9cf19155b46..85cbfb31e73e 100644 --- a/trunk/kernel/Makefile +++ b/trunk/kernel/Makefile @@ -21,6 +21,7 @@ CFLAGS_REMOVE_mutex-debug.o = -pg CFLAGS_REMOVE_rtmutex-debug.o = -pg CFLAGS_REMOVE_cgroup-debug.o = -pg CFLAGS_REMOVE_sched_clock.o = -pg +CFLAGS_REMOVE_perf_event.o = -pg CFLAGS_REMOVE_irq_work.o = -pg endif @@ -102,9 +103,8 @@ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_TRACEPOINTS) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_IRQ_WORK) += irq_work.o - -obj-$(CONFIG_PERF_EVENTS) += events/ - +obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o obj-$(CONFIG_PADATA) += padata.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o diff --git a/trunk/kernel/capability.c b/trunk/kernel/capability.c index 32a80e08ff4b..bf0c734d0c12 100644 --- a/trunk/kernel/capability.c +++ b/trunk/kernel/capability.c @@ -399,15 +399,3 @@ bool task_ns_capable(struct task_struct *t, int cap) return ns_capable(task_cred_xxx(t, user)->user_ns, cap); } EXPORT_SYMBOL(task_ns_capable); - -/** - * nsown_capable - Check superior capability to one's own user_ns - * @cap: The capability in question - * - * Return true if the current task has the given superior capability - * targeted at its own user namespace. - */ -bool nsown_capable(int cap) -{ - return ns_capable(current_user_ns(), cap); -} diff --git a/trunk/kernel/cpuset.c b/trunk/kernel/cpuset.c index 2bb8c2e98fff..33eee16addb8 100644 --- a/trunk/kernel/cpuset.c +++ b/trunk/kernel/cpuset.c @@ -1159,7 +1159,7 @@ int current_cpuset_is_being_rebound(void) static int update_relax_domain_level(struct cpuset *cs, s64 val) { #ifdef CONFIG_SMP - if (val < -1 || val >= sched_domain_level_max) + if (val < -1 || val >= SD_LV_MAX) return -EINVAL; #endif diff --git a/trunk/kernel/cred.c b/trunk/kernel/cred.c index 8093c16b84b1..5557b55048df 100644 --- a/trunk/kernel/cred.c +++ b/trunk/kernel/cred.c @@ -54,7 +54,6 @@ struct cred init_cred = { .cap_effective = CAP_INIT_EFF_SET, .cap_bset = CAP_INIT_BSET, .user = INIT_USER, - .user_ns = &init_user_ns, .group_info = &init_groups, #ifdef CONFIG_KEYS .tgcred = &init_tgcred, @@ -411,11 +410,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) goto error_put; } - /* cache user_ns in cred. 
Doesn't need a refcount because it will - * stay pinned by cred->user - */ - new->user_ns = new->user->user_ns; - #ifdef CONFIG_KEYS /* new threads get their own thread keyrings if their parent already * had one */ @@ -747,6 +741,12 @@ int set_create_files_as(struct cred *new, struct inode *inode) } EXPORT_SYMBOL(set_create_files_as); +struct user_namespace *current_user_ns(void) +{ + return _current_user_ns(); +} +EXPORT_SYMBOL(current_user_ns); + #ifdef CONFIG_DEBUG_CREDENTIALS bool creds_are_invalid(const struct cred *cred) diff --git a/trunk/kernel/events/Makefile b/trunk/kernel/events/Makefile deleted file mode 100644 index 1ce23d3d8394..000000000000 --- a/trunk/kernel/events/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_core.o = -pg -endif - -obj-y := core.o -obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c index 8dd874181542..f5d2f63bae0b 100644 --- a/trunk/kernel/exit.c +++ b/trunk/kernel/exit.c @@ -1016,7 +1016,7 @@ NORET_TYPE void do_exit(long code) /* * FIXME: do that only when needed, using sched_exit tracepoint */ - ptrace_put_breakpoints(tsk); + flush_ptrace_hw_breakpoint(tsk); exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA diff --git a/trunk/kernel/extable.c b/trunk/kernel/extable.c index c2d625fcda77..7f8f263f8524 100644 --- a/trunk/kernel/extable.c +++ b/trunk/kernel/extable.c @@ -72,14 +72,6 @@ int core_kernel_text(unsigned long addr) return 0; } -int core_kernel_data(unsigned long addr) -{ - if (addr >= (unsigned long)_sdata && - addr < (unsigned long)_edata) - return 1; - return 0; -} - int __kernel_text_address(unsigned long addr) { if (core_kernel_text(addr)) diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index 2b44d82b8237..e7548dee636b 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -1103,6 +1103,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, posix_cpu_timers_init(p); + p->lock_depth = -1; /* -1 = no lock */ do_posix_clock_monotonic_gettime(&p->start_time); p->real_start_time = p->start_time; monotonic_to_bootbased(&p->real_start_time); @@ -1152,7 +1153,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, #endif /* Perform scheduler related setup. Assign this task to a CPU. */ - sched_fork(p); + sched_fork(p, clone_flags); retval = perf_event_init_task(p); if (retval) @@ -1463,7 +1464,7 @@ long do_fork(unsigned long clone_flags, */ p->flags &= ~PF_STARTING; - wake_up_new_task(p); + wake_up_new_task(p, clone_flags); tracehook_report_clone_complete(trace, regs, clone_flags, nr, p); diff --git a/trunk/kernel/freezer.c b/trunk/kernel/freezer.c index 7b01de98bb6a..66ecd2ead215 100644 --- a/trunk/kernel/freezer.c +++ b/trunk/kernel/freezer.c @@ -17,7 +17,7 @@ static inline void frozen_process(void) { if (!unlikely(current->flags & PF_NOFREEZE)) { current->flags |= PF_FROZEN; - smp_wmb(); + wmb(); } clear_freeze_flag(current); } @@ -93,7 +93,7 @@ bool freeze_task(struct task_struct *p, bool sig_only) * the task as frozen and next clears its TIF_FREEZE. 
*/ if (!freezing(p)) { - smp_rmb(); + rmb(); if (frozen(p)) return false; diff --git a/trunk/kernel/hrtimer.c b/trunk/kernel/hrtimer.c index 87fdb3f8db14..9017478c5d4c 100644 --- a/trunk/kernel/hrtimer.c +++ b/trunk/kernel/hrtimer.c @@ -81,11 +81,7 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = } }; -static int hrtimer_clock_to_base_table[MAX_CLOCKS] = { - [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, - [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, - [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, -}; +static int hrtimer_clock_to_base_table[MAX_CLOCKS]; static inline int hrtimer_clockid_to_base(clockid_t clock_id) { @@ -1726,6 +1722,10 @@ static struct notifier_block __cpuinitdata hrtimers_nb = { void __init hrtimers_init(void) { + hrtimer_clock_to_base_table[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME; + hrtimer_clock_to_base_table[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC; + hrtimer_clock_to_base_table[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME; + hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); register_cpu_notifier(&hrtimers_nb); diff --git a/trunk/kernel/hung_task.c b/trunk/kernel/hung_task.c index ea640120ab86..53ead174da2f 100644 --- a/trunk/kernel/hung_task.c +++ b/trunk/kernel/hung_task.c @@ -33,7 +33,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; /* * Zero means infinite timeout - no checking done: */ -unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT; +unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; unsigned long __read_mostly sysctl_hung_task_warnings = 10; diff --git a/trunk/kernel/events/hw_breakpoint.c b/trunk/kernel/hw_breakpoint.c similarity index 100% rename from trunk/kernel/events/hw_breakpoint.c rename to trunk/kernel/hw_breakpoint.c diff --git a/trunk/kernel/irq/Kconfig b/trunk/kernel/irq/Kconfig index d1d051b38e0b..c574f9a12c48 100644 --- a/trunk/kernel/irq/Kconfig +++ b/trunk/kernel/irq/Kconfig @@ -48,10 +48,6 @@ config IRQ_PREFLOW_FASTEOI config IRQ_EDGE_EOI_HANDLER bool -# Generic configurable interrupt chip implementation -config GENERIC_IRQ_CHIP - bool - # Support forced irq threading config IRQ_FORCED_THREADING bool diff --git a/trunk/kernel/irq/Makefile b/trunk/kernel/irq/Makefile index 73290056cfb6..54329cd7b3ee 100644 --- a/trunk/kernel/irq/Makefile +++ b/trunk/kernel/irq/Makefile @@ -1,6 +1,5 @@ obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o -obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o diff --git a/trunk/kernel/irq/chip.c b/trunk/kernel/irq/chip.c index d5a3009da71a..4af1e2b244cb 100644 --- a/trunk/kernel/irq/chip.c +++ b/trunk/kernel/irq/chip.c @@ -310,7 +310,6 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) out_unlock: raw_spin_unlock(&desc->lock); } -EXPORT_SYMBOL_GPL(handle_simple_irq); /** * handle_level_irq - Level type irq handler @@ -574,7 +573,6 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, if (handle != handle_bad_irq && is_chained) { irq_settings_set_noprobe(desc); irq_settings_set_norequest(desc); - irq_settings_set_nothread(desc); irq_startup(desc); } out: @@ -614,7 +612,6 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) irq_put_desc_unlock(desc, flags); } -EXPORT_SYMBOL_GPL(irq_modify_status); /** * irq_cpu_online - Invoke all irq_cpu_online functions. 
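
The hrtimer.c hunk above replaces a designated-initializer lookup table with runtime assignments in hrtimers_init(). As a quick, stand-alone sketch of the pattern being removed, here is a clock-id-to-base table built with designated initializers; the enum names and values are placeholders for illustration, not the kernel's clockid or hrtimer base constants.

#include <stdio.h>

enum clock_id { CLK_REALTIME, CLK_MONOTONIC, CLK_BOOTTIME, MAX_CLK };
enum base_id  { BASE_REALTIME, BASE_MONOTONIC, BASE_BOOTTIME };

/*
 * Designated initializers fill the table at compile time; any entry
 * not named defaults to zero, mirroring the array the hunk replaces.
 */
static const int clock_to_base[MAX_CLK] = {
	[CLK_REALTIME]  = BASE_REALTIME,
	[CLK_MONOTONIC] = BASE_MONOTONIC,
	[CLK_BOOTTIME]  = BASE_BOOTTIME,
};

static int clockid_to_base(enum clock_id id)
{
	return clock_to_base[id];
}

int main(void)
{
	printf("boottime -> base %d\n", clockid_to_base(CLK_BOOTTIME));
	return 0;
}
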
diff --git a/trunk/kernel/irq/debug.h b/trunk/kernel/irq/debug.h index 97a8bfadc88a..306cba37e9a5 100644 --- a/trunk/kernel/irq/debug.h +++ b/trunk/kernel/irq/debug.h @@ -27,7 +27,6 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) P(IRQ_PER_CPU); P(IRQ_NOPROBE); P(IRQ_NOREQUEST); - P(IRQ_NOTHREAD); P(IRQ_NOAUTOEN); PS(IRQS_AUTODETECT); diff --git a/trunk/kernel/irq/generic-chip.c b/trunk/kernel/irq/generic-chip.c deleted file mode 100644 index 31a9db711906..000000000000 --- a/trunk/kernel/irq/generic-chip.c +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Library implementing the most common irq chip callback functions - * - * Copyright (C) 2011, Thomas Gleixner - */ -#include -#include -#include -#include -#include -#include - -#include "internals.h" - -static LIST_HEAD(gc_list); -static DEFINE_RAW_SPINLOCK(gc_lock); - -static inline struct irq_chip_regs *cur_regs(struct irq_data *d) -{ - return &container_of(d->chip, struct irq_chip_type, chip)->regs; -} - -/** - * irq_gc_noop - NOOP function - * @d: irq_data - */ -void irq_gc_noop(struct irq_data *d) -{ -} - -/** - * irq_gc_mask_disable_reg - Mask chip via disable register - * @d: irq_data - * - * Chip has separate enable/disable registers instead of a single mask - * register. - */ -void irq_gc_mask_disable_reg(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - u32 mask = 1 << (d->irq - gc->irq_base); - - irq_gc_lock(gc); - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable); - gc->mask_cache &= ~mask; - irq_gc_unlock(gc); -} - -/** - * irq_gc_mask_set_mask_bit - Mask chip via setting bit in mask register - * @d: irq_data - * - * Chip has a single mask register. Values of this register are cached - * and protected by gc->lock - */ -void irq_gc_mask_set_bit(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - u32 mask = 1 << (d->irq - gc->irq_base); - - irq_gc_lock(gc); - gc->mask_cache |= mask; - irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); - irq_gc_unlock(gc); -} - -/** - * irq_gc_mask_set_mask_bit - Mask chip via clearing bit in mask register - * @d: irq_data - * - * Chip has a single mask register. Values of this register are cached - * and protected by gc->lock - */ -void irq_gc_mask_clr_bit(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - u32 mask = 1 << (d->irq - gc->irq_base); - - irq_gc_lock(gc); - gc->mask_cache &= ~mask; - irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); - irq_gc_unlock(gc); -} - -/** - * irq_gc_unmask_enable_reg - Unmask chip via enable register - * @d: irq_data - * - * Chip has separate enable/disable registers instead of a single mask - * register. 
- */ -void irq_gc_unmask_enable_reg(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - u32 mask = 1 << (d->irq - gc->irq_base); - - irq_gc_lock(gc); - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable); - gc->mask_cache |= mask; - irq_gc_unlock(gc); -} - -/** - * irq_gc_ack - Ack pending interrupt - * @d: irq_data - */ -void irq_gc_ack(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - u32 mask = 1 << (d->irq - gc->irq_base); - - irq_gc_lock(gc); - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); - irq_gc_unlock(gc); -} - -/** - * irq_gc_mask_disable_reg_and_ack- Mask and ack pending interrupt - * @d: irq_data - */ -void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - u32 mask = 1 << (d->irq - gc->irq_base); - - irq_gc_lock(gc); - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask); - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); - irq_gc_unlock(gc); -} - -/** - * irq_gc_eoi - EOI interrupt - * @d: irq_data - */ -void irq_gc_eoi(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - u32 mask = 1 << (d->irq - gc->irq_base); - - irq_gc_lock(gc); - irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi); - irq_gc_unlock(gc); -} - -/** - * irq_gc_set_wake - Set/clr wake bit for an interrupt - * @d: irq_data - * - * For chips where the wake from suspend functionality is not - * configured in a separate register and the wakeup active state is - * just stored in a bitmask. - */ -int irq_gc_set_wake(struct irq_data *d, unsigned int on) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - u32 mask = 1 << (d->irq - gc->irq_base); - - if (!(mask & gc->wake_enabled)) - return -EINVAL; - - irq_gc_lock(gc); - if (on) - gc->wake_active |= mask; - else - gc->wake_active &= ~mask; - irq_gc_unlock(gc); - return 0; -} - -/** - * irq_alloc_generic_chip - Allocate a generic chip and initialize it - * @name: Name of the irq chip - * @num_ct: Number of irq_chip_type instances associated with this - * @irq_base: Interrupt base nr for this chip - * @reg_base: Register base address (virtual) - * @handler: Default flow handler associated with this chip - * - * Returns an initialized irq_chip_generic structure. The chip defaults - * to the primary (index 0) irq_chip_type and @handler - */ -struct irq_chip_generic * -irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base, - void __iomem *reg_base, irq_flow_handler_t handler) -{ - struct irq_chip_generic *gc; - unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); - - gc = kzalloc(sz, GFP_KERNEL); - if (gc) { - raw_spin_lock_init(&gc->lock); - gc->num_ct = num_ct; - gc->irq_base = irq_base; - gc->reg_base = reg_base; - gc->chip_types->chip.name = name; - gc->chip_types->handler = handler; - } - return gc; -} - -/* - * Separate lockdep class for interrupt chip which can nest irq_desc - * lock. - */ -static struct lock_class_key irq_nested_lock_class; - -/** - * irq_setup_generic_chip - Setup a range of interrupts with a generic chip - * @gc: Generic irq chip holding all data - * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base - * @flags: Flags for initialization - * @clr: IRQ_* bits to clear - * @set: IRQ_* bits to set - * - * Set up max. 32 interrupts starting from gc->irq_base. Note, this - * initializes all interrupts to the primary irq_chip_type and its - * associated handler. 
- */ -void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, - enum irq_gc_flags flags, unsigned int clr, - unsigned int set) -{ - struct irq_chip_type *ct = gc->chip_types; - unsigned int i; - - raw_spin_lock(&gc_lock); - list_add_tail(&gc->list, &gc_list); - raw_spin_unlock(&gc_lock); - - /* Init mask cache ? */ - if (flags & IRQ_GC_INIT_MASK_CACHE) - gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); - - for (i = gc->irq_base; msk; msk >>= 1, i++) { - if (!msk & 0x01) - continue; - - if (flags & IRQ_GC_INIT_NESTED_LOCK) - irq_set_lockdep_class(i, &irq_nested_lock_class); - - irq_set_chip_and_handler(i, &ct->chip, ct->handler); - irq_set_chip_data(i, gc); - irq_modify_status(i, clr, set); - } - gc->irq_cnt = i - gc->irq_base; -} - -/** - * irq_setup_alt_chip - Switch to alternative chip - * @d: irq_data for this interrupt - * @type Flow type to be initialized - * - * Only to be called from chip->irq_set_type() callbacks. - */ -int irq_setup_alt_chip(struct irq_data *d, unsigned int type) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - struct irq_chip_type *ct = gc->chip_types; - unsigned int i; - - for (i = 0; i < gc->num_ct; i++, ct++) { - if (ct->type & type) { - d->chip = &ct->chip; - irq_data_to_desc(d)->handle_irq = ct->handler; - return 0; - } - } - return -EINVAL; -} - -/** - * irq_remove_generic_chip - Remove a chip - * @gc: Generic irq chip holding all data - * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base - * @clr: IRQ_* bits to clear - * @set: IRQ_* bits to set - * - * Remove up to 32 interrupts starting from gc->irq_base. - */ -void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, - unsigned int clr, unsigned int set) -{ - unsigned int i = gc->irq_base; - - raw_spin_lock(&gc_lock); - list_del(&gc->list); - raw_spin_unlock(&gc_lock); - - for (; msk; msk >>= 1, i++) { - if (!msk & 0x01) - continue; - - /* Remove handler first. 
That will mask the irq line */ - irq_set_handler(i, NULL); - irq_set_chip(i, &no_irq_chip); - irq_set_chip_data(i, NULL); - irq_modify_status(i, clr, set); - } -} - -#ifdef CONFIG_PM -static int irq_gc_suspend(void) -{ - struct irq_chip_generic *gc; - - list_for_each_entry(gc, &gc_list, list) { - struct irq_chip_type *ct = gc->chip_types; - - if (ct->chip.irq_suspend) - ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base)); - } - return 0; -} - -static void irq_gc_resume(void) -{ - struct irq_chip_generic *gc; - - list_for_each_entry(gc, &gc_list, list) { - struct irq_chip_type *ct = gc->chip_types; - - if (ct->chip.irq_resume) - ct->chip.irq_resume(irq_get_irq_data(gc->irq_base)); - } -} -#else -#define irq_gc_suspend NULL -#define irq_gc_resume NULL -#endif - -static void irq_gc_shutdown(void) -{ - struct irq_chip_generic *gc; - - list_for_each_entry(gc, &gc_list, list) { - struct irq_chip_type *ct = gc->chip_types; - - if (ct->chip.irq_pm_shutdown) - ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base)); - } -} - -static struct syscore_ops irq_gc_syscore_ops = { - .suspend = irq_gc_suspend, - .resume = irq_gc_resume, - .shutdown = irq_gc_shutdown, -}; - -static int __init irq_gc_init_ops(void) -{ - register_syscore_ops(&irq_gc_syscore_ops); - return 0; -} -device_initcall(irq_gc_init_ops); diff --git a/trunk/kernel/irq/irqdesc.c b/trunk/kernel/irq/irqdesc.c index 886e80347b32..2c039c9b9383 100644 --- a/trunk/kernel/irq/irqdesc.c +++ b/trunk/kernel/irq/irqdesc.c @@ -22,7 +22,7 @@ */ static struct lock_class_key irq_desc_lock_class; -#if defined(CONFIG_SMP) +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) static void __init init_irq_default_affinity(void) { alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); @@ -290,22 +290,6 @@ static int irq_expand_nr_irqs(unsigned int nr) #endif /* !CONFIG_SPARSE_IRQ */ -/** - * generic_handle_irq - Invoke the handler for a particular irq - * @irq: The irq number to handle - * - */ -int generic_handle_irq(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - - if (!desc) - return -EINVAL; - generic_handle_irq_desc(irq, desc); - return 0; -} -EXPORT_SYMBOL_GPL(generic_handle_irq); - /* Dynamic interrupt handling */ /** @@ -327,7 +311,6 @@ void irq_free_descs(unsigned int from, unsigned int cnt) bitmap_clear(allocated_irqs, from, cnt); mutex_unlock(&sparse_irq_lock); } -EXPORT_SYMBOL_GPL(irq_free_descs); /** * irq_alloc_descs - allocate and initialize a range of irq descriptors @@ -368,7 +351,6 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) mutex_unlock(&sparse_irq_lock); return ret; } -EXPORT_SYMBOL_GPL(irq_alloc_descs); /** * irq_reserve_irqs - mark irqs allocated @@ -448,6 +430,7 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; } +#ifdef CONFIG_GENERIC_HARDIRQS unsigned int kstat_irqs(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); @@ -460,3 +443,4 @@ unsigned int kstat_irqs(unsigned int irq) sum += *per_cpu_ptr(desc->kstat_irqs, cpu); return sum; } +#endif /* CONFIG_GENERIC_HARDIRQS */ diff --git a/trunk/kernel/irq/manage.c b/trunk/kernel/irq/manage.c index f7ce0021e1c4..07c1611f3899 100644 --- a/trunk/kernel/irq/manage.c +++ b/trunk/kernel/irq/manage.c @@ -900,8 +900,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ new->handler = irq_nested_primary_handler; } else { - if (irq_settings_can_thread(desc)) - irq_setup_forced_threading(new); + irq_setup_forced_threading(new); } /* diff --git 
a/trunk/kernel/irq/proc.c b/trunk/kernel/irq/proc.c index 834899f2500f..dd201bd35103 100644 --- a/trunk/kernel/irq/proc.c +++ b/trunk/kernel/irq/proc.c @@ -419,7 +419,7 @@ int show_interrupts(struct seq_file *p, void *v) } else { seq_printf(p, " %8s", "None"); } -#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL +#ifdef CONFIG_GENIRC_IRQ_SHOW_LEVEL seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); #endif if (desc->name) diff --git a/trunk/kernel/irq/settings.h b/trunk/kernel/irq/settings.h index f1667833d444..0d91730b6330 100644 --- a/trunk/kernel/irq/settings.h +++ b/trunk/kernel/irq/settings.h @@ -8,7 +8,6 @@ enum { _IRQ_LEVEL = IRQ_LEVEL, _IRQ_NOPROBE = IRQ_NOPROBE, _IRQ_NOREQUEST = IRQ_NOREQUEST, - _IRQ_NOTHREAD = IRQ_NOTHREAD, _IRQ_NOAUTOEN = IRQ_NOAUTOEN, _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, _IRQ_NO_BALANCING = IRQ_NO_BALANCING, @@ -21,7 +20,6 @@ enum { #define IRQ_LEVEL GOT_YOU_MORON #define IRQ_NOPROBE GOT_YOU_MORON #define IRQ_NOREQUEST GOT_YOU_MORON -#define IRQ_NOTHREAD GOT_YOU_MORON #define IRQ_NOAUTOEN GOT_YOU_MORON #define IRQ_NESTED_THREAD GOT_YOU_MORON #undef IRQF_MODIFY_MASK @@ -96,21 +94,6 @@ static inline void irq_settings_set_norequest(struct irq_desc *desc) desc->status_use_accessors |= _IRQ_NOREQUEST; } -static inline bool irq_settings_can_thread(struct irq_desc *desc) -{ - return !(desc->status_use_accessors & _IRQ_NOTHREAD); -} - -static inline void irq_settings_clr_nothread(struct irq_desc *desc) -{ - desc->status_use_accessors &= ~_IRQ_NOTHREAD; -} - -static inline void irq_settings_set_nothread(struct irq_desc *desc) -{ - desc->status_use_accessors |= _IRQ_NOTHREAD; -} - static inline bool irq_settings_can_probe(struct irq_desc *desc) { return !(desc->status_use_accessors & _IRQ_NOPROBE); diff --git a/trunk/kernel/jump_label.c b/trunk/kernel/jump_label.c index 74d1c099fbd1..3b79bd938330 100644 --- a/trunk/kernel/jump_label.c +++ b/trunk/kernel/jump_label.c @@ -2,23 +2,43 @@ * jump label support * * Copyright (C) 2009 Jason Baron - * Copyright (C) 2011 Peter Zijlstra * */ +#include #include #include #include #include +#include #include #include #include -#include #ifdef HAVE_JUMP_LABEL +#define JUMP_LABEL_HASH_BITS 6 +#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) +static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; + /* mutex to protect coming/going of the the jump_label table */ static DEFINE_MUTEX(jump_label_mutex); +struct jump_label_entry { + struct hlist_node hlist; + struct jump_entry *table; + int nr_entries; + /* hang modules off here */ + struct hlist_head modules; + unsigned long key; +}; + +struct jump_label_module_entry { + struct hlist_node hlist; + struct jump_entry *table; + int nr_entries; + struct module *mod; +}; + void jump_label_lock(void) { mutex_lock(&jump_label_mutex); @@ -29,11 +49,6 @@ void jump_label_unlock(void) mutex_unlock(&jump_label_mutex); } -bool jump_label_enabled(struct jump_label_key *key) -{ - return !!atomic_read(&key->enabled); -} - static int jump_label_cmp(const void *a, const void *b) { const struct jump_entry *jea = a; @@ -49,7 +64,7 @@ static int jump_label_cmp(const void *a, const void *b) } static void -jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) +sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) { unsigned long size; @@ -58,25 +73,118 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); } -static void jump_label_update(struct 
jump_label_key *key, int enable); +static struct jump_label_entry *get_jump_label_entry(jump_label_t key) +{ + struct hlist_head *head; + struct hlist_node *node; + struct jump_label_entry *e; + u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); + + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (key == e->key) + return e; + } + return NULL; +} -void jump_label_inc(struct jump_label_key *key) +static struct jump_label_entry * +add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) { - if (atomic_inc_not_zero(&key->enabled)) - return; + struct hlist_head *head; + struct jump_label_entry *e; + u32 hash; + + e = get_jump_label_entry(key); + if (e) + return ERR_PTR(-EEXIST); + + e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + + hash = jhash((void *)&key, sizeof(jump_label_t), 0); + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; + e->key = key; + e->table = table; + e->nr_entries = nr_entries; + INIT_HLIST_HEAD(&(e->modules)); + hlist_add_head(&e->hlist, head); + return e; +} - jump_label_lock(); - if (atomic_add_return(1, &key->enabled) == 1) - jump_label_update(key, JUMP_LABEL_ENABLE); - jump_label_unlock(); +static int +build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) +{ + struct jump_entry *iter, *iter_begin; + struct jump_label_entry *entry; + int count; + + sort_jump_label_entries(start, stop); + iter = start; + while (iter < stop) { + entry = get_jump_label_entry(iter->key); + if (!entry) { + iter_begin = iter; + count = 0; + while ((iter < stop) && + (iter->key == iter_begin->key)) { + iter++; + count++; + } + entry = add_jump_label_entry(iter_begin->key, + count, iter_begin); + if (IS_ERR(entry)) + return PTR_ERR(entry); + } else { + WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); + return -1; + } + } + return 0; } -void jump_label_dec(struct jump_label_key *key) +/*** + * jump_label_update - update jump label text + * @key - key value associated with a a jump label + * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE + * + * Will enable/disable the jump for jump label @key, depending on the + * value of @type. 
+ * + */ + +void jump_label_update(unsigned long key, enum jump_label_type type) { - if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) - return; + struct jump_entry *iter; + struct jump_label_entry *entry; + struct hlist_node *module_node; + struct jump_label_module_entry *e_module; + int count; - jump_label_update(key, JUMP_LABEL_DISABLE); + jump_label_lock(); + entry = get_jump_label_entry((jump_label_t)key); + if (entry) { + count = entry->nr_entries; + iter = entry->table; + while (count--) { + if (kernel_text_address(iter->code)) + arch_jump_label_transform(iter, type); + iter++; + } + /* eanble/disable jump labels in modules */ + hlist_for_each_entry(e_module, module_node, &(entry->modules), + hlist) { + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (iter->key && + kernel_text_address(iter->code)) + arch_jump_label_transform(iter, type); + iter++; + } + } + } jump_label_unlock(); } @@ -89,33 +197,77 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end) return 0; } -static int __jump_label_text_reserved(struct jump_entry *iter_start, - struct jump_entry *iter_stop, void *start, void *end) +#ifdef CONFIG_MODULES + +static int module_conflict(void *start, void *end) { + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; struct jump_entry *iter; + int i, count; + int conflict = 0; + + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (addr_conflict(iter, start, end)) { + conflict = 1; + goto out; + } + iter++; + } + } + } + } +out: + return conflict; +} + +#endif + +/*** + * jump_label_text_reserved - check if addr range is reserved + * @start: start text addr + * @end: end text addr + * + * checks if the text addr located between @start and @end + * overlaps with any of the jump label patch addresses. Code + * that wants to modify kernel text should first verify that + * it does not overlap with any of the jump label addresses. + * Caller must hold jump_label_mutex. + * + * returns 1 if there is an overlap, 0 otherwise + */ +int jump_label_text_reserved(void *start, void *end) +{ + struct jump_entry *iter; + struct jump_entry *iter_start = __start___jump_table; + struct jump_entry *iter_stop = __start___jump_table; + int conflict = 0; iter = iter_start; while (iter < iter_stop) { - if (addr_conflict(iter, start, end)) - return 1; + if (addr_conflict(iter, start, end)) { + conflict = 1; + goto out; + } iter++; } - return 0; -} - -static void __jump_label_update(struct jump_label_key *key, - struct jump_entry *entry, int enable) -{ - for (; entry->key == (jump_label_t)(unsigned long)key; entry++) { - /* - * entry->code set to 0 invalidates module init text sections - * kernel_text_address() verifies we are not in core kernel - * init code, see jump_label_invalidate_module_init(). 
- */ - if (entry->code && kernel_text_address(entry->code)) - arch_jump_label_transform(entry, enable); - } + /* now check modules */ +#ifdef CONFIG_MODULES + conflict = module_conflict(start, end); +#endif +out: + return conflict; } /* @@ -125,173 +277,142 @@ void __weak arch_jump_label_text_poke_early(jump_label_t addr) { } -static __init int jump_label_init(void) +static __init int init_jump_label(void) { + int ret; struct jump_entry *iter_start = __start___jump_table; struct jump_entry *iter_stop = __stop___jump_table; - struct jump_label_key *key = NULL; struct jump_entry *iter; jump_label_lock(); - jump_label_sort_entries(iter_start, iter_stop); - - for (iter = iter_start; iter < iter_stop; iter++) { + ret = build_jump_label_hashtable(__start___jump_table, + __stop___jump_table); + iter = iter_start; + while (iter < iter_stop) { arch_jump_label_text_poke_early(iter->code); - if (iter->key == (jump_label_t)(unsigned long)key) - continue; - - key = (struct jump_label_key *)(unsigned long)iter->key; - atomic_set(&key->enabled, 0); - key->entries = iter; -#ifdef CONFIG_MODULES - key->next = NULL; -#endif + iter++; } jump_label_unlock(); - - return 0; + return ret; } -early_initcall(jump_label_init); +early_initcall(init_jump_label); #ifdef CONFIG_MODULES -struct jump_label_mod { - struct jump_label_mod *next; - struct jump_entry *entries; - struct module *mod; -}; - -static int __jump_label_mod_text_reserved(void *start, void *end) -{ - struct module *mod; - - mod = __module_text_address((unsigned long)start); - if (!mod) - return 0; - - WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); - - return __jump_label_text_reserved(mod->jump_entries, - mod->jump_entries + mod->num_jump_entries, - start, end); -} - -static void __jump_label_mod_update(struct jump_label_key *key, int enable) -{ - struct jump_label_mod *mod = key->next; - - while (mod) { - __jump_label_update(key, mod->entries, enable); - mod = mod->next; - } -} - -/*** - * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() - * @mod: module to patch - * - * Allow for run-time selection of the optimal nops. Before the module - * loads patch these with arch_get_jump_label_nop(), which is specified by - * the arch specific jump label code. 
- */ -void jump_label_apply_nops(struct module *mod) +static struct jump_label_module_entry * +add_jump_label_module_entry(struct jump_label_entry *entry, + struct jump_entry *iter_begin, + int count, struct module *mod) { - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - - /* if the module doesn't have jump label entries, just return */ - if (iter_start == iter_stop) - return; - - for (iter = iter_start; iter < iter_stop; iter++) - arch_jump_label_text_poke_early(iter->code); + struct jump_label_module_entry *e; + + e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + e->mod = mod; + e->nr_entries = count; + e->table = iter_begin; + hlist_add_head(&e->hlist, &entry->modules); + return e; } -static int jump_label_add_module(struct module *mod) +static int add_jump_label_module(struct module *mod) { - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - struct jump_label_key *key = NULL; - struct jump_label_mod *jlm; + struct jump_entry *iter, *iter_begin; + struct jump_label_entry *entry; + struct jump_label_module_entry *module_entry; + int count; /* if the module doesn't have jump label entries, just return */ - if (iter_start == iter_stop) + if (!mod->num_jump_entries) return 0; - jump_label_sort_entries(iter_start, iter_stop); - - for (iter = iter_start; iter < iter_stop; iter++) { - if (iter->key == (jump_label_t)(unsigned long)key) - continue; - - key = (struct jump_label_key *)(unsigned long)iter->key; - - if (__module_address(iter->key) == mod) { - atomic_set(&key->enabled, 0); - key->entries = iter; - key->next = NULL; - continue; + sort_jump_label_entries(mod->jump_entries, + mod->jump_entries + mod->num_jump_entries); + iter = mod->jump_entries; + while (iter < mod->jump_entries + mod->num_jump_entries) { + entry = get_jump_label_entry(iter->key); + iter_begin = iter; + count = 0; + while ((iter < mod->jump_entries + mod->num_jump_entries) && + (iter->key == iter_begin->key)) { + iter++; + count++; } - - jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); - if (!jlm) - return -ENOMEM; - - jlm->mod = mod; - jlm->entries = iter; - jlm->next = key->next; - key->next = jlm; - - if (jump_label_enabled(key)) - __jump_label_update(key, iter, JUMP_LABEL_ENABLE); + if (!entry) { + entry = add_jump_label_entry(iter_begin->key, 0, NULL); + if (IS_ERR(entry)) + return PTR_ERR(entry); + } + module_entry = add_jump_label_module_entry(entry, iter_begin, + count, mod); + if (IS_ERR(module_entry)) + return PTR_ERR(module_entry); } - return 0; } -static void jump_label_del_module(struct module *mod) +static void remove_jump_label_module(struct module *mod) { - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - struct jump_label_key *key = NULL; - struct jump_label_mod *jlm, **prev; + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; + int i; - for (iter = iter_start; iter < iter_stop; iter++) { - if (iter->key == (jump_label_t)(unsigned long)key) - continue; - - key = (struct jump_label_key *)(unsigned long)iter->key; - - if (__module_address(iter->key) == mod) - continue; - - prev = &key->next; - jlm = key->next; - - while (jlm && 
jlm->mod != mod) { - prev = &jlm->next; - jlm = jlm->next; - } + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; - if (jlm) { - *prev = jlm->next; - kfree(jlm); + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + if (e_module->mod == mod) { + hlist_del(&e_module->hlist); + kfree(e_module); + } + } + if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { + hlist_del(&e->hlist); + kfree(e); + } } } } -static void jump_label_invalidate_module_init(struct module *mod) +static void remove_jump_label_module_init(struct module *mod) { - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; + struct hlist_head *head; + struct hlist_node *node, *node_next, *module_node, *module_node_next; + struct jump_label_entry *e; + struct jump_label_module_entry *e_module; struct jump_entry *iter; + int i, count; + + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; - for (iter = iter_start; iter < iter_stop; iter++) { - if (within_module_init(iter->code, mod)) - iter->code = 0; + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { + head = &jump_label_table[i]; + hlist_for_each_entry_safe(e, node, node_next, head, hlist) { + hlist_for_each_entry_safe(e_module, module_node, + module_node_next, + &(e->modules), hlist) { + if (e_module->mod != mod) + continue; + count = e_module->nr_entries; + iter = e_module->table; + while (count--) { + if (within_module_init(iter->code, mod)) + iter->key = 0; + iter++; + } + } + } } } @@ -305,77 +426,59 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, switch (val) { case MODULE_STATE_COMING: jump_label_lock(); - ret = jump_label_add_module(mod); + ret = add_jump_label_module(mod); if (ret) - jump_label_del_module(mod); + remove_jump_label_module(mod); jump_label_unlock(); break; case MODULE_STATE_GOING: jump_label_lock(); - jump_label_del_module(mod); + remove_jump_label_module(mod); jump_label_unlock(); break; case MODULE_STATE_LIVE: jump_label_lock(); - jump_label_invalidate_module_init(mod); + remove_jump_label_module_init(mod); jump_label_unlock(); break; } - - return notifier_from_errno(ret); -} - -struct notifier_block jump_label_module_nb = { - .notifier_call = jump_label_module_notify, - .priority = 1, /* higher than tracepoints */ -}; - -static __init int jump_label_init_module(void) -{ - return register_module_notifier(&jump_label_module_nb); + return ret; } -early_initcall(jump_label_init_module); - -#endif /* CONFIG_MODULES */ /*** - * jump_label_text_reserved - check if addr range is reserved - * @start: start text addr - * @end: end text addr - * - * checks if the text addr located between @start and @end - * overlaps with any of the jump label patch addresses. Code - * that wants to modify kernel text should first verify that - * it does not overlap with any of the jump label addresses. - * Caller must hold jump_label_mutex. + * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() + * @mod: module to patch * - * returns 1 if there is an overlap, 0 otherwise + * Allow for run-time selection of the optimal nops. Before the module + * loads patch these with arch_get_jump_label_nop(), which is specified by + * the arch specific jump label code. 
*/ -int jump_label_text_reserved(void *start, void *end) +void jump_label_apply_nops(struct module *mod) { - int ret = __jump_label_text_reserved(__start___jump_table, - __stop___jump_table, start, end); + struct jump_entry *iter; - if (ret) - return ret; + /* if the module doesn't have jump label entries, just return */ + if (!mod->num_jump_entries) + return; -#ifdef CONFIG_MODULES - ret = __jump_label_mod_text_reserved(start, end); -#endif - return ret; + iter = mod->jump_entries; + while (iter < mod->jump_entries + mod->num_jump_entries) { + arch_jump_label_text_poke_early(iter->code); + iter++; + } } -static void jump_label_update(struct jump_label_key *key, int enable) -{ - struct jump_entry *entry = key->entries; - - /* if there are no users, entry can be NULL */ - if (entry) - __jump_label_update(key, entry, enable); +struct notifier_block jump_label_module_nb = { + .notifier_call = jump_label_module_notify, + .priority = 0, +}; -#ifdef CONFIG_MODULES - __jump_label_mod_update(key, enable); -#endif +static __init int init_jump_label_module(void) +{ + return register_module_notifier(&jump_label_module_nb); } +early_initcall(init_jump_label_module); + +#endif /* CONFIG_MODULES */ #endif diff --git a/trunk/kernel/kexec.c b/trunk/kernel/kexec.c index 8d814cbc8109..87b77de03dd3 100644 --- a/trunk/kernel/kexec.c +++ b/trunk/kernel/kexec.c @@ -1531,7 +1531,13 @@ int kernel_kexec(void) if (error) goto Enable_cpus; local_irq_disable(); - error = syscore_suspend(); + /* Suspend system devices */ + error = sysdev_suspend(PMSG_FREEZE); + if (!error) { + error = syscore_suspend(); + if (error) + sysdev_resume(); + } if (error) goto Enable_irqs; } else @@ -1547,6 +1553,7 @@ int kernel_kexec(void) #ifdef CONFIG_KEXEC_JUMP if (kexec_image->preserve_context) { syscore_resume(); + sysdev_resume(); Enable_irqs: local_irq_enable(); Enable_cpus: diff --git a/trunk/kernel/kmod.c b/trunk/kernel/kmod.c index 5ae0ff38425f..9cd0591c96a2 100644 --- a/trunk/kernel/kmod.c +++ b/trunk/kernel/kmod.c @@ -245,6 +245,7 @@ static void __call_usermodehelper(struct work_struct *work) } } +#ifdef CONFIG_PM_SLEEP /* * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY * (used for preventing user land processes from being created after the user @@ -300,15 +301,6 @@ void usermodehelper_enable(void) usermodehelper_disabled = 0; } -/** - * usermodehelper_is_disabled - check if new helpers are allowed to be started - */ -bool usermodehelper_is_disabled(void) -{ - return usermodehelper_disabled; -} -EXPORT_SYMBOL_GPL(usermodehelper_is_disabled); - static void helper_lock(void) { atomic_inc(&running_helpers); @@ -320,6 +312,12 @@ static void helper_unlock(void) if (atomic_dec_and_test(&running_helpers)) wake_up(&running_helpers_waitq); } +#else /* CONFIG_PM_SLEEP */ +#define usermodehelper_disabled 0 + +static inline void helper_lock(void) {} +static inline void helper_unlock(void) {} +#endif /* CONFIG_PM_SLEEP */ /** * call_usermodehelper_setup - prepare to call a usermode helper diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c index 63437d065ac8..53a68956f131 100644 --- a/trunk/kernel/lockdep.c +++ b/trunk/kernel/lockdep.c @@ -490,18 +490,6 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) usage[i] = '\0'; } -static int __print_lock_name(struct lock_class *class) -{ - char str[KSYM_NAME_LEN]; - const char *name; - - name = class->name; - if (!name) - name = __get_key_name(class->key, str); - - return printk("%s", name); -} - static void print_lock_name(struct 
lock_class *class) { char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; @@ -1065,56 +1053,6 @@ print_circular_bug_entry(struct lock_list *target, int depth) return 0; } -static void -print_circular_lock_scenario(struct held_lock *src, - struct held_lock *tgt, - struct lock_list *prt) -{ - struct lock_class *source = hlock_class(src); - struct lock_class *target = hlock_class(tgt); - struct lock_class *parent = prt->class; - - /* - * A direct locking problem where unsafe_class lock is taken - * directly by safe_class lock, then all we need to show - * is the deadlock scenario, as it is obvious that the - * unsafe lock is taken under the safe lock. - * - * But if there is a chain instead, where the safe lock takes - * an intermediate lock (middle_class) where this lock is - * not the same as the safe lock, then the lock chain is - * used to describe the problem. Otherwise we would need - * to show a different CPU case for each link in the chain - * from the safe_class lock to the unsafe_class lock. - */ - if (parent != source) { - printk("Chain exists of:\n "); - __print_lock_name(source); - printk(" --> "); - __print_lock_name(parent); - printk(" --> "); - __print_lock_name(target); - printk("\n\n"); - } - - printk(" Possible unsafe locking scenario:\n\n"); - printk(" CPU0 CPU1\n"); - printk(" ---- ----\n"); - printk(" lock("); - __print_lock_name(target); - printk(");\n"); - printk(" lock("); - __print_lock_name(parent); - printk(");\n"); - printk(" lock("); - __print_lock_name(target); - printk(");\n"); - printk(" lock("); - __print_lock_name(source); - printk(");\n"); - printk("\n *** DEADLOCK ***\n\n"); -} - /* * When a circular dependency is detected, print the * header first: @@ -1158,7 +1096,6 @@ static noinline int print_circular_bug(struct lock_list *this, { struct task_struct *curr = current; struct lock_list *parent; - struct lock_list *first_parent; int depth; if (!debug_locks_off_graph_unlock() || debug_locks_silent) @@ -1172,7 +1109,6 @@ static noinline int print_circular_bug(struct lock_list *this, print_circular_bug_header(target, depth, check_src, check_tgt); parent = get_lock_parent(target); - first_parent = parent; while (parent) { print_circular_bug_entry(parent, --depth); @@ -1180,9 +1116,6 @@ static noinline int print_circular_bug(struct lock_list *this, } printk("\nother info that might help us debug this:\n\n"); - print_circular_lock_scenario(check_src, check_tgt, - first_parent); - lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); @@ -1381,7 +1314,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf, printk("\n"); if (depth == 0 && (entry != root)) { - printk("lockdep:%s bad path found in chain graph\n", __func__); + printk("lockdep:%s bad BFS generated tree\n", __func__); break; } @@ -1392,62 +1325,6 @@ print_shortest_lock_dependencies(struct lock_list *leaf, return; } -static void -print_irq_lock_scenario(struct lock_list *safe_entry, - struct lock_list *unsafe_entry, - struct lock_class *prev_class, - struct lock_class *next_class) -{ - struct lock_class *safe_class = safe_entry->class; - struct lock_class *unsafe_class = unsafe_entry->class; - struct lock_class *middle_class = prev_class; - - if (middle_class == safe_class) - middle_class = next_class; - - /* - * A direct locking problem where unsafe_class lock is taken - * directly by safe_class lock, then all we need to show - * is the deadlock scenario, as it is obvious that the - * unsafe lock is taken under the safe lock. 
- * - * But if there is a chain instead, where the safe lock takes - * an intermediate lock (middle_class) where this lock is - * not the same as the safe lock, then the lock chain is - * used to describe the problem. Otherwise we would need - * to show a different CPU case for each link in the chain - * from the safe_class lock to the unsafe_class lock. - */ - if (middle_class != unsafe_class) { - printk("Chain exists of:\n "); - __print_lock_name(safe_class); - printk(" --> "); - __print_lock_name(middle_class); - printk(" --> "); - __print_lock_name(unsafe_class); - printk("\n\n"); - } - - printk(" Possible interrupt unsafe locking scenario:\n\n"); - printk(" CPU0 CPU1\n"); - printk(" ---- ----\n"); - printk(" lock("); - __print_lock_name(unsafe_class); - printk(");\n"); - printk(" local_irq_disable();\n"); - printk(" lock("); - __print_lock_name(safe_class); - printk(");\n"); - printk(" lock("); - __print_lock_name(middle_class); - printk(");\n"); - printk(" \n"); - printk(" lock("); - __print_lock_name(safe_class); - printk(");\n"); - printk("\n *** DEADLOCK ***\n\n"); -} - static int print_bad_irq_dependency(struct task_struct *curr, struct lock_list *prev_root, @@ -1499,9 +1376,6 @@ print_bad_irq_dependency(struct task_struct *curr, print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); printk("\nother info that might help us debug this:\n\n"); - print_irq_lock_scenario(backwards_entry, forwards_entry, - hlock_class(prev), hlock_class(next)); - lockdep_print_held_locks(curr); printk("\nthe dependencies between %s-irq-safe lock", irqclass); @@ -1665,26 +1539,6 @@ static inline void inc_chains(void) #endif -static void -print_deadlock_scenario(struct held_lock *nxt, - struct held_lock *prv) -{ - struct lock_class *next = hlock_class(nxt); - struct lock_class *prev = hlock_class(prv); - - printk(" Possible unsafe locking scenario:\n\n"); - printk(" CPU0\n"); - printk(" ----\n"); - printk(" lock("); - __print_lock_name(prev); - printk(");\n"); - printk(" lock("); - __print_lock_name(next); - printk(");\n"); - printk("\n *** DEADLOCK ***\n\n"); - printk(" May be due to missing lock nesting notation\n\n"); -} - static int print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, struct held_lock *next) @@ -1703,7 +1557,6 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, print_lock(prev); printk("\nother info that might help us debug this:\n"); - print_deadlock_scenario(next, prev); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); @@ -1973,7 +1826,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, struct list_head *hash_head = chainhashentry(chain_key); struct lock_chain *chain; struct held_lock *hlock_curr, *hlock_next; - int i, j; + int i, j, n, cn; if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; @@ -2033,9 +1886,15 @@ static inline int lookup_chain_cache(struct task_struct *curr, } i++; chain->depth = curr->lockdep_depth + 1 - i; - if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { - chain->base = nr_chain_hlocks; - nr_chain_hlocks += chain->depth; + cn = nr_chain_hlocks; + while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) { + n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth); + if (n == cn) + break; + cn = n; + } + if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { + chain->base = cn; for (j = 0; j < chain->depth - 1; j++, i++) { int lock_id = curr->held_locks[i].class_idx - 1; chain_hlocks[chain->base + j] = lock_id; @@ -2152,24 +2011,6 @@ static void 
check_chain_key(struct task_struct *curr) #endif } -static void -print_usage_bug_scenario(struct held_lock *lock) -{ - struct lock_class *class = hlock_class(lock); - - printk(" Possible unsafe locking scenario:\n\n"); - printk(" CPU0\n"); - printk(" ----\n"); - printk(" lock("); - __print_lock_name(class); - printk(");\n"); - printk(" \n"); - printk(" lock("); - __print_lock_name(class); - printk(");\n"); - printk("\n *** DEADLOCK ***\n\n"); -} - static int print_usage_bug(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) @@ -2198,8 +2039,6 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, print_irqtrace_events(curr); printk("\nother info that might help us debug this:\n"); - print_usage_bug_scenario(this); - lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); @@ -2234,10 +2073,6 @@ print_irq_inversion_bug(struct task_struct *curr, struct held_lock *this, int forwards, const char *irqclass) { - struct lock_list *entry = other; - struct lock_list *middle = NULL; - int depth; - if (!debug_locks_off_graph_unlock() || debug_locks_silent) return 0; @@ -2256,25 +2091,6 @@ print_irq_inversion_bug(struct task_struct *curr, printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); printk("\nother info that might help us debug this:\n"); - - /* Find a middle lock (if one exists) */ - depth = get_lock_depth(other); - do { - if (depth == 0 && (entry != root)) { - printk("lockdep:%s bad path found in chain graph\n", __func__); - break; - } - middle = entry; - entry = get_lock_parent(entry); - depth--; - } while (entry && entry != root && (depth >= 0)); - if (forwards) - print_irq_lock_scenario(root, other, - middle ? middle->class : root->class, other->class); - else - print_irq_lock_scenario(other, root, - middle ? middle->class : other->class, root->class); - lockdep_print_held_locks(curr); printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c index 22879725678d..d5938a5c19c4 100644 --- a/trunk/kernel/module.c +++ b/trunk/kernel/module.c @@ -57,7 +57,6 @@ #include #include #include -#include #define CREATE_TRACE_POINTS #include @@ -241,24 +240,23 @@ static bool each_symbol_in_section(const struct symsearch *arr, struct module *owner, bool (*fn)(const struct symsearch *syms, struct module *owner, - void *data), + unsigned int symnum, void *data), void *data) { - unsigned int j; + unsigned int i, j; for (j = 0; j < arrsize; j++) { - if (fn(&arr[j], owner, data)) - return true; + for (i = 0; i < arr[j].stop - arr[j].start; i++) + if (fn(&arr[j], owner, i, data)) + return true; } return false; } /* Returns true as soon as fn returns true, otherwise false. 
*/ -bool each_symbol_section(bool (*fn)(const struct symsearch *arr, - struct module *owner, - void *data), - void *data) +bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, + unsigned int symnum, void *data), void *data) { struct module *mod; static const struct symsearch arr[] = { @@ -311,7 +309,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, } return false; } -EXPORT_SYMBOL_GPL(each_symbol_section); +EXPORT_SYMBOL_GPL(each_symbol); struct find_symbol_arg { /* Input */ @@ -325,12 +323,15 @@ struct find_symbol_arg { const struct kernel_symbol *sym; }; -static bool check_symbol(const struct symsearch *syms, - struct module *owner, - unsigned int symnum, void *data) +static bool find_symbol_in_section(const struct symsearch *syms, + struct module *owner, + unsigned int symnum, void *data) { struct find_symbol_arg *fsa = data; + if (strcmp(syms->start[symnum].name, fsa->name) != 0) + return false; + if (!fsa->gplok) { if (syms->licence == GPL_ONLY) return false; @@ -364,30 +365,6 @@ static bool check_symbol(const struct symsearch *syms, return true; } -static int cmp_name(const void *va, const void *vb) -{ - const char *a; - const struct kernel_symbol *b; - a = va; b = vb; - return strcmp(a, b->name); -} - -static bool find_symbol_in_section(const struct symsearch *syms, - struct module *owner, - void *data) -{ - struct find_symbol_arg *fsa = data; - struct kernel_symbol *sym; - - sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, - sizeof(struct kernel_symbol), cmp_name); - - if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data)) - return true; - - return false; -} - /* Find a symbol and return it, along with, (optional) crc and * (optional) module which owns it. Needs preempt disabled or module_mutex. 
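The removed cmp_name()/find_symbol_in_section() pair above binary-searched a name-sorted symbol table with an asymmetric comparator (a plain string key compared against a struct element); this patch reverts that to a per-symbol strcmp(). For reference, the same bsearch idiom in plain C with the standard library; the table contents are illustrative and the table must already be sorted by name for the search to be valid:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sym { const char *name; unsigned long value; };

/* bsearch() passes the key first and the array element second. */
static int cmp_name(const void *key, const void *elt)
{
    return strcmp((const char *)key, ((const struct sym *)elt)->name);
}

int main(void)
{
    static const struct sym tab[] = { { "bar", 2 }, { "foo", 1 }, { "qux", 3 } };
    const struct sym *hit = bsearch("foo", tab, 3, sizeof(tab[0]), cmp_name);

    printf("%s -> %lu\n", hit ? hit->name : "none", hit ? hit->value : 0UL);
    return 0;
}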
*/ const struct kernel_symbol *find_symbol(const char *name, @@ -402,7 +379,7 @@ const struct kernel_symbol *find_symbol(const char *name, fsa.gplok = gplok; fsa.warn = warn; - if (each_symbol_section(find_symbol_in_section, &fsa)) { + if (each_symbol(find_symbol_in_section, &fsa)) { if (owner) *owner = fsa.owner; if (crc) @@ -1630,28 +1607,27 @@ static void set_section_ro_nx(void *base, } } -static void unset_module_core_ro_nx(struct module *mod) +/* Setting memory back to RW+NX before releasing it */ +void unset_section_ro_nx(struct module *mod, void *module_region) { - set_page_attributes(mod->module_core + mod->core_text_size, - mod->module_core + mod->core_size, - set_memory_x); - set_page_attributes(mod->module_core, - mod->module_core + mod->core_ro_size, - set_memory_rw); -} + unsigned long total_pages; -static void unset_module_init_ro_nx(struct module *mod) -{ - set_page_attributes(mod->module_init + mod->init_text_size, - mod->module_init + mod->init_size, - set_memory_x); - set_page_attributes(mod->module_init, - mod->module_init + mod->init_ro_size, - set_memory_rw); + if (mod->module_core == module_region) { + /* Set core as NX+RW */ + total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size); + set_memory_nx((unsigned long)mod->module_core, total_pages); + set_memory_rw((unsigned long)mod->module_core, total_pages); + + } else if (mod->module_init == module_region) { + /* Set init as NX+RW */ + total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size); + set_memory_nx((unsigned long)mod->module_init, total_pages); + set_memory_rw((unsigned long)mod->module_init, total_pages); + } } /* Iterate through all modules and set each module's text as RW */ -void set_all_modules_text_rw(void) +void set_all_modules_text_rw() { struct module *mod; @@ -1672,7 +1648,7 @@ void set_all_modules_text_rw(void) } /* Iterate through all modules and set each module's text as RO */ -void set_all_modules_text_ro(void) +void set_all_modules_text_ro() { struct module *mod; @@ -1693,8 +1669,7 @@ void set_all_modules_text_ro(void) } #else static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } -static void unset_module_core_ro_nx(struct module *mod) { } -static void unset_module_init_ro_nx(struct module *mod) { } +static inline void unset_section_ro_nx(struct module *mod, void *module_region) { } #endif /* Free a module, remove from lists, etc. 
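The unset_section_ro_nx() version above converts a byte size into a page count before calling set_memory_nx()/set_memory_rw(). MOD_NUMBER_OF_PAGES() itself is not shown in this hunk, but a plausible equivalent counts how many whole pages the region [base, base + size) touches; a small sketch with PAGE_SIZE fixed at 4 KiB purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Number of whole pages touched by [base, base + size). */
static unsigned long pages_spanned(uintptr_t base, unsigned long size)
{
    uintptr_t first, last;

    if (size == 0)
        return 0;
    first = base & PAGE_MASK;
    last  = (base + size - 1) & PAGE_MASK;
    return (last - first) / PAGE_SIZE + 1;
}

int main(void)
{
    printf("%lu\n", pages_spanned(0x1000, 8192));   /* page-aligned: 2 pages */
    printf("%lu\n", pages_spanned(0x1800, 8192));   /* straddles a boundary: 3 pages */
    return 0;
}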
*/ @@ -1721,7 +1696,7 @@ static void free_module(struct module *mod) destroy_params(mod->kp, mod->num_kp); /* This may be NULL, but that's OK */ - unset_module_init_ro_nx(mod); + unset_section_ro_nx(mod, mod->module_init); module_free(mod, mod->module_init); kfree(mod->args); percpu_modfree(mod); @@ -1730,7 +1705,7 @@ static void free_module(struct module *mod) lockdep_free_key_range(mod->module_core, mod->core_size); /* Finally, free the core (containing the module structure) */ - unset_module_core_ro_nx(mod); + unset_section_ro_nx(mod, mod->module_core); module_free(mod, mod->module_core); #ifdef CONFIG_MPU @@ -2055,8 +2030,11 @@ static const struct kernel_symbol *lookup_symbol(const char *name, const struct kernel_symbol *start, const struct kernel_symbol *stop) { - return bsearch(name, start, stop - start, - sizeof(struct kernel_symbol), cmp_name); + const struct kernel_symbol *ks = start; + for (; ks < stop; ks++) + if (strcmp(ks->name, name) == 0) + return ks; + return NULL; } static int is_exported(const char *name, unsigned long value, @@ -2953,11 +2931,10 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, mod->symtab = mod->core_symtab; mod->strtab = mod->core_strtab; #endif - unset_module_init_ro_nx(mod); + unset_section_ro_nx(mod, mod->module_init); module_free(mod, mod->module_init); mod->module_init = NULL; mod->init_size = 0; - mod->init_ro_size = 0; mod->init_text_size = 0; mutex_unlock(&module_mutex); diff --git a/trunk/kernel/mutex-debug.c b/trunk/kernel/mutex-debug.c index 73da83aff418..ec815a960b5d 100644 --- a/trunk/kernel/mutex-debug.c +++ b/trunk/kernel/mutex-debug.c @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock) return; DEBUG_LOCKS_WARN_ON(lock->magic != lock); - DEBUG_LOCKS_WARN_ON(lock->owner != current); + DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); mutex_clear_owner(lock); } diff --git a/trunk/kernel/mutex-debug.h b/trunk/kernel/mutex-debug.h index 0799fd3e4cfa..57d527a16f9d 100644 --- a/trunk/kernel/mutex-debug.h +++ b/trunk/kernel/mutex-debug.h @@ -29,7 +29,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name, static inline void mutex_set_owner(struct mutex *lock) { - lock->owner = current; + lock->owner = current_thread_info(); } static inline void mutex_clear_owner(struct mutex *lock) diff --git a/trunk/kernel/mutex.c b/trunk/kernel/mutex.c index 2c938e2337cd..c4195fa98900 100644 --- a/trunk/kernel/mutex.c +++ b/trunk/kernel/mutex.c @@ -160,7 +160,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, */ for (;;) { - struct task_struct *owner; + struct thread_info *owner; + + /* + * If we own the BKL, then don't spin. The owner of + * the mutex might be waiting on us to release the BKL. 
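The mutex-debug changes above switch the recorded owner from a struct task_struct pointer back to current_thread_info(); either way, the debug build remembers who took the lock and warns if somebody else unlocks it. A user-space analog of that owner check using pthreads; the wrapper type and function names are invented for the sketch:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct dbg_mutex {
    pthread_mutex_t lock;
    pthread_t owner;
    int owned;
};

static void dbg_lock(struct dbg_mutex *m)
{
    pthread_mutex_lock(&m->lock);
    m->owner = pthread_self();   /* remember the owner, like mutex_set_owner() */
    m->owned = 1;
}

static void dbg_unlock(struct dbg_mutex *m)
{
    /* same spirit as the DEBUG_LOCKS_WARN_ON(lock->owner != ...) check */
    assert(m->owned && pthread_equal(m->owner, pthread_self()));
    m->owned = 0;
    pthread_mutex_unlock(&m->lock);
}

int main(void)
{
    static struct dbg_mutex m = { .lock = PTHREAD_MUTEX_INITIALIZER };

    dbg_lock(&m);
    dbg_unlock(&m);
    puts("ok");
    return 0;
}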
+ */ + if (unlikely(current->lock_depth >= 0)) + break; /* * If there's an owner, wait for it to either diff --git a/trunk/kernel/mutex.h b/trunk/kernel/mutex.h index 4115fbf83b12..67578ca48f94 100644 --- a/trunk/kernel/mutex.h +++ b/trunk/kernel/mutex.h @@ -19,7 +19,7 @@ #ifdef CONFIG_SMP static inline void mutex_set_owner(struct mutex *lock) { - lock->owner = current; + lock->owner = current_thread_info(); } static inline void mutex_clear_owner(struct mutex *lock) diff --git a/trunk/kernel/params.c b/trunk/kernel/params.c index ed72e1330862..7ab388a48a2e 100644 --- a/trunk/kernel/params.c +++ b/trunk/kernel/params.c @@ -297,15 +297,21 @@ EXPORT_SYMBOL(param_ops_charp); int param_set_bool(const char *val, const struct kernel_param *kp) { bool v; - int ret; /* No equals means "set"... */ if (!val) val = "1"; /* One of =[yYnN01] */ - ret = strtobool(val, &v); - if (ret) - return ret; + switch (val[0]) { + case 'y': case 'Y': case '1': + v = true; + break; + case 'n': case 'N': case '0': + v = false; + break; + default: + return -EINVAL; + } if (kp->flags & KPARAM_ISBOOL) *(bool *)kp->arg = v; @@ -815,18 +821,15 @@ ssize_t __modver_version_show(struct module_attribute *mattr, return sprintf(buf, "%s\n", vattr->version); } -extern const struct module_version_attribute *__start___modver[]; -extern const struct module_version_attribute *__stop___modver[]; +extern struct module_version_attribute __start___modver[], __stop___modver[]; static void __init version_sysfs_builtin(void) { - const struct module_version_attribute **p; + const struct module_version_attribute *vattr; struct module_kobject *mk; int err; - for (p = __start___modver; p < __stop___modver; p++) { - const struct module_version_attribute *vattr = *p; - + for (vattr = __start___modver; vattr < __stop___modver; vattr++) { mk = locate_module_kobject(vattr->module_name); if (mk) { err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); diff --git a/trunk/kernel/events/core.c b/trunk/kernel/perf_event.c similarity index 99% rename from trunk/kernel/events/core.c rename to trunk/kernel/perf_event.c index 0fc34a370ba4..8e81a9860a0d 100644 --- a/trunk/kernel/events/core.c +++ b/trunk/kernel/perf_event.c @@ -2,8 +2,8 @@ * Performance events core code: * * Copyright (C) 2008 Thomas Gleixner - * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. 
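param_set_bool() above goes back to open-coding the [yYnN01] check instead of calling strtobool(), with a missing value meaning "set". A stand-alone version of the same parse, for reference; the helper name is invented:

#include <stdbool.h>
#include <stdio.h>

/* Accepts "y"/"Y"/"1" and "n"/"N"/"0"; a NULL value means "set". */
static int parse_bool(const char *val, bool *res)
{
    if (!val)
        val = "1";
    switch (val[0]) {
    case 'y': case 'Y': case '1':
        *res = true;
        return 0;
    case 'n': case 'N': case '0':
        *res = false;
        return 0;
    default:
        return -1;
    }
}

int main(void)
{
    bool v;

    printf("%d %d\n", parse_bool("Y", &v), v);
    printf("%d\n", parse_bool("maybe", &v));   /* rejected */
    return 0;
}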
* * For licensing details see kernel-base/COPYING @@ -39,10 +39,10 @@ #include struct remote_function_call { - struct task_struct *p; - int (*func)(void *info); - void *info; - int ret; + struct task_struct *p; + int (*func)(void *info); + void *info; + int ret; }; static void remote_function(void *data) @@ -76,10 +76,10 @@ static int task_function_call(struct task_struct *p, int (*func) (void *info), void *info) { struct remote_function_call data = { - .p = p, - .func = func, - .info = info, - .ret = -ESRCH, /* No such (running) process */ + .p = p, + .func = func, + .info = info, + .ret = -ESRCH, /* No such (running) process */ }; if (task_curr(p)) @@ -100,10 +100,10 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info) static int cpu_function_call(int cpu, int (*func) (void *info), void *info) { struct remote_function_call data = { - .p = NULL, - .func = func, - .info = info, - .ret = -ENXIO, /* No such CPU */ + .p = NULL, + .func = func, + .info = info, + .ret = -ENXIO, /* No such CPU */ }; smp_call_function_single(cpu, remote_function, &data, 1); @@ -125,7 +125,7 @@ enum event_type_t { * perf_sched_events : >0 events exist * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu */ -struct jump_label_key perf_sched_events __read_mostly; +atomic_t perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static atomic_t nr_mmap_events __read_mostly; @@ -5429,7 +5429,7 @@ static int swevent_hlist_get(struct perf_event *event) return err; } -struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; +atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; static void sw_perf_event_destroy(struct perf_event *event) { @@ -7445,11 +7445,11 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, } struct cgroup_subsys perf_subsys = { - .name = "perf_event", - .subsys_id = perf_subsys_id, - .create = perf_cgroup_create, - .destroy = perf_cgroup_destroy, - .exit = perf_cgroup_exit, - .attach = perf_cgroup_attach, + .name = "perf_event", + .subsys_id = perf_subsys_id, + .create = perf_cgroup_create, + .destroy = perf_cgroup_destroy, + .exit = perf_cgroup_exit, + .attach = perf_cgroup_attach, }; #endif /* CONFIG_CGROUP_PERF */ diff --git a/trunk/kernel/power/Kconfig b/trunk/kernel/power/Kconfig index 87f4d24b55b0..6de9a8fc3417 100644 --- a/trunk/kernel/power/Kconfig +++ b/trunk/kernel/power/Kconfig @@ -125,6 +125,12 @@ config PM_DEBUG code. This is helpful when debugging and reporting PM bugs, like suspend support. +config PM_VERBOSE + bool "Verbose Power Management debugging" + depends on PM_DEBUG + ---help--- + This option enables verbose messages from the Power Management code. + config PM_ADVANCED_DEBUG bool "Extra PM attributes in sysfs for low-level debugging/testing" depends on PM_DEBUG @@ -223,7 +229,3 @@ config PM_OPP representing individual voltage domains and provides SOC implementations a ready to use framework to manage OPPs. 
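The perf core keeps the callback, its argument and a return slot together in struct remote_function_call so the whole request can be shipped to another CPU with a single smp_call_function_single() and the result read back afterwards. The same packaging works anywhere work runs in someone else's context; a pthread-based sketch with a thread standing in for the IPI, names invented:

#include <pthread.h>
#include <stdio.h>

/* Bundle a callback, its argument and a return slot, run it elsewhere,
 * then read the result back; the same shape as remote_function_call. */
struct remote_call {
    int (*func)(void *info);
    void *info;
    int ret;
};

static void *runner(void *data)
{
    struct remote_call *c = data;

    c->ret = c->func(c->info);
    return NULL;
}

static int add_one(void *info)
{
    return *(int *)info + 1;
}

int main(void)
{
    int x = 41;
    struct remote_call call = { .func = add_one, .info = &x, .ret = -1 };
    pthread_t t;

    pthread_create(&t, NULL, runner, &call);
    pthread_join(t, NULL);
    printf("ret=%d\n", call.ret);
    return 0;
}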
For more information, read - -config PM_RUNTIME_CLK - def_bool y - depends on PM_RUNTIME && HAVE_CLK diff --git a/trunk/kernel/power/hibernate.c b/trunk/kernel/power/hibernate.c index f9bec56d8825..50aae660174d 100644 --- a/trunk/kernel/power/hibernate.c +++ b/trunk/kernel/power/hibernate.c @@ -272,7 +272,12 @@ static int create_image(int platform_mode) local_irq_disable(); - error = syscore_suspend(); + error = sysdev_suspend(PMSG_FREEZE); + if (!error) { + error = syscore_suspend(); + if (error) + sysdev_resume(); + } if (error) { printk(KERN_ERR "PM: Some system devices failed to power down, " "aborting hibernation\n"); @@ -297,6 +302,7 @@ static int create_image(int platform_mode) Power_up: syscore_resume(); + sysdev_resume(); /* NOTE: dpm_resume_noirq() is just a resume() for devices * that suspended with irqs off ... no overall powerup. */ @@ -327,25 +333,20 @@ static int create_image(int platform_mode) int hibernation_snapshot(int platform_mode) { - pm_message_t msg = PMSG_RECOVER; int error; error = platform_begin(platform_mode); if (error) goto Close; - error = dpm_prepare(PMSG_FREEZE); - if (error) - goto Complete_devices; - /* Preallocate image memory before shutting down devices. */ error = hibernate_preallocate_memory(); if (error) - goto Complete_devices; + goto Close; suspend_console(); pm_restrict_gfp_mask(); - error = dpm_suspend(PMSG_FREEZE); + error = dpm_suspend_start(PMSG_FREEZE); if (error) goto Recover_platform; @@ -363,17 +364,13 @@ int hibernation_snapshot(int platform_mode) if (error || !in_suspend) swsusp_free(); - msg = in_suspend ? (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE; - dpm_resume(msg); + dpm_resume_end(in_suspend ? + (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); if (error || !in_suspend) pm_restore_gfp_mask(); resume_console(); - - Complete_devices: - dpm_complete(msg); - Close: platform_end(platform_mode); return error; @@ -412,7 +409,12 @@ static int resume_target_kernel(bool platform_mode) local_irq_disable(); - error = syscore_suspend(); + error = sysdev_suspend(PMSG_QUIESCE); + if (!error) { + error = syscore_suspend(); + if (error) + sysdev_resume(); + } if (error) goto Enable_irqs; @@ -440,6 +442,7 @@ static int resume_target_kernel(bool platform_mode) touch_softlockup_watchdog(); syscore_resume(); + sysdev_resume(); Enable_irqs: local_irq_enable(); @@ -525,6 +528,7 @@ int hibernation_platform_enter(void) goto Platform_finish; local_irq_disable(); + sysdev_suspend(PMSG_HIBERNATE); syscore_suspend(); if (pm_wakeup_pending()) { error = -EAGAIN; @@ -537,6 +541,7 @@ int hibernation_platform_enter(void) Power_up: syscore_resume(); + sysdev_resume(); local_irq_enable(); enable_nonboot_cpus(); @@ -977,33 +982,10 @@ static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *att power_attr(image_size); -static ssize_t reserved_size_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", reserved_size); -} - -static ssize_t reserved_size_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t n) -{ - unsigned long size; - - if (sscanf(buf, "%lu", &size) == 1) { - reserved_size = size; - return n; - } - - return -EINVAL; -} - -power_attr(reserved_size); - static struct attribute * g[] = { &disk_attr.attr, &resume_attr.attr, &image_size_attr.attr, - &reserved_size_attr.attr, NULL, }; diff --git a/trunk/kernel/power/main.c b/trunk/kernel/power/main.c index 2981af4ce7cb..de9aef8742f4 100644 --- a/trunk/kernel/power/main.c +++ b/trunk/kernel/power/main.c 
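The create_image() hunk above reinstates sysdev_suspend() ahead of syscore_suspend() and undoes the first stage if the second one fails, so a partially suspended system is never left behind. The shape of that two-stage-with-rollback sequence, reduced to a tiny stand-alone form; the stage functions are placeholders:

#include <stdio.h>

static int stage1_suspended;

static int stage1_suspend(void) { stage1_suspended = 1; return 0; }
static void stage1_resume(void) { stage1_suspended = 0; }
static int stage2_suspend(void) { return -1; }   /* pretend it fails */

static int suspend_both(void)
{
    int error = stage1_suspend();

    if (!error) {
        error = stage2_suspend();
        if (error)
            stage1_resume();   /* roll the first stage back on failure */
    }
    return error;
}

int main(void)
{
    printf("error=%d, stage1 still suspended=%d\n",
           suspend_both(), stage1_suspended);
    return 0;
}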
@@ -337,7 +337,6 @@ static int __init pm_init(void) if (error) return error; hibernate_image_size_init(); - hibernate_reserved_size_init(); power_kobj = kobject_create_and_add("power", NULL); if (!power_kobj) return -ENOMEM; diff --git a/trunk/kernel/power/power.h b/trunk/kernel/power/power.h index 9a00a0a26280..03634be55f62 100644 --- a/trunk/kernel/power/power.h +++ b/trunk/kernel/power/power.h @@ -15,7 +15,6 @@ struct swsusp_info { #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ -extern void __init hibernate_reserved_size_init(void); extern void __init hibernate_image_size_init(void); #ifdef CONFIG_ARCH_HIBERNATION_HEADER @@ -56,7 +55,6 @@ extern int hibernation_platform_enter(void); #else /* !CONFIG_HIBERNATION */ -static inline void hibernate_reserved_size_init(void) {} static inline void hibernate_image_size_init(void) {} #endif /* !CONFIG_HIBERNATION */ @@ -74,8 +72,6 @@ static struct kobj_attribute _name##_attr = { \ /* Preferred image size in bytes (default 500 MB) */ extern unsigned long image_size; -/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */ -extern unsigned long reserved_size; extern int in_suspend; extern dev_t swsusp_resume_device; extern sector_t swsusp_resume_block; diff --git a/trunk/kernel/power/snapshot.c b/trunk/kernel/power/snapshot.c index ace55889f702..ca0aacc24874 100644 --- a/trunk/kernel/power/snapshot.c +++ b/trunk/kernel/power/snapshot.c @@ -40,29 +40,17 @@ static int swsusp_page_is_free(struct page *); static void swsusp_set_page_forbidden(struct page *); static void swsusp_unset_page_forbidden(struct page *); -/* - * Number of bytes to reserve for memory allocations made by device drivers - * from their ->freeze() and ->freeze_noirq() callbacks so that they don't - * cause image creation to fail (tunable via /sys/power/reserved_size). - */ -unsigned long reserved_size; - -void __init hibernate_reserved_size_init(void) -{ - reserved_size = SPARE_PAGES * PAGE_SIZE; -} - /* * Preferred image size in bytes (tunable via /sys/power/image_size). - * When it is set to N, swsusp will do its best to ensure the image - * size will not exceed N bytes, but if that is impossible, it will - * try to create the smallest image possible. + * When it is set to N, the image creating code will do its best to + * ensure the image size will not exceed N bytes, but if that is + * impossible, it will try to create the smallest image possible. */ unsigned long image_size; void __init hibernate_image_size_init(void) { - image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; + image_size = (totalram_pages / 3) * PAGE_SIZE; } /* List of PBEs needed for restoring the pages that were allocated before @@ -1275,13 +1263,11 @@ static unsigned long minimum_image_size(unsigned long saveable) * frame in use. We also need a number of page frames to be free during * hibernation for allocations made while saving the image and for device * drivers, in case they need to allocate memory from their hibernation - * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough - * estimate) and reserverd_size divided by PAGE_SIZE (which is tunable through - * /sys/power/reserved_size, respectively). To make this happen, we compute the - * total number of available page frames and allocate at least + * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES, + * respectively, both of which are rough estimates). 
To make this happen, we + * compute the total number of available page frames and allocate at least * - * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 - * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE) + * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES * * of them, which corresponds to the maximum size of a hibernation image. * @@ -1336,8 +1322,7 @@ int hibernate_preallocate_memory(void) count -= totalreserve_pages; /* Compute the maximum number of saveable pages to leave in memory. */ - max_size = (count - (size + PAGES_FOR_IO)) / 2 - - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); + max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES; /* Compute the desired number of image pages specified by image_size. */ size = DIV_ROUND_UP(image_size, PAGE_SIZE); if (size > max_size) diff --git a/trunk/kernel/power/suspend.c b/trunk/kernel/power/suspend.c index 1c41ba215419..8935369d503a 100644 --- a/trunk/kernel/power/suspend.c +++ b/trunk/kernel/power/suspend.c @@ -163,13 +163,19 @@ static int suspend_enter(suspend_state_t state) arch_suspend_disable_irqs(); BUG_ON(!irqs_disabled()); - error = syscore_suspend(); + error = sysdev_suspend(PMSG_SUSPEND); + if (!error) { + error = syscore_suspend(); + if (error) + sysdev_resume(); + } if (!error) { if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { error = suspend_ops->enter(state); events_check_enabled = false; } syscore_resume(); + sysdev_resume(); } arch_suspend_enable_irqs(); @@ -210,6 +216,7 @@ int suspend_devices_and_enter(suspend_state_t state) goto Close; } suspend_console(); + pm_restrict_gfp_mask(); suspend_test_start(); error = dpm_suspend_start(PMSG_SUSPEND); if (error) { @@ -220,12 +227,13 @@ int suspend_devices_and_enter(suspend_state_t state) if (suspend_test(TEST_DEVICES)) goto Recover_platform; - error = suspend_enter(state); + suspend_enter(state); Resume_devices: suspend_test_start(); dpm_resume_end(PMSG_RESUME); suspend_test_finish("resume devices"); + pm_restore_gfp_mask(); resume_console(); Close: if (suspend_ops->end) @@ -286,9 +294,7 @@ int enter_state(suspend_state_t state) goto Finish; pr_debug("PM: Entering %s sleep\n", pm_states[state]); - pm_restrict_gfp_mask(); error = suspend_devices_and_enter(state); - pm_restore_gfp_mask(); Finish: pr_debug("PM: Finishing wakeup.\n"); diff --git a/trunk/kernel/power/user.c b/trunk/kernel/power/user.c index 7d02d33be699..c36c3b9e8a84 100644 --- a/trunk/kernel/power/user.c +++ b/trunk/kernel/power/user.c @@ -135,10 +135,8 @@ static int snapshot_release(struct inode *inode, struct file *filp) free_basic_memory_bitmaps(); data = filp->private_data; free_all_swap_pages(data->swap); - if (data->frozen) { - pm_restore_gfp_mask(); + if (data->frozen) thaw_processes(); - } pm_notifier_call_chain(data->mode == O_RDONLY ? 
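The snapshot.c hunk above drops reserved_size and sizes the default image at one third of RAM again, with the preallocation ceiling computed as max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES. Plugging example numbers into those two formulas makes the arithmetic concrete; every value below is invented for illustration, and PAGES_FOR_IO/SPARE_PAGES are rough estimates in the kernel as well:

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define PAGES_FOR_IO 1024UL   /* rough estimate */
#define SPARE_PAGES   256UL   /* rough estimate */

int main(void)
{
    unsigned long totalram_pages = 262144;   /* 1 GiB of 4 KiB pages */
    unsigned long image_size = (totalram_pages / 3) * PAGE_SIZE;

    unsigned long count = 250000;   /* usable page frames */
    unsigned long meta  = 2000;     /* pages already set aside (metadata etc.) */
    unsigned long max_size = (count - (meta + PAGES_FOR_IO)) / 2
                             - 2 * SPARE_PAGES;

    printf("default image_size = %lu bytes\n", image_size);
    printf("max saveable pages = %lu\n", max_size);
    return 0;
}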
PM_POST_HIBERNATION : PM_POST_RESTORE); atomic_inc(&snapshot_device_available); @@ -381,7 +379,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, * PM_HIBERNATION_PREPARE */ error = suspend_devices_and_enter(PM_SUSPEND_MEM); - data->ready = 0; break; case SNAPSHOT_PLATFORM_SUPPORT: diff --git a/trunk/kernel/ptrace.c b/trunk/kernel/ptrace.c index dc7ab65f3b36..0fc1eed28d27 100644 --- a/trunk/kernel/ptrace.c +++ b/trunk/kernel/ptrace.c @@ -22,7 +22,6 @@ #include #include #include -#include /* @@ -880,19 +879,3 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, return ret; } #endif /* CONFIG_COMPAT */ - -#ifdef CONFIG_HAVE_HW_BREAKPOINT -int ptrace_get_breakpoints(struct task_struct *tsk) -{ - if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) - return 0; - - return -1; -} - -void ptrace_put_breakpoints(struct task_struct *tsk) -{ - if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) - flush_ptrace_hw_breakpoint(tsk); -} -#endif /* CONFIG_HAVE_HW_BREAKPOINT */ diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index c62acf45d3b9..312f8b95c2d4 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -231,7 +231,7 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) #endif /* - * sched_domains_mutex serializes calls to init_sched_domains, + * sched_domains_mutex serializes calls to arch_init_sched_domains, * detach_destroy_domains and partition_sched_domains. */ static DEFINE_MUTEX(sched_domains_mutex); @@ -312,9 +312,6 @@ struct cfs_rq { u64 exec_clock; u64 min_vruntime; -#ifndef CONFIG_64BIT - u64 min_vruntime_copy; -#endif struct rb_root tasks_timeline; struct rb_node *rb_leftmost; @@ -328,9 +325,7 @@ struct cfs_rq { */ struct sched_entity *curr, *next, *last, *skip; -#ifdef CONFIG_SCHED_DEBUG unsigned int nr_spread_over; -#endif #ifdef CONFIG_FAIR_GROUP_SCHED struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ @@ -422,7 +417,6 @@ struct rt_rq { */ struct root_domain { atomic_t refcount; - struct rcu_head rcu; cpumask_var_t span; cpumask_var_t online; @@ -466,7 +460,7 @@ struct rq { u64 nohz_stamp; unsigned char nohz_balance_kick; #endif - int skip_clock_update; + unsigned int skip_clock_update; /* capture load from *all* tasks on this cpu: */ struct load_weight load; @@ -559,10 +553,6 @@ struct rq { unsigned int ttwu_count; unsigned int ttwu_local; #endif - -#ifdef CONFIG_SMP - struct task_struct *wake_list; -#endif }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); @@ -581,7 +571,7 @@ static inline int cpu_of(struct rq *rq) #define rcu_dereference_check_sched_domain(p) \ rcu_dereference_check((p), \ - rcu_read_lock_held() || \ + rcu_read_lock_sched_held() || \ lockdep_is_held(&sched_domains_mutex)) /* @@ -606,7 +596,7 @@ static inline int cpu_of(struct rq *rq) * Return the group to which this tasks belongs. * * We use task_subsys_state_check() and extend the RCU verification - * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach() + * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach() * holds that lock for each task it moves into the cgroup. Therefore * by holding that lock, we pin the task to the current cgroup. 
*/ @@ -616,7 +606,7 @@ static inline struct task_group *task_group(struct task_struct *p) struct cgroup_subsys_state *css; css = task_subsys_state_check(p, cpu_cgroup_subsys_id, - lockdep_is_held(&p->pi_lock)); + lockdep_is_held(&task_rq(p)->lock)); tg = container_of(css, struct task_group, css); return autogroup_task_group(p, tg); @@ -652,7 +642,7 @@ static void update_rq_clock(struct rq *rq) { s64 delta; - if (rq->skip_clock_update > 0) + if (rq->skip_clock_update) return; delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; @@ -848,39 +838,18 @@ static inline int task_current(struct rq *rq, struct task_struct *p) return rq->curr == p; } +#ifndef __ARCH_WANT_UNLOCKED_CTXSW static inline int task_running(struct rq *rq, struct task_struct *p) { -#ifdef CONFIG_SMP - return p->on_cpu; -#else return task_current(rq, p); -#endif } -#ifndef __ARCH_WANT_UNLOCKED_CTXSW static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) { -#ifdef CONFIG_SMP - /* - * We can optimise this out completely for !SMP, because the - * SMP rebalancing from interrupt is the only thing that cares - * here. - */ - next->on_cpu = 1; -#endif } static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { -#ifdef CONFIG_SMP - /* - * After ->on_cpu is cleared, the task can be moved to a different CPU. - * We must ensure this doesn't happen until the switch is completely - * finished. - */ - smp_wmb(); - prev->on_cpu = 0; -#endif #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ rq->lock.owner = current; @@ -896,6 +865,15 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ +static inline int task_running(struct rq *rq, struct task_struct *p) +{ +#ifdef CONFIG_SMP + return p->oncpu; +#else + return task_current(rq, p); +#endif +} + static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) { #ifdef CONFIG_SMP @@ -904,7 +882,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) * SMP rebalancing from interrupt is the only thing that cares * here. */ - next->on_cpu = 1; + next->oncpu = 1; #endif #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW raw_spin_unlock_irq(&rq->lock); @@ -917,12 +895,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) { #ifdef CONFIG_SMP /* - * After ->on_cpu is cleared, the task can be moved to a different CPU. + * After ->oncpu is cleared, the task can be moved to a different CPU. * We must ensure this doesn't happen until the switch is completely * finished. */ smp_wmb(); - prev->on_cpu = 0; + prev->oncpu = 0; #endif #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW local_irq_enable(); @@ -931,15 +909,23 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ /* - * __task_rq_lock - lock the rq @p resides on. + * Check whether the task is waking, we use this to synchronize ->cpus_allowed + * against ttwu(). + */ +static inline int task_is_waking(struct task_struct *p) +{ + return unlikely(p->state == TASK_WAKING); +} + +/* + * __task_rq_lock - lock the runqueue a given task resides on. + * Must be called interrupts disabled. 
*/ static inline struct rq *__task_rq_lock(struct task_struct *p) __acquires(rq->lock) { struct rq *rq; - lockdep_assert_held(&p->pi_lock); - for (;;) { rq = task_rq(p); raw_spin_lock(&rq->lock); @@ -950,22 +936,22 @@ static inline struct rq *__task_rq_lock(struct task_struct *p) } /* - * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. + * task_rq_lock - lock the runqueue a given task resides on and disable + * interrupts. Note the ordering: we can safely lookup the task_rq without + * explicitly disabling preemption. */ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) - __acquires(p->pi_lock) __acquires(rq->lock) { struct rq *rq; for (;;) { - raw_spin_lock_irqsave(&p->pi_lock, *flags); + local_irq_save(*flags); rq = task_rq(p); raw_spin_lock(&rq->lock); if (likely(rq == task_rq(p))) return rq; - raw_spin_unlock(&rq->lock); - raw_spin_unlock_irqrestore(&p->pi_lock, *flags); + raw_spin_unlock_irqrestore(&rq->lock, *flags); } } @@ -975,13 +961,10 @@ static void __task_rq_unlock(struct rq *rq) raw_spin_unlock(&rq->lock); } -static inline void -task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) +static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) __releases(rq->lock) - __releases(p->pi_lock) { - raw_spin_unlock(&rq->lock); - raw_spin_unlock_irqrestore(&p->pi_lock, *flags); + raw_spin_unlock_irqrestore(&rq->lock, *flags); } /* @@ -1210,17 +1193,11 @@ int get_nohz_timer_target(void) int i; struct sched_domain *sd; - rcu_read_lock(); for_each_domain(cpu, sd) { - for_each_cpu(i, sched_domain_span(sd)) { - if (!idle_cpu(i)) { - cpu = i; - goto unlock; - } - } + for_each_cpu(i, sched_domain_span(sd)) + if (!idle_cpu(i)) + return i; } -unlock: - rcu_read_unlock(); return cpu; } /* @@ -1330,15 +1307,15 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, { u64 tmp; - tmp = (u64)delta_exec * weight; - if (!lw->inv_weight) { if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) lw->inv_weight = 1; else - lw->inv_weight = WMULT_CONST / lw->weight; + lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2) + / (lw->weight+1); } + tmp = (u64)delta_exec * weight; /* * Check whether we'd overflow the 64-bit multiplication: */ @@ -1796,6 +1773,7 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) update_rq_clock(rq); sched_info_queued(p); p->sched_class->enqueue_task(rq, p, flags); + p->se.on_rq = 1; } static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) @@ -1803,6 +1781,7 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) update_rq_clock(rq); sched_info_dequeued(p); p->sched_class->dequeue_task(rq, p, flags); + p->se.on_rq = 0; } /* @@ -2137,7 +2116,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. 
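Both the old and the new __task_rq_lock()/task_rq_lock() above follow the same lock-then-revalidate loop: read which runqueue the task claims to be on, take that runqueue's lock, and retry if the task migrated in the meantime. A generic user-space rendering of that loop; the container/object names are invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct container {
    pthread_mutex_t lock;
};

struct object {
    _Atomic(struct container *) home;   /* may be changed by other threads */
};

/* Lock the container the object currently lives in, retrying if it moves. */
static struct container *lock_home(struct object *obj)
{
    for (;;) {
        struct container *c = atomic_load(&obj->home);

        pthread_mutex_lock(&c->lock);
        if (c == atomic_load(&obj->home))
            return c;                     /* still the right one, keep the lock */
        pthread_mutex_unlock(&c->lock);   /* it migrated; try again */
    }
}

int main(void)
{
    struct container c = { PTHREAD_MUTEX_INITIALIZER };
    struct object o;

    atomic_init(&o.home, &c);
    pthread_mutex_unlock(&lock_home(&o)->lock);
    puts("ok");
    return 0;
}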
*/ - if (rq->curr->on_rq && test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -2183,11 +2162,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) */ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); - -#ifdef CONFIG_LOCKDEP - WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || - lockdep_is_held(&task_rq(p)->lock))); -#endif #endif trace_sched_migrate_task(p, new_cpu); @@ -2207,6 +2181,19 @@ struct migration_arg { static int migration_cpu_stop(void *data); +/* + * The task's runqueue lock must be held. + * Returns true if you have to wait for migration thread. + */ +static bool migrate_task(struct task_struct *p, struct rq *rq) +{ + /* + * If the task is not on a runqueue (and not running), then + * the next wake-up will properly place the task. + */ + return p->se.on_rq || task_running(rq, p); +} + /* * wait_task_inactive - wait for a thread to unschedule. * @@ -2264,11 +2251,11 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) rq = task_rq_lock(p, &flags); trace_sched_wait_task(p); running = task_running(rq, p); - on_rq = p->on_rq; + on_rq = p->se.on_rq; ncsw = 0; if (!match_state || p->state == match_state) ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); /* * If it changed from the expected state, bail out now. @@ -2343,7 +2330,7 @@ EXPORT_SYMBOL_GPL(kick_process); #ifdef CONFIG_SMP /* - * ->cpus_allowed is protected by both rq->lock and p->pi_lock + * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held. */ static int select_fallback_rq(int cpu, struct task_struct *p) { @@ -2376,12 +2363,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) } /* - * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. + * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable. 
*/ static inline -int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) +int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags) { - int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); + int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags); /* * In order not to call set_task_cpu() on a blocking task we need @@ -2407,62 +2394,27 @@ static void update_avg(u64 *avg, u64 sample) } #endif -static void -ttwu_stat(struct task_struct *p, int cpu, int wake_flags) +static inline void ttwu_activate(struct task_struct *p, struct rq *rq, + bool is_sync, bool is_migrate, bool is_local, + unsigned long en_flags) { -#ifdef CONFIG_SCHEDSTATS - struct rq *rq = this_rq(); - -#ifdef CONFIG_SMP - int this_cpu = smp_processor_id(); - - if (cpu == this_cpu) { - schedstat_inc(rq, ttwu_local); - schedstat_inc(p, se.statistics.nr_wakeups_local); - } else { - struct sched_domain *sd; - - schedstat_inc(p, se.statistics.nr_wakeups_remote); - rcu_read_lock(); - for_each_domain(this_cpu, sd) { - if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { - schedstat_inc(sd, ttwu_wake_remote); - break; - } - } - rcu_read_unlock(); - } -#endif /* CONFIG_SMP */ - - schedstat_inc(rq, ttwu_count); schedstat_inc(p, se.statistics.nr_wakeups); - - if (wake_flags & WF_SYNC) + if (is_sync) schedstat_inc(p, se.statistics.nr_wakeups_sync); - - if (cpu != task_cpu(p)) + if (is_migrate) schedstat_inc(p, se.statistics.nr_wakeups_migrate); + if (is_local) + schedstat_inc(p, se.statistics.nr_wakeups_local); + else + schedstat_inc(p, se.statistics.nr_wakeups_remote); -#endif /* CONFIG_SCHEDSTATS */ -} - -static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) -{ activate_task(rq, p, en_flags); - p->on_rq = 1; - - /* if a worker is waking up, notify workqueue */ - if (p->flags & PF_WQ_WORKER) - wq_worker_waking_up(p, cpu_of(rq)); } -/* - * Mark the task runnable and perform wakeup-preemption. - */ -static void -ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) +static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, + int wake_flags, bool success) { - trace_sched_wakeup(p, true); + trace_sched_wakeup(p, success); check_preempt_curr(rq, p, wake_flags); p->state = TASK_RUNNING; @@ -2481,99 +2433,9 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) rq->idle_stamp = 0; } #endif -} - -static void -ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) -{ -#ifdef CONFIG_SMP - if (p->sched_contributes_to_load) - rq->nr_uninterruptible--; -#endif - - ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING); - ttwu_do_wakeup(rq, p, wake_flags); -} - -/* - * Called in case the task @p isn't fully descheduled from its runqueue, - * in this case we must do a remote wakeup. Its a 'light' wakeup though, - * since all we need to do is flip p->state to TASK_RUNNING, since - * the task is still ->on_rq. 
- */ -static int ttwu_remote(struct task_struct *p, int wake_flags) -{ - struct rq *rq; - int ret = 0; - - rq = __task_rq_lock(p); - if (p->on_rq) { - ttwu_do_wakeup(rq, p, wake_flags); - ret = 1; - } - __task_rq_unlock(rq); - - return ret; -} - -#ifdef CONFIG_SMP -static void sched_ttwu_pending(void) -{ - struct rq *rq = this_rq(); - struct task_struct *list = xchg(&rq->wake_list, NULL); - - if (!list) - return; - - raw_spin_lock(&rq->lock); - - while (list) { - struct task_struct *p = list; - list = list->wake_entry; - ttwu_do_activate(rq, p, 0); - } - - raw_spin_unlock(&rq->lock); -} - -void scheduler_ipi(void) -{ - sched_ttwu_pending(); -} - -static void ttwu_queue_remote(struct task_struct *p, int cpu) -{ - struct rq *rq = cpu_rq(cpu); - struct task_struct *next = rq->wake_list; - - for (;;) { - struct task_struct *old = next; - - p->wake_entry = next; - next = cmpxchg(&rq->wake_list, old, p); - if (next == old) - break; - } - - if (!next) - smp_send_reschedule(cpu); -} -#endif - -static void ttwu_queue(struct task_struct *p, int cpu) -{ - struct rq *rq = cpu_rq(cpu); - -#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_TTWU_QUEUE) - if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) { - ttwu_queue_remote(p, cpu); - return; - } -#endif - - raw_spin_lock(&rq->lock); - ttwu_do_activate(rq, p, 0); - raw_spin_unlock(&rq->lock); + /* if a worker is waking up, notify workqueue */ + if ((p->flags & PF_WQ_WORKER) && success) + wq_worker_waking_up(p, cpu_of(rq)); } /** @@ -2591,64 +2453,92 @@ static void ttwu_queue(struct task_struct *p, int cpu) * Returns %true if @p was woken up, %false if it was already running * or @state didn't match @p's state. */ -static int -try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) +static int try_to_wake_up(struct task_struct *p, unsigned int state, + int wake_flags) { + int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags; - int cpu, success = 0; + unsigned long en_flags = ENQUEUE_WAKEUP; + struct rq *rq; + + this_cpu = get_cpu(); smp_wmb(); - raw_spin_lock_irqsave(&p->pi_lock, flags); + rq = task_rq_lock(p, &flags); if (!(p->state & state)) goto out; - success = 1; /* we're going to change ->state */ - cpu = task_cpu(p); + if (p->se.on_rq) + goto out_running; - if (p->on_rq && ttwu_remote(p, wake_flags)) - goto stat; + cpu = task_cpu(p); + orig_cpu = cpu; #ifdef CONFIG_SMP + if (unlikely(task_running(rq, p))) + goto out_activate; + /* - * If the owning (remote) cpu is still in the middle of schedule() with - * this task as prev, wait until its done referencing the task. + * In order to handle concurrent wakeups and release the rq->lock + * we put the task in TASK_WAKING state. + * + * First fix up the nr_uninterruptible count: */ - while (p->on_cpu) { -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - /* - * If called from interrupt context we could have landed in the - * middle of schedule(), in this case we should take care not - * to spin on ->on_cpu if p is current, since that would - * deadlock. - */ - if (p == current) { - ttwu_queue(p, cpu); - goto stat; - } -#endif - cpu_relax(); + if (task_contributes_to_load(p)) { + if (likely(cpu_online(orig_cpu))) + rq->nr_uninterruptible--; + else + this_rq()->nr_uninterruptible--; } - /* - * Pairs with the smp_wmb() in finish_lock_switch(). 
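The removed ttwu_queue_remote() above pushes the task onto the target CPU's wake_list with a cmpxchg loop and only sends the reschedule IPI when the list was previously empty. The push itself is the classic lock-free stack insert; a C11 sketch with an invented node layout:

#include <stdatomic.h>
#include <stdio.h>

struct node {
    struct node *next;
    int payload;
};

static _Atomic(struct node *) pending_head;

/* Push `n` onto the pending list; return 1 if the list was empty before,
 * which is when the kernel code would send the wakeup IPI. */
static int push_pending(struct node *n)
{
    struct node *old = atomic_load(&pending_head);

    do {
        n->next = old;   /* link to the head we observed */
    } while (!atomic_compare_exchange_weak(&pending_head, &old, n));

    return old == NULL;
}

int main(void)
{
    struct node a = { .payload = 1 }, b = { .payload = 2 };

    printf("first push, list was empty? %d\n", push_pending(&a));
    printf("second push, list was empty? %d\n", push_pending(&b));
    return 0;
}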
- */ - smp_rmb(); - - p->sched_contributes_to_load = !!task_contributes_to_load(p); p->state = TASK_WAKING; - if (p->sched_class->task_waking) - p->sched_class->task_waking(p); + if (p->sched_class->task_waking) { + p->sched_class->task_waking(rq, p); + en_flags |= ENQUEUE_WAKING; + } - cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); - if (task_cpu(p) != cpu) + cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags); + if (cpu != orig_cpu) set_task_cpu(p, cpu); -#endif /* CONFIG_SMP */ + __task_rq_unlock(rq); + + rq = cpu_rq(cpu); + raw_spin_lock(&rq->lock); + + /* + * We migrated the task without holding either rq->lock, however + * since the task is not on the task list itself, nobody else + * will try and migrate the task, hence the rq should match the + * cpu we just moved it to. + */ + WARN_ON(task_cpu(p) != cpu); + WARN_ON(p->state != TASK_WAKING); + +#ifdef CONFIG_SCHEDSTATS + schedstat_inc(rq, ttwu_count); + if (cpu == this_cpu) + schedstat_inc(rq, ttwu_local); + else { + struct sched_domain *sd; + for_each_domain(this_cpu, sd) { + if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { + schedstat_inc(sd, ttwu_wake_remote); + break; + } + } + } +#endif /* CONFIG_SCHEDSTATS */ - ttwu_queue(p, cpu); -stat: - ttwu_stat(p, cpu, wake_flags); +out_activate: +#endif /* CONFIG_SMP */ + ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, + cpu == this_cpu, en_flags); + success = 1; +out_running: + ttwu_post_activation(p, rq, wake_flags, success); out: - raw_spin_unlock_irqrestore(&p->pi_lock, flags); + task_rq_unlock(rq, &flags); + put_cpu(); return success; } @@ -2657,34 +2547,31 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * try_to_wake_up_local - try to wake up a local task with rq lock held * @p: the thread to be awakened * - * Put @p on the run-queue if it's not already there. The caller must + * Put @p on the run-queue if it's not already there. The caller must * ensure that this_rq() is locked, @p is bound to this_rq() and not - * the current task. + * the current task. this_rq() stays locked over invocation. 
*/ static void try_to_wake_up_local(struct task_struct *p) { struct rq *rq = task_rq(p); + bool success = false; BUG_ON(rq != this_rq()); BUG_ON(p == current); lockdep_assert_held(&rq->lock); - if (!raw_spin_trylock(&p->pi_lock)) { - raw_spin_unlock(&rq->lock); - raw_spin_lock(&p->pi_lock); - raw_spin_lock(&rq->lock); - } - if (!(p->state & TASK_NORMAL)) - goto out; - - if (!p->on_rq) - ttwu_activate(rq, p, ENQUEUE_WAKEUP); + return; - ttwu_do_wakeup(rq, p, 0); - ttwu_stat(p, smp_processor_id(), 0); -out: - raw_spin_unlock(&p->pi_lock); + if (!p->se.on_rq) { + if (likely(!task_running(rq, p))) { + schedstat_inc(rq, ttwu_count); + schedstat_inc(rq, ttwu_local); + } + ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP); + success = true; + } + ttwu_post_activation(p, rq, 0, success); } /** @@ -2717,21 +2604,19 @@ int wake_up_state(struct task_struct *p, unsigned int state) */ static void __sched_fork(struct task_struct *p) { - p->on_rq = 0; - - p->se.on_rq = 0; p->se.exec_start = 0; p->se.sum_exec_runtime = 0; p->se.prev_sum_exec_runtime = 0; p->se.nr_migrations = 0; p->se.vruntime = 0; - INIT_LIST_HEAD(&p->se.group_node); #ifdef CONFIG_SCHEDSTATS memset(&p->se.statistics, 0, sizeof(p->se.statistics)); #endif INIT_LIST_HEAD(&p->rt.run_list); + p->se.on_rq = 0; + INIT_LIST_HEAD(&p->se.group_node); #ifdef CONFIG_PREEMPT_NOTIFIERS INIT_HLIST_HEAD(&p->preempt_notifiers); @@ -2741,9 +2626,8 @@ static void __sched_fork(struct task_struct *p) /* * fork()/clone()-time setup: */ -void sched_fork(struct task_struct *p) +void sched_fork(struct task_struct *p, int clone_flags) { - unsigned long flags; int cpu = get_cpu(); __sched_fork(p); @@ -2794,16 +2678,16 @@ void sched_fork(struct task_struct *p) * * Silence PROVE_RCU. */ - raw_spin_lock_irqsave(&p->pi_lock, flags); + rcu_read_lock(); set_task_cpu(p, cpu); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); + rcu_read_unlock(); #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) if (likely(sched_info_on())) memset(&p->sched_info, 0, sizeof(p->sched_info)); #endif -#if defined(CONFIG_SMP) - p->on_cpu = 0; +#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) + p->oncpu = 0; #endif #ifdef CONFIG_PREEMPT /* Want to start with kernel preemption disabled. */ @@ -2823,31 +2707,41 @@ void sched_fork(struct task_struct *p) * that must be done for every newly created context, then puts the task * on the runqueue and wakes it. */ -void wake_up_new_task(struct task_struct *p) +void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) { unsigned long flags; struct rq *rq; + int cpu __maybe_unused = get_cpu(); - raw_spin_lock_irqsave(&p->pi_lock, flags); #ifdef CONFIG_SMP + rq = task_rq_lock(p, &flags); + p->state = TASK_WAKING; + /* * Fork balancing, do it here and not earlier because: * - cpus_allowed can change in the fork path * - any previously selected cpu might disappear through hotplug + * + * We set TASK_WAKING so that select_task_rq() can drop rq->lock + * without people poking at ->cpus_allowed. 
*/ - set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0)); + cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0); + set_task_cpu(p, cpu); + + p->state = TASK_RUNNING; + task_rq_unlock(rq, &flags); #endif - rq = __task_rq_lock(p); + rq = task_rq_lock(p, &flags); activate_task(rq, p, 0); - p->on_rq = 1; - trace_sched_wakeup_new(p, true); + trace_sched_wakeup_new(p, 1); check_preempt_curr(rq, p, WF_FORK); #ifdef CONFIG_SMP if (p->sched_class->task_woken) p->sched_class->task_woken(rq, p); #endif - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); + put_cpu(); } #ifdef CONFIG_PREEMPT_NOTIFIERS @@ -3556,22 +3450,27 @@ void sched_exec(void) { struct task_struct *p = current; unsigned long flags; + struct rq *rq; int dest_cpu; - raw_spin_lock_irqsave(&p->pi_lock, flags); - dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0); + rq = task_rq_lock(p, &flags); + dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0); if (dest_cpu == smp_processor_id()) goto unlock; - if (likely(cpu_active(dest_cpu))) { + /* + * select_task_rq() can race against ->cpus_allowed + */ + if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) && + likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) { struct migration_arg arg = { p, dest_cpu }; - raw_spin_unlock_irqrestore(&p->pi_lock, flags); - stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); + task_rq_unlock(rq, &flags); + stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); return; } unlock: - raw_spin_unlock_irqrestore(&p->pi_lock, flags); + task_rq_unlock(rq, &flags); } #endif @@ -3608,7 +3507,7 @@ unsigned long long task_delta_exec(struct task_struct *p) rq = task_rq_lock(p, &flags); ns = do_task_delta_exec(p, rq); - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); return ns; } @@ -3626,7 +3525,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) rq = task_rq_lock(p, &flags); ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); return ns; } @@ -3650,7 +3549,7 @@ unsigned long long thread_group_sched_runtime(struct task_struct *p) rq = task_rq_lock(p, &flags); thread_group_cputime(p, &totals); ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); return ns; } @@ -4004,6 +3903,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) /* * This function gets called by the timer code, with HZ frequency. * We call it with interrupts disabled. + * + * It also gets called by the fork code, when changing the parent's + * timeslices. */ void scheduler_tick(void) { @@ -4123,11 +4025,17 @@ static inline void schedule_debug(struct task_struct *prev) profile_hit(SCHED_PROFILING, __builtin_return_address(0)); schedstat_inc(this_rq(), sched_count); +#ifdef CONFIG_SCHEDSTATS + if (unlikely(prev->lock_depth >= 0)) { + schedstat_inc(this_rq(), rq_sched_info.bkl_count); + schedstat_inc(prev, sched_info.bkl_count); + } +#endif } static void put_prev_task(struct rq *rq, struct task_struct *prev) { - if (prev->on_rq || rq->skip_clock_update < 0) + if (prev->se.on_rq) update_rq_clock(rq); prev->sched_class->put_prev_task(rq, prev); } @@ -4189,13 +4097,11 @@ asmlinkage void __sched schedule(void) if (unlikely(signal_pending_state(prev->state, prev))) { prev->state = TASK_RUNNING; } else { - deactivate_task(rq, prev, DEQUEUE_SLEEP); - prev->on_rq = 0; - /* - * If a worker went to sleep, notify and ask workqueue - * whether it wants to wake up a task to maintain - * concurrency. 
+ * If a worker is going to sleep, notify and + * ask workqueue whether it wants to wake up a + * task to maintain concurrency. If so, wake + * up the task. */ if (prev->flags & PF_WQ_WORKER) { struct task_struct *to_wakeup; @@ -4204,10 +4110,11 @@ asmlinkage void __sched schedule(void) if (to_wakeup) try_to_wake_up_local(to_wakeup); } + deactivate_task(rq, prev, DEQUEUE_SLEEP); /* - * If we are going to sleep and we have plugged IO - * queued, make sure to submit it to avoid deadlocks. + * If we are going to sleep and we have plugged IO queued, make + * sure to submit it to avoid deadlocks. */ if (blk_needs_flush_plug(prev)) { raw_spin_unlock(&rq->lock); @@ -4254,53 +4161,70 @@ asmlinkage void __sched schedule(void) EXPORT_SYMBOL(schedule); #ifdef CONFIG_MUTEX_SPIN_ON_OWNER - -static inline bool owner_running(struct mutex *lock, struct task_struct *owner) -{ - bool ret = false; - - rcu_read_lock(); - if (lock->owner != owner) - goto fail; - - /* - * Ensure we emit the owner->on_cpu, dereference _after_ checking - * lock->owner still matches owner, if that fails, owner might - * point to free()d memory, if it still matches, the rcu_read_lock() - * ensures the memory stays valid. - */ - barrier(); - - ret = owner->on_cpu; -fail: - rcu_read_unlock(); - - return ret; -} - /* * Look out! "owner" is an entirely speculative pointer * access and not reliable. */ -int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) +int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) { + unsigned int cpu; + struct rq *rq; + if (!sched_feat(OWNER_SPIN)) return 0; - while (owner_running(lock, owner)) { - if (need_resched()) - return 0; +#ifdef CONFIG_DEBUG_PAGEALLOC + /* + * Need to access the cpu field knowing that + * DEBUG_PAGEALLOC could have unmapped it if + * the mutex owner just released it and exited. + */ + if (probe_kernel_address(&owner->cpu, cpu)) + return 0; +#else + cpu = owner->cpu; +#endif - arch_mutex_cpu_relax(); - } + /* + * Even if the access succeeded (likely case), + * the cpu field may no longer be valid. + */ + if (cpu >= nr_cpumask_bits) + return 0; /* - * If the owner changed to another task there is likely - * heavy contention, stop spinning. + * We need to validate that we can do a + * get_cpu() and that we have the percpu area. */ - if (lock->owner) + if (!cpu_online(cpu)) return 0; + rq = cpu_rq(cpu); + + for (;;) { + /* + * Owner changed, break to re-assess state. + */ + if (lock->owner != owner) { + /* + * If the lock has switched to a different owner, + * we likely have heavy contention. Return 0 to quit + * optimistic spinning and not contend further: + */ + if (lock->owner) + return 0; + break; + } + + /* + * Is that owner really running on that cpu? + */ + if (task_thread_info(rq->curr) != owner || need_resched()) + return 0; + + arch_mutex_cpu_relax(); + } + return 1; } #endif @@ -4760,18 +4684,19 @@ EXPORT_SYMBOL(sleep_on_timeout); */ void rt_mutex_setprio(struct task_struct *p, int prio) { + unsigned long flags; int oldprio, on_rq, running; struct rq *rq; const struct sched_class *prev_class; BUG_ON(prio < 0 || prio > MAX_PRIO); - rq = __task_rq_lock(p); + rq = task_rq_lock(p, &flags); trace_sched_pi_setprio(p, prio); oldprio = p->prio; prev_class = p->sched_class; - on_rq = p->on_rq; + on_rq = p->se.on_rq; running = task_current(rq, p); if (on_rq) dequeue_task(rq, p, 0); @@ -4791,7 +4716,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) enqueue_task(rq, p, oldprio < prio ? 
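mutex_spin_on_owner() above spins only while the lock owner is actually running on a CPU and bails out as soon as the owner changes or the spinner itself needs to reschedule; once the owner sleeps, sleeping is cheaper than burning cycles. A rough user-space analog of that adaptive idea with pthreads; the spin budget and the "holder is running" flag are stand-ins for scheduler state the kernel can consult directly:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct adaptive_mutex {
    pthread_mutex_t mutex;
    _Atomic int holder_running;   /* set by the holder while it holds the lock */
};

static void adaptive_lock(struct adaptive_mutex *m)
{
    /* Spin a bounded number of times while the holder looks busy... */
    for (int spins = 0; spins < 1000; spins++) {
        if (pthread_mutex_trylock(&m->mutex) == 0)
            goto locked;
        if (!atomic_load(&m->holder_running))
            break;   /* holder is blocked: spinning is pointless */
    }
    /* ...then fall back to sleeping on the lock. */
    pthread_mutex_lock(&m->mutex);
locked:
    atomic_store(&m->holder_running, 1);
}

static void adaptive_unlock(struct adaptive_mutex *m)
{
    atomic_store(&m->holder_running, 0);
    pthread_mutex_unlock(&m->mutex);
}

int main(void)
{
    struct adaptive_mutex m = { PTHREAD_MUTEX_INITIALIZER, 0 };

    adaptive_lock(&m);
    adaptive_unlock(&m);
    puts("ok");
    return 0;
}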
ENQUEUE_HEAD : 0); check_class_changed(rq, p, prev_class, oldprio); - __task_rq_unlock(rq); + task_rq_unlock(rq, &flags); } #endif @@ -4819,7 +4744,7 @@ void set_user_nice(struct task_struct *p, long nice) p->static_prio = NICE_TO_PRIO(nice); goto out_unlock; } - on_rq = p->on_rq; + on_rq = p->se.on_rq; if (on_rq) dequeue_task(rq, p, 0); @@ -4839,7 +4764,7 @@ void set_user_nice(struct task_struct *p, long nice) resched_task(rq->curr); } out_unlock: - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); } EXPORT_SYMBOL(set_user_nice); @@ -4953,6 +4878,8 @@ static struct task_struct *find_process_by_pid(pid_t pid) static void __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) { + BUG_ON(p->se.on_rq); + p->policy = policy; p->rt_priority = prio; p->normal_prio = normal_prio(p); @@ -5067,17 +4994,20 @@ static int __sched_setscheduler(struct task_struct *p, int policy, /* * make sure no PI-waiters arrive (or leave) while we are * changing the priority of the task: - * + */ + raw_spin_lock_irqsave(&p->pi_lock, flags); + /* * To be able to change p->policy safely, the appropriate * runqueue lock must be held. */ - rq = task_rq_lock(p, &flags); + rq = __task_rq_lock(p); /* * Changing the policy of the stop threads its a very bad idea */ if (p == rq->stop) { - task_rq_unlock(rq, p, &flags); + __task_rq_unlock(rq); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); return -EINVAL; } @@ -5101,7 +5031,8 @@ static int __sched_setscheduler(struct task_struct *p, int policy, if (rt_bandwidth_enabled() && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0 && !task_group_is_autogroup(task_group(p))) { - task_rq_unlock(rq, p, &flags); + __task_rq_unlock(rq); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); return -EPERM; } } @@ -5110,10 +5041,11 @@ static int __sched_setscheduler(struct task_struct *p, int policy, /* recheck policy now with rq lock held */ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; - task_rq_unlock(rq, p, &flags); + __task_rq_unlock(rq); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); goto recheck; } - on_rq = p->on_rq; + on_rq = p->se.on_rq; running = task_current(rq, p); if (on_rq) deactivate_task(rq, p, 0); @@ -5132,7 +5064,8 @@ static int __sched_setscheduler(struct task_struct *p, int policy, activate_task(rq, p, 0); check_class_changed(rq, p, prev_class, oldprio); - task_rq_unlock(rq, p, &flags); + __task_rq_unlock(rq); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); rt_mutex_adjust_pi(p); @@ -5383,6 +5316,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) { struct task_struct *p; unsigned long flags; + struct rq *rq; int retval; get_online_cpus(); @@ -5397,9 +5331,9 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) if (retval) goto out_unlock; - raw_spin_lock_irqsave(&p->pi_lock, flags); + rq = task_rq_lock(p, &flags); cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); + task_rq_unlock(rq, &flags); out_unlock: rcu_read_unlock(); @@ -5724,7 +5658,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, rq = task_rq_lock(p, &flags); time_slice = p->sched_class->get_rr_interval(rq, p); - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); rcu_read_unlock(); jiffies_to_timespec(time_slice, &t); @@ -5842,14 +5776,17 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) rcu_read_unlock(); rq->curr = rq->idle = idle; -#if defined(CONFIG_SMP) - idle->on_cpu = 1; +#if defined(CONFIG_SMP) && 
defined(__ARCH_WANT_UNLOCKED_CTXSW) + idle->oncpu = 1; #endif raw_spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ +#if defined(CONFIG_PREEMPT) + task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); +#else task_thread_info(idle)->preempt_count = 0; - +#endif /* * The idle tasks have their own, simple scheduling class: */ @@ -5944,17 +5881,26 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) unsigned int dest_cpu; int ret = 0; + /* + * Serialize against TASK_WAKING so that ttwu() and wunt() can + * drop the rq->lock and still rely on ->cpus_allowed. + */ +again: + while (task_is_waking(p)) + cpu_relax(); rq = task_rq_lock(p, &flags); - - if (cpumask_equal(&p->cpus_allowed, new_mask)) - goto out; + if (task_is_waking(p)) { + task_rq_unlock(rq, &flags); + goto again; + } if (!cpumask_intersects(new_mask, cpu_active_mask)) { ret = -EINVAL; goto out; } - if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) { + if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && + !cpumask_equal(&p->cpus_allowed, new_mask))) { ret = -EINVAL; goto out; } @@ -5971,16 +5917,16 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); - if (p->on_rq) { + if (migrate_task(p, rq)) { struct migration_arg arg = { p, dest_cpu }; /* Need help from migration thread: drop lock and wait. */ - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); tlb_migrate_finish(p->mm); return 0; } out: - task_rq_unlock(rq, p, &flags); + task_rq_unlock(rq, &flags); return ret; } @@ -6008,7 +5954,6 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) rq_src = cpu_rq(src_cpu); rq_dest = cpu_rq(dest_cpu); - raw_spin_lock(&p->pi_lock); double_rq_lock(rq_src, rq_dest); /* Already moved. */ if (task_cpu(p) != src_cpu) @@ -6021,7 +5966,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) * If we're not on a rq, the next wake-up will ensure we're * placed properly. 
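set_cpus_allowed_ptr() above leans on a handful of cpumask primitives: cpumask_equal(), cpumask_intersects() and cpumask_any_and(). With at most a machine word's worth of CPUs those operations reduce to plain bit arithmetic; a toy version for illustration only, not the kernel's cpumask implementation:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cpumask_t;   /* one bit per CPU */

static bool mask_equal(cpumask_t a, cpumask_t b)      { return a == b; }
static bool mask_intersects(cpumask_t a, cpumask_t b) { return (a & b) != 0; }

/* Index of the lowest set bit of (a & b), or -1 if the masks are disjoint. */
static int mask_any_and(cpumask_t a, cpumask_t b)
{
    cpumask_t both = a & b;

    return both ? __builtin_ctzl(both) : -1;
}

int main(void)
{
    cpumask_t allowed = 0xC;   /* CPUs 2 and 3 */
    cpumask_t active  = 0x6;   /* CPUs 1 and 2 */

    printf("equal=%d intersects=%d first common cpu=%d\n",
           mask_equal(allowed, active),
           mask_intersects(allowed, active),
           mask_any_and(allowed, active));
    return 0;
}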
*/ - if (p->on_rq) { + if (p->se.on_rq) { deactivate_task(rq_src, p, 0); set_task_cpu(p, dest_cpu); activate_task(rq_dest, p, 0); @@ -6031,7 +5976,6 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) ret = 1; fail: double_rq_unlock(rq_src, rq_dest); - raw_spin_unlock(&p->pi_lock); return ret; } @@ -6372,7 +6316,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) #ifdef CONFIG_HOTPLUG_CPU case CPU_DYING: - sched_ttwu_pending(); /* Update our root-domain */ raw_spin_lock_irqsave(&rq->lock, flags); if (rq->rd) { @@ -6451,8 +6394,6 @@ early_initcall(migration_init); #ifdef CONFIG_SMP -static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ - #ifdef CONFIG_SCHED_DEBUG static __read_mostly int sched_domain_debug_enabled; @@ -6548,6 +6489,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, static void sched_domain_debug(struct sched_domain *sd, int cpu) { + cpumask_var_t groupmask; int level = 0; if (!sched_domain_debug_enabled) @@ -6560,14 +6502,20 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); + if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { + printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); + return; + } + for (;;) { - if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) + if (sched_domain_debug_one(sd, cpu, level, groupmask)) break; level++; sd = sd->parent; if (!sd) break; } + free_cpumask_var(groupmask); } #else /* !CONFIG_SCHED_DEBUG */ # define sched_domain_debug(sd, cpu) do { } while (0) @@ -6624,11 +6572,12 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) return 1; } -static void free_rootdomain(struct rcu_head *rcu) +static void free_rootdomain(struct root_domain *rd) { - struct root_domain *rd = container_of(rcu, struct root_domain, rcu); + synchronize_sched(); cpupri_cleanup(&rd->cpupri); + free_cpumask_var(rd->rto_mask); free_cpumask_var(rd->online); free_cpumask_var(rd->span); @@ -6669,7 +6618,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) raw_spin_unlock_irqrestore(&rq->lock, flags); if (old_rd) - call_rcu_sched(&old_rd->rcu, free_rootdomain); + free_rootdomain(old_rd); } static int init_rootdomain(struct root_domain *rd) @@ -6720,25 +6669,6 @@ static struct root_domain *alloc_rootdomain(void) return rd; } -static void free_sched_domain(struct rcu_head *rcu) -{ - struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); - if (atomic_dec_and_test(&sd->groups->ref)) - kfree(sd->groups); - kfree(sd); -} - -static void destroy_sched_domain(struct sched_domain *sd, int cpu) -{ - call_rcu(&sd->rcu, free_sched_domain); -} - -static void destroy_sched_domains(struct sched_domain *sd, int cpu) -{ - for (; sd; sd = sd->parent) - destroy_sched_domain(sd, cpu); -} - /* * Attach the domain 'sd' to 'cpu' as its base domain. Callers must * hold the hotplug lock. @@ -6749,6 +6679,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) struct rq *rq = cpu_rq(cpu); struct sched_domain *tmp; + for (tmp = sd; tmp; tmp = tmp->parent) + tmp->span_weight = cpumask_weight(sched_domain_span(tmp)); + /* Remove the sched domains which do not contribute to scheduling. 
*/ for (tmp = sd; tmp; ) { struct sched_domain *parent = tmp->parent; @@ -6759,15 +6692,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) tmp->parent = parent->parent; if (parent->parent) parent->parent->child = tmp; - destroy_sched_domain(parent, cpu); } else tmp = tmp->parent; } if (sd && sd_degenerate(sd)) { - tmp = sd; sd = sd->parent; - destroy_sched_domain(tmp, cpu); if (sd) sd->child = NULL; } @@ -6775,9 +6705,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) sched_domain_debug(sd, cpu); rq_attach_root(rq, rd); - tmp = rq->sd; rcu_assign_pointer(rq->sd, sd); - destroy_sched_domains(tmp, cpu); } /* cpus with isolated domains */ @@ -6793,6 +6721,56 @@ static int __init isolated_cpu_setup(char *str) __setup("isolcpus=", isolated_cpu_setup); +/* + * init_sched_build_groups takes the cpumask we wish to span, and a pointer + * to a function which identifies what group(along with sched group) a CPU + * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids + * (due to the fact that we keep track of groups covered with a struct cpumask). + * + * init_sched_build_groups will build a circular linked list of the groups + * covered by the given span, and will set each group's ->cpumask correctly, + * and ->cpu_power to 0. + */ +static void +init_sched_build_groups(const struct cpumask *span, + const struct cpumask *cpu_map, + int (*group_fn)(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, + struct cpumask *tmpmask), + struct cpumask *covered, struct cpumask *tmpmask) +{ + struct sched_group *first = NULL, *last = NULL; + int i; + + cpumask_clear(covered); + + for_each_cpu(i, span) { + struct sched_group *sg; + int group = group_fn(i, cpu_map, &sg, tmpmask); + int j; + + if (cpumask_test_cpu(i, covered)) + continue; + + cpumask_clear(sched_group_cpus(sg)); + sg->cpu_power = 0; + + for_each_cpu(j, span) { + if (group_fn(j, cpu_map, NULL, tmpmask) != group) + continue; + + cpumask_set_cpu(j, covered); + cpumask_set_cpu(j, sched_group_cpus(sg)); + } + if (!first) + first = sg; + if (last) + last->next = sg; + last = sg; + } + last->next = first; +} + #define SD_NODES_PER_DOMAIN 16 #ifdef CONFIG_NUMA @@ -6809,7 +6787,7 @@ __setup("isolcpus=", isolated_cpu_setup); */ static int find_next_best_node(int node, nodemask_t *used_nodes) { - int i, n, val, min_val, best_node = -1; + int i, n, val, min_val, best_node = 0; min_val = INT_MAX; @@ -6833,8 +6811,7 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) } } - if (best_node != -1) - node_set(best_node, *used_nodes); + node_set(best_node, *used_nodes); return best_node; } @@ -6860,130 +6837,315 @@ static void sched_domain_node_span(int node, struct cpumask *span) for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { int next_node = find_next_best_node(node, &used_nodes); - if (next_node < 0) - break; + cpumask_or(span, span, cpumask_of_node(next_node)); } } - -static const struct cpumask *cpu_node_mask(int cpu) -{ - lockdep_assert_held(&sched_domains_mutex); - - sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask); - - return sched_domains_tmpmask; -} - -static const struct cpumask *cpu_allnodes_mask(int cpu) -{ - return cpu_possible_mask; -} #endif /* CONFIG_NUMA */ -static const struct cpumask *cpu_cpu_mask(int cpu) -{ - return cpumask_of_node(cpu_to_node(cpu)); -} - int sched_smt_power_savings = 0, sched_mc_power_savings = 0; -struct sd_data { - struct sched_domain **__percpu sd; - struct sched_group **__percpu sg; +/* + * The cpus 
mask in sched_group and sched_domain hangs off the end. + * + * ( See the the comments in include/linux/sched.h:struct sched_group + * and struct sched_domain. ) + */ +struct static_sched_group { + struct sched_group sg; + DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); +}; + +struct static_sched_domain { + struct sched_domain sd; + DECLARE_BITMAP(span, CONFIG_NR_CPUS); }; struct s_data { - struct sched_domain ** __percpu sd; +#ifdef CONFIG_NUMA + int sd_allnodes; + cpumask_var_t domainspan; + cpumask_var_t covered; + cpumask_var_t notcovered; +#endif + cpumask_var_t nodemask; + cpumask_var_t this_sibling_map; + cpumask_var_t this_core_map; + cpumask_var_t this_book_map; + cpumask_var_t send_covered; + cpumask_var_t tmpmask; + struct sched_group **sched_group_nodes; struct root_domain *rd; }; enum s_alloc { + sa_sched_groups = 0, sa_rootdomain, - sa_sd, - sa_sd_storage, + sa_tmpmask, + sa_send_covered, + sa_this_book_map, + sa_this_core_map, + sa_this_sibling_map, + sa_nodemask, + sa_sched_group_nodes, +#ifdef CONFIG_NUMA + sa_notcovered, + sa_covered, + sa_domainspan, +#endif sa_none, }; -struct sched_domain_topology_level; - -typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu); -typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); +/* + * SMT sched-domains: + */ +#ifdef CONFIG_SCHED_SMT +static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_groups); -struct sched_domain_topology_level { - sched_domain_init_f init; - sched_domain_mask_f mask; - struct sd_data data; -}; +static int +cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *unused) +{ + if (sg) + *sg = &per_cpu(sched_groups, cpu).sg; + return cpu; +} +#endif /* CONFIG_SCHED_SMT */ /* - * Assumes the sched_domain tree is fully constructed + * multi-core sched-domains: */ -static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) +#ifdef CONFIG_SCHED_MC +static DEFINE_PER_CPU(struct static_sched_domain, core_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); + +static int +cpu_to_core_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *mask) { - struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); - struct sched_domain *child = sd->child; + int group; +#ifdef CONFIG_SCHED_SMT + cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); + group = cpumask_first(mask); +#else + group = cpu; +#endif + if (sg) + *sg = &per_cpu(sched_group_core, group).sg; + return group; +} +#endif /* CONFIG_SCHED_MC */ - if (child) - cpu = cpumask_first(sched_domain_span(child)); +/* + * book sched-domains: + */ +#ifdef CONFIG_SCHED_BOOK +static DEFINE_PER_CPU(struct static_sched_domain, book_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_group_book); +static int +cpu_to_book_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *mask) +{ + int group = cpu; +#ifdef CONFIG_SCHED_MC + cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_SMT) + cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); + group = cpumask_first(mask); +#endif if (sg) - *sg = *per_cpu_ptr(sdd->sg, cpu); + *sg = &per_cpu(sched_group_book, group).sg; + return group; +} +#endif /* CONFIG_SCHED_BOOK */ - return cpu; +static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); +static DEFINE_PER_CPU(struct 
static_sched_group, sched_group_phys); + +static int +cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *mask) +{ + int group; +#ifdef CONFIG_SCHED_BOOK + cpumask_and(mask, cpu_book_mask(cpu), cpu_map); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_MC) + cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_SMT) + cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); + group = cpumask_first(mask); +#else + group = cpu; +#endif + if (sg) + *sg = &per_cpu(sched_group_phys, group).sg; + return group; } +#ifdef CONFIG_NUMA /* - * build_sched_groups takes the cpumask we wish to span, and a pointer - * to a function which identifies what group(along with sched group) a CPU - * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids - * (due to the fact that we keep track of groups covered with a struct cpumask). - * - * build_sched_groups will build a circular linked list of the groups - * covered by the given span, and will set each group's ->cpumask correctly, - * and ->cpu_power to 0. + * The init_sched_build_groups can't handle what we want to do with node + * groups, so roll our own. Now each node has its own list of groups which + * gets dynamically allocated. */ -static void -build_sched_groups(struct sched_domain *sd) +static DEFINE_PER_CPU(struct static_sched_domain, node_domains); +static struct sched_group ***sched_group_nodes_bycpu; + +static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); + +static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, + struct cpumask *nodemask) { - struct sched_group *first = NULL, *last = NULL; - struct sd_data *sdd = sd->private; - const struct cpumask *span = sched_domain_span(sd); - struct cpumask *covered; - int i; + int group; - lockdep_assert_held(&sched_domains_mutex); - covered = sched_domains_tmpmask; + cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); + group = cpumask_first(nodemask); - cpumask_clear(covered); + if (sg) + *sg = &per_cpu(sched_group_allnodes, group).sg; + return group; +} - for_each_cpu(i, span) { - struct sched_group *sg; - int group = get_group(i, sdd, &sg); - int j; +static void init_numa_sched_groups_power(struct sched_group *group_head) +{ + struct sched_group *sg = group_head; + int j; - if (cpumask_test_cpu(i, covered)) - continue; + if (!sg) + return; + do { + for_each_cpu(j, sched_group_cpus(sg)) { + struct sched_domain *sd; - cpumask_clear(sched_group_cpus(sg)); + sd = &per_cpu(phys_domains, j).sd; + if (j != group_first_cpu(sd->groups)) { + /* + * Only add "power" once for each + * physical package. 
+ */ + continue; + } + + sg->cpu_power += sd->groups->cpu_power; + } + sg = sg->next; + } while (sg != group_head); +} + +static int build_numa_sched_groups(struct s_data *d, + const struct cpumask *cpu_map, int num) +{ + struct sched_domain *sd; + struct sched_group *sg, *prev; + int n, j; + + cpumask_clear(d->covered); + cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map); + if (cpumask_empty(d->nodemask)) { + d->sched_group_nodes[num] = NULL; + goto out; + } + + sched_domain_node_span(num, d->domainspan); + cpumask_and(d->domainspan, d->domainspan, cpu_map); + + sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), + GFP_KERNEL, num); + if (!sg) { + printk(KERN_WARNING "Can not alloc domain group for node %d\n", + num); + return -ENOMEM; + } + d->sched_group_nodes[num] = sg; + + for_each_cpu(j, d->nodemask) { + sd = &per_cpu(node_domains, j).sd; + sd->groups = sg; + } + + sg->cpu_power = 0; + cpumask_copy(sched_group_cpus(sg), d->nodemask); + sg->next = sg; + cpumask_or(d->covered, d->covered, d->nodemask); + + prev = sg; + for (j = 0; j < nr_node_ids; j++) { + n = (num + j) % nr_node_ids; + cpumask_complement(d->notcovered, d->covered); + cpumask_and(d->tmpmask, d->notcovered, cpu_map); + cpumask_and(d->tmpmask, d->tmpmask, d->domainspan); + if (cpumask_empty(d->tmpmask)) + break; + cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n)); + if (cpumask_empty(d->tmpmask)) + continue; + sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), + GFP_KERNEL, num); + if (!sg) { + printk(KERN_WARNING + "Can not alloc domain group for node %d\n", j); + return -ENOMEM; + } sg->cpu_power = 0; + cpumask_copy(sched_group_cpus(sg), d->tmpmask); + sg->next = prev->next; + cpumask_or(d->covered, d->covered, d->tmpmask); + prev->next = sg; + prev = sg; + } +out: + return 0; +} +#endif /* CONFIG_NUMA */ - for_each_cpu(j, span) { - if (get_group(j, sdd, NULL) != group) +#ifdef CONFIG_NUMA +/* Free memory allocated for various sched_group structures */ +static void free_sched_groups(const struct cpumask *cpu_map, + struct cpumask *nodemask) +{ + int cpu, i; + + for_each_cpu(cpu, cpu_map) { + struct sched_group **sched_group_nodes + = sched_group_nodes_bycpu[cpu]; + + if (!sched_group_nodes) + continue; + + for (i = 0; i < nr_node_ids; i++) { + struct sched_group *oldsg, *sg = sched_group_nodes[i]; + + cpumask_and(nodemask, cpumask_of_node(i), cpu_map); + if (cpumask_empty(nodemask)) continue; - cpumask_set_cpu(j, covered); - cpumask_set_cpu(j, sched_group_cpus(sg)); + if (sg == NULL) + continue; + sg = sg->next; +next_sg: + oldsg = sg; + sg = sg->next; + kfree(oldsg); + if (oldsg != sched_group_nodes[i]) + goto next_sg; } - - if (!first) - first = sg; - if (last) - last->next = sg; - last = sg; + kfree(sched_group_nodes); + sched_group_nodes_bycpu[cpu] = NULL; } - last->next = first; } +#else /* !CONFIG_NUMA */ +static void free_sched_groups(const struct cpumask *cpu_map, + struct cpumask *nodemask) +{ +} +#endif /* CONFIG_NUMA */ /* * Initialize sched groups cpu_power. 
@@ -6997,6 +7159,11 @@ build_sched_groups(struct sched_domain *sd) */ static void init_sched_groups_power(int cpu, struct sched_domain *sd) { + struct sched_domain *child; + struct sched_group *group; + long power; + int weight; + WARN_ON(!sd || !sd->groups); if (cpu != group_first_cpu(sd->groups)) @@ -7004,7 +7171,36 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); - update_group_power(sd, cpu); + child = sd->child; + + sd->groups->cpu_power = 0; + + if (!child) { + power = SCHED_LOAD_SCALE; + weight = cpumask_weight(sched_domain_span(sd)); + /* + * SMT siblings share the power of a single core. + * Usually multiple threads get a better yield out of + * that one core than a single thread would have, + * reflect that in sd->smt_gain. + */ + if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { + power *= sd->smt_gain; + power /= weight; + power >>= SCHED_LOAD_SHIFT; + } + sd->groups->cpu_power += power; + return; + } + + /* + * Add cpu_power of each child group to this groups cpu_power. + */ + group = child->groups; + do { + sd->groups->cpu_power += group->cpu_power; + group = group->next; + } while (group != child->groups); } /* @@ -7018,15 +7214,15 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) # define SD_INIT_NAME(sd, type) do { } while (0) #endif -#define SD_INIT_FUNC(type) \ -static noinline struct sched_domain * \ -sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \ -{ \ - struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \ - *sd = SD_##type##_INIT; \ - SD_INIT_NAME(sd, type); \ - sd->private = &tl->data; \ - return sd; \ +#define SD_INIT(sd, type) sd_init_##type(sd) + +#define SD_INIT_FUNC(type) \ +static noinline void sd_init_##type(struct sched_domain *sd) \ +{ \ + memset(sd, 0, sizeof(*sd)); \ + *sd = SD_##type##_INIT; \ + sd->level = SD_LV_##type; \ + SD_INIT_NAME(sd, type); \ } SD_INIT_FUNC(CPU) @@ -7045,14 +7241,13 @@ SD_INIT_FUNC(CPU) #endif static int default_relax_domain_level = -1; -int sched_domain_level_max; static int __init setup_relax_domain_level(char *str) { unsigned long val; val = simple_strtoul(str, NULL, 0); - if (val < sched_domain_level_max) + if (val < SD_LV_MAX) default_relax_domain_level = val; return 1; @@ -7080,20 +7275,37 @@ static void set_domain_attribute(struct sched_domain *sd, } } -static void __sdt_free(const struct cpumask *cpu_map); -static int __sdt_alloc(const struct cpumask *cpu_map); - static void __free_domain_allocs(struct s_data *d, enum s_alloc what, const struct cpumask *cpu_map) { switch (what) { + case sa_sched_groups: + free_sched_groups(cpu_map, d->tmpmask); /* fall through */ + d->sched_group_nodes = NULL; case sa_rootdomain: - if (!atomic_read(&d->rd->refcount)) - free_rootdomain(&d->rd->rcu); /* fall through */ - case sa_sd: - free_percpu(d->sd); /* fall through */ - case sa_sd_storage: - __sdt_free(cpu_map); /* fall through */ + free_rootdomain(d->rd); /* fall through */ + case sa_tmpmask: + free_cpumask_var(d->tmpmask); /* fall through */ + case sa_send_covered: + free_cpumask_var(d->send_covered); /* fall through */ + case sa_this_book_map: + free_cpumask_var(d->this_book_map); /* fall through */ + case sa_this_core_map: + free_cpumask_var(d->this_core_map); /* fall through */ + case sa_this_sibling_map: + free_cpumask_var(d->this_sibling_map); /* fall through */ + case sa_nodemask: + free_cpumask_var(d->nodemask); /* fall through */ + case sa_sched_group_nodes: +#ifdef CONFIG_NUMA + 
kfree(d->sched_group_nodes); /* fall through */ + case sa_notcovered: + free_cpumask_var(d->notcovered); /* fall through */ + case sa_covered: + free_cpumask_var(d->covered); /* fall through */ + case sa_domainspan: + free_cpumask_var(d->domainspan); /* fall through */ +#endif case sa_none: break; } @@ -7102,212 +7314,308 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) { - memset(d, 0, sizeof(*d)); - - if (__sdt_alloc(cpu_map)) - return sa_sd_storage; - d->sd = alloc_percpu(struct sched_domain *); - if (!d->sd) - return sa_sd_storage; +#ifdef CONFIG_NUMA + if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL)) + return sa_none; + if (!alloc_cpumask_var(&d->covered, GFP_KERNEL)) + return sa_domainspan; + if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL)) + return sa_covered; + /* Allocate the per-node list of sched groups */ + d->sched_group_nodes = kcalloc(nr_node_ids, + sizeof(struct sched_group *), GFP_KERNEL); + if (!d->sched_group_nodes) { + printk(KERN_WARNING "Can not alloc sched group node list\n"); + return sa_notcovered; + } + sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes; +#endif + if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL)) + return sa_sched_group_nodes; + if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL)) + return sa_nodemask; + if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) + return sa_this_sibling_map; + if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL)) + return sa_this_core_map; + if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) + return sa_this_book_map; + if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) + return sa_send_covered; d->rd = alloc_rootdomain(); - if (!d->rd) - return sa_sd; + if (!d->rd) { + printk(KERN_WARNING "Cannot alloc root domain\n"); + return sa_tmpmask; + } return sa_rootdomain; } -/* - * NULL the sd_data elements we've used to build the sched_domain and - * sched_group structure so that the subsequent __free_domain_allocs() - * will not free the data we're using. 
- */ -static void claim_allocations(int cpu, struct sched_domain *sd) +static struct sched_domain *__build_numa_sched_domains(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i) { - struct sd_data *sdd = sd->private; - struct sched_group *sg = sd->groups; + struct sched_domain *sd = NULL; +#ifdef CONFIG_NUMA + struct sched_domain *parent; + + d->sd_allnodes = 0; + if (cpumask_weight(cpu_map) > + SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) { + sd = &per_cpu(allnodes_domains, i).sd; + SD_INIT(sd, ALLNODES); + set_domain_attribute(sd, attr); + cpumask_copy(sched_domain_span(sd), cpu_map); + cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask); + d->sd_allnodes = 1; + } + parent = sd; + + sd = &per_cpu(node_domains, i).sd; + SD_INIT(sd, NODE); + set_domain_attribute(sd, attr); + sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); + sd->parent = parent; + if (parent) + parent->child = sd; + cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map); +#endif + return sd; +} - WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); - *per_cpu_ptr(sdd->sd, cpu) = NULL; +static struct sched_domain *__build_cpu_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *parent, int i) +{ + struct sched_domain *sd; + sd = &per_cpu(phys_domains, i).sd; + SD_INIT(sd, CPU); + set_domain_attribute(sd, attr); + cpumask_copy(sched_domain_span(sd), d->nodemask); + sd->parent = parent; + if (parent) + parent->child = sd; + cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask); + return sd; +} - if (cpu == cpumask_first(sched_group_cpus(sg))) { - WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg); - *per_cpu_ptr(sdd->sg, cpu) = NULL; - } +static struct sched_domain *__build_book_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *parent, int i) +{ + struct sched_domain *sd = parent; +#ifdef CONFIG_SCHED_BOOK + sd = &per_cpu(book_domains, i).sd; + SD_INIT(sd, BOOK); + set_domain_attribute(sd, attr); + cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i)); + sd->parent = parent; + parent->child = sd; + cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask); +#endif + return sd; } -#ifdef CONFIG_SCHED_SMT -static const struct cpumask *cpu_smt_mask(int cpu) +static struct sched_domain *__build_mc_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *parent, int i) { - return topology_thread_cpumask(cpu); + struct sched_domain *sd = parent; +#ifdef CONFIG_SCHED_MC + sd = &per_cpu(core_domains, i).sd; + SD_INIT(sd, MC); + set_domain_attribute(sd, attr); + cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i)); + sd->parent = parent; + parent->child = sd; + cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask); +#endif + return sd; } + +static struct sched_domain *__build_smt_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *parent, int i) +{ + struct sched_domain *sd = parent; +#ifdef CONFIG_SCHED_SMT + sd = &per_cpu(cpu_domains, i).sd; + SD_INIT(sd, SIBLING); + set_domain_attribute(sd, attr); + cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i)); + sd->parent = parent; + parent->child = sd; + cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask); #endif + return sd; +} -/* - * Topology list, bottom-up. 
- */ -static struct sched_domain_topology_level default_topology[] = { +static void build_sched_groups(struct s_data *d, enum sched_domain_level l, + const struct cpumask *cpu_map, int cpu) +{ + switch (l) { #ifdef CONFIG_SCHED_SMT - { sd_init_SIBLING, cpu_smt_mask, }, + case SD_LV_SIBLING: /* set up CPU (sibling) groups */ + cpumask_and(d->this_sibling_map, cpu_map, + topology_thread_cpumask(cpu)); + if (cpu == cpumask_first(d->this_sibling_map)) + init_sched_build_groups(d->this_sibling_map, cpu_map, + &cpu_to_cpu_group, + d->send_covered, d->tmpmask); + break; #endif #ifdef CONFIG_SCHED_MC - { sd_init_MC, cpu_coregroup_mask, }, + case SD_LV_MC: /* set up multi-core groups */ + cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu)); + if (cpu == cpumask_first(d->this_core_map)) + init_sched_build_groups(d->this_core_map, cpu_map, + &cpu_to_core_group, + d->send_covered, d->tmpmask); + break; #endif #ifdef CONFIG_SCHED_BOOK - { sd_init_BOOK, cpu_book_mask, }, + case SD_LV_BOOK: /* set up book groups */ + cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu)); + if (cpu == cpumask_first(d->this_book_map)) + init_sched_build_groups(d->this_book_map, cpu_map, + &cpu_to_book_group, + d->send_covered, d->tmpmask); + break; #endif - { sd_init_CPU, cpu_cpu_mask, }, + case SD_LV_CPU: /* set up physical groups */ + cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); + if (!cpumask_empty(d->nodemask)) + init_sched_build_groups(d->nodemask, cpu_map, + &cpu_to_phys_group, + d->send_covered, d->tmpmask); + break; #ifdef CONFIG_NUMA - { sd_init_NODE, cpu_node_mask, }, - { sd_init_ALLNODES, cpu_allnodes_mask, }, + case SD_LV_ALLNODES: + init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group, + d->send_covered, d->tmpmask); + break; #endif - { NULL, }, -}; - -static struct sched_domain_topology_level *sched_domain_topology = default_topology; - -static int __sdt_alloc(const struct cpumask *cpu_map) -{ - struct sched_domain_topology_level *tl; - int j; - - for (tl = sched_domain_topology; tl->init; tl++) { - struct sd_data *sdd = &tl->data; - - sdd->sd = alloc_percpu(struct sched_domain *); - if (!sdd->sd) - return -ENOMEM; - - sdd->sg = alloc_percpu(struct sched_group *); - if (!sdd->sg) - return -ENOMEM; - - for_each_cpu(j, cpu_map) { - struct sched_domain *sd; - struct sched_group *sg; - - sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), - GFP_KERNEL, cpu_to_node(j)); - if (!sd) - return -ENOMEM; - - *per_cpu_ptr(sdd->sd, j) = sd; - - sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), - GFP_KERNEL, cpu_to_node(j)); - if (!sg) - return -ENOMEM; - - *per_cpu_ptr(sdd->sg, j) = sg; - } - } - - return 0; -} - -static void __sdt_free(const struct cpumask *cpu_map) -{ - struct sched_domain_topology_level *tl; - int j; - - for (tl = sched_domain_topology; tl->init; tl++) { - struct sd_data *sdd = &tl->data; - - for_each_cpu(j, cpu_map) { - kfree(*per_cpu_ptr(sdd->sd, j)); - kfree(*per_cpu_ptr(sdd->sg, j)); - } - free_percpu(sdd->sd); - free_percpu(sdd->sg); - } -} - -struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, - struct s_data *d, const struct cpumask *cpu_map, - struct sched_domain_attr *attr, struct sched_domain *child, - int cpu) -{ - struct sched_domain *sd = tl->init(tl, cpu); - if (!sd) - return child; - - set_domain_attribute(sd, attr); - cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); - if (child) { - sd->level = child->level + 1; - sched_domain_level_max = max(sched_domain_level_max, sd->level); - 
child->parent = sd; + default: + break; } - sd->child = child; - - return sd; } /* * Build sched domains for a given set of cpus and attach the sched domains * to the individual cpus */ -static int build_sched_domains(const struct cpumask *cpu_map, - struct sched_domain_attr *attr) +static int __build_sched_domains(const struct cpumask *cpu_map, + struct sched_domain_attr *attr) { enum s_alloc alloc_state = sa_none; - struct sched_domain *sd; struct s_data d; - int i, ret = -ENOMEM; + struct sched_domain *sd; + int i; +#ifdef CONFIG_NUMA + d.sd_allnodes = 0; +#endif alloc_state = __visit_domain_allocation_hell(&d, cpu_map); if (alloc_state != sa_rootdomain) goto error; + alloc_state = sa_sched_groups; + + /* + * Set up domains for cpus specified by the cpu_map. + */ + for_each_cpu(i, cpu_map) { + cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), + cpu_map); + + sd = __build_numa_sched_domains(&d, cpu_map, attr, i); + sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); + sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); + sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); + sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); + } - /* Set up domains for cpus specified by the cpu_map. */ for_each_cpu(i, cpu_map) { - struct sched_domain_topology_level *tl; + build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); + build_sched_groups(&d, SD_LV_BOOK, cpu_map, i); + build_sched_groups(&d, SD_LV_MC, cpu_map, i); + } - sd = NULL; - for (tl = sched_domain_topology; tl->init; tl++) - sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i); + /* Set up physical groups */ + for (i = 0; i < nr_node_ids; i++) + build_sched_groups(&d, SD_LV_CPU, cpu_map, i); - while (sd->child) - sd = sd->child; +#ifdef CONFIG_NUMA + /* Set up node groups */ + if (d.sd_allnodes) + build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0); - *per_cpu_ptr(d.sd, i) = sd; - } + for (i = 0; i < nr_node_ids; i++) + if (build_numa_sched_groups(&d, cpu_map, i)) + goto error; +#endif - /* Build the groups for the domains */ + /* Calculate CPU power for physical packages and nodes */ +#ifdef CONFIG_SCHED_SMT for_each_cpu(i, cpu_map) { - for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { - sd->span_weight = cpumask_weight(sched_domain_span(sd)); - get_group(i, sd->private, &sd->groups); - atomic_inc(&sd->groups->ref); - - if (i != cpumask_first(sched_domain_span(sd))) - continue; + sd = &per_cpu(cpu_domains, i).sd; + init_sched_groups_power(i, sd); + } +#endif +#ifdef CONFIG_SCHED_MC + for_each_cpu(i, cpu_map) { + sd = &per_cpu(core_domains, i).sd; + init_sched_groups_power(i, sd); + } +#endif +#ifdef CONFIG_SCHED_BOOK + for_each_cpu(i, cpu_map) { + sd = &per_cpu(book_domains, i).sd; + init_sched_groups_power(i, sd); + } +#endif - build_sched_groups(sd); - } + for_each_cpu(i, cpu_map) { + sd = &per_cpu(phys_domains, i).sd; + init_sched_groups_power(i, sd); } - /* Calculate CPU power for physical packages and nodes */ - for (i = nr_cpumask_bits-1; i >= 0; i--) { - if (!cpumask_test_cpu(i, cpu_map)) - continue; +#ifdef CONFIG_NUMA + for (i = 0; i < nr_node_ids; i++) + init_numa_sched_groups_power(d.sched_group_nodes[i]); - for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { - claim_allocations(i, sd); - init_sched_groups_power(i, sd); - } + if (d.sd_allnodes) { + struct sched_group *sg; + + cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, + d.tmpmask); + init_numa_sched_groups_power(sg); } +#endif /* Attach the domains */ - rcu_read_lock(); for_each_cpu(i, cpu_map) { - sd = *per_cpu_ptr(d.sd, i); 
+#ifdef CONFIG_SCHED_SMT + sd = &per_cpu(cpu_domains, i).sd; +#elif defined(CONFIG_SCHED_MC) + sd = &per_cpu(core_domains, i).sd; +#elif defined(CONFIG_SCHED_BOOK) + sd = &per_cpu(book_domains, i).sd; +#else + sd = &per_cpu(phys_domains, i).sd; +#endif cpu_attach_domain(sd, d.rd, i); } - rcu_read_unlock(); - ret = 0; + d.sched_group_nodes = NULL; /* don't free this we still need it */ + __free_domain_allocs(&d, sa_tmpmask, cpu_map); + return 0; + error: __free_domain_allocs(&d, alloc_state, cpu_map); - return ret; + return -ENOMEM; +} + +static int build_sched_domains(const struct cpumask *cpu_map) +{ + return __build_sched_domains(cpu_map, NULL); } static cpumask_var_t *doms_cur; /* current sched domains */ @@ -7362,7 +7670,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) * For now this just excludes isolated cpus, but could be used to * exclude other special cases in the future. */ -static int init_sched_domains(const struct cpumask *cpu_map) +static int arch_init_sched_domains(const struct cpumask *cpu_map) { int err; @@ -7373,24 +7681,32 @@ static int init_sched_domains(const struct cpumask *cpu_map) doms_cur = &fallback_doms; cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); dattr_cur = NULL; - err = build_sched_domains(doms_cur[0], NULL); + err = build_sched_domains(doms_cur[0]); register_sched_domain_sysctl(); return err; } +static void arch_destroy_sched_domains(const struct cpumask *cpu_map, + struct cpumask *tmpmask) +{ + free_sched_groups(cpu_map, tmpmask); +} + /* * Detach sched domains from a group of cpus specified in cpu_map * These cpus will now be attached to the NULL domain */ static void detach_destroy_domains(const struct cpumask *cpu_map) { + /* Save because hotplug lock held. */ + static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); int i; - rcu_read_lock(); for_each_cpu(i, cpu_map) cpu_attach_domain(NULL, &def_root_domain, i); - rcu_read_unlock(); + synchronize_sched(); + arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); } /* handle null as "default" */ @@ -7479,7 +7795,8 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], goto match2; } /* no match - add a new doms_new */ - build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); + __build_sched_domains(doms_new[i], + dattr_new ? 
dattr_new + i : NULL); match2: ; } @@ -7498,7 +7815,7 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) -static void reinit_sched_domains(void) +static void arch_reinit_sched_domains(void) { get_online_cpus(); @@ -7531,7 +7848,7 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) else sched_mc_power_savings = level; - reinit_sched_domains(); + arch_reinit_sched_domains(); return count; } @@ -7650,9 +7967,14 @@ void __init sched_init_smp(void) alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); alloc_cpumask_var(&fallback_doms, GFP_KERNEL); +#if defined(CONFIG_NUMA) + sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), + GFP_KERNEL); + BUG_ON(sched_group_nodes_bycpu == NULL); +#endif get_online_cpus(); mutex_lock(&sched_domains_mutex); - init_sched_domains(cpu_active_mask); + arch_init_sched_domains(cpu_active_mask); cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); if (cpumask_empty(non_isolated_cpus)) cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); @@ -7959,7 +8281,6 @@ void __init sched_init(void) /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); #ifdef CONFIG_SMP - zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); #ifdef CONFIG_NO_HZ zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); @@ -8019,7 +8340,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p) int old_prio = p->prio; int on_rq; - on_rq = p->on_rq; + on_rq = p->se.on_rq; if (on_rq) deactivate_task(rq, p, 0); __setscheduler(rq, p, SCHED_NORMAL, 0); @@ -8232,6 +8553,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) { struct rt_rq *rt_rq; struct sched_rt_entity *rt_se; + struct rq *rq; int i; tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); @@ -8245,6 +8567,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) ktime_to_ns(def_rt_bandwidth.rt_period), 0); for_each_possible_cpu(i) { + rq = cpu_rq(i); + rt_rq = kzalloc_node(sizeof(struct rt_rq), GFP_KERNEL, cpu_to_node(i)); if (!rt_rq) @@ -8359,7 +8683,7 @@ void sched_move_task(struct task_struct *tsk) rq = task_rq_lock(tsk, &flags); running = task_current(rq, tsk); - on_rq = tsk->on_rq; + on_rq = tsk->se.on_rq; if (on_rq) dequeue_task(rq, tsk, 0); @@ -8378,7 +8702,7 @@ void sched_move_task(struct task_struct *tsk) if (on_rq) enqueue_task(rq, tsk, 0); - task_rq_unlock(rq, tsk, &flags); + task_rq_unlock(rq, &flags); } #endif /* CONFIG_CGROUP_SCHED */ diff --git a/trunk/kernel/sched_debug.c b/trunk/kernel/sched_debug.c index a6710a112b4f..7bacd83a4158 100644 --- a/trunk/kernel/sched_debug.c +++ b/trunk/kernel/sched_debug.c @@ -152,7 +152,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) read_lock_irqsave(&tasklist_lock, flags); do_each_thread(g, p) { - if (!p->on_rq || task_cpu(p) != rq_cpu) + if (!p->se.on_rq || task_cpu(p) != rq_cpu) continue; print_task(m, rq, p); @@ -296,6 +296,9 @@ static void print_cpu(struct seq_file *m, int cpu) P(ttwu_count); P(ttwu_local); + SEQ_printf(m, " .%-30s: %d\n", "bkl_count", + rq->rq_sched_info.bkl_count); + #undef P #undef P64 #endif @@ -438,6 +441,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) P(se.statistics.wait_count); PN(se.statistics.iowait_sum); P(se.statistics.iowait_count); + P(sched_info.bkl_count); P(se.nr_migrations); 
P(se.statistics.nr_migrations_cold); P(se.statistics.nr_failed_migrations_affine); diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index 37f22626225e..6fa833ab2cb8 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -358,10 +358,6 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq) } cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); -#ifndef CONFIG_64BIT - smp_wmb(); - cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; -#endif } /* @@ -1344,8 +1340,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) hrtick_update(rq); } -static void set_next_buddy(struct sched_entity *se); - /* * The dequeue_task method is called before nr_running is * decreased. We remove the task from the rbtree and @@ -1355,22 +1349,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) { struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; - int task_sleep = flags & DEQUEUE_SLEEP; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); /* Don't dequeue parent if it has other entities besides us */ - if (cfs_rq->load.weight) { - /* - * Bias pick_next to pick a task from this cfs_rq, as - * p is sleeping when it is within its sched_slice. - */ - if (task_sleep && parent_entity(se)) - set_next_buddy(parent_entity(se)); + if (cfs_rq->load.weight) break; - } flags |= DEQUEUE_SLEEP; } @@ -1386,25 +1372,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) #ifdef CONFIG_SMP -static void task_waking_fair(struct task_struct *p) +static void task_waking_fair(struct rq *rq, struct task_struct *p) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); - u64 min_vruntime; - -#ifndef CONFIG_64BIT - u64 min_vruntime_copy; - do { - min_vruntime_copy = cfs_rq->min_vruntime_copy; - smp_rmb(); - min_vruntime = cfs_rq->min_vruntime; - } while (min_vruntime != min_vruntime_copy); -#else - min_vruntime = cfs_rq->min_vruntime; -#endif - - se->vruntime -= min_vruntime; + se->vruntime -= cfs_rq->min_vruntime; } #ifdef CONFIG_FAIR_GROUP_SCHED @@ -1649,7 +1622,6 @@ static int select_idle_sibling(struct task_struct *p, int target) /* * Otherwise, iterate the domains and find an elegible idle cpu. */ - rcu_read_lock(); for_each_domain(target, sd) { if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) break; @@ -1669,7 +1641,6 @@ static int select_idle_sibling(struct task_struct *p, int target) cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) break; } - rcu_read_unlock(); return target; } @@ -1686,7 +1657,7 @@ static int select_idle_sibling(struct task_struct *p, int target) * preempt must be disabled. 
*/ static int -select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) +select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags) { struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; int cpu = smp_processor_id(); @@ -1702,7 +1673,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) new_cpu = prev_cpu; } - rcu_read_lock(); for_each_domain(cpu, tmp) { if (!(tmp->flags & SD_LOAD_BALANCE)) continue; @@ -1753,10 +1723,9 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) if (affine_sd) { if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) - prev_cpu = cpu; - - new_cpu = select_idle_sibling(p, prev_cpu); - goto unlock; + return select_idle_sibling(p, cpu); + else + return select_idle_sibling(p, prev_cpu); } while (sd) { @@ -1797,8 +1766,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) } /* while loop will break here if sd == NULL */ } -unlock: - rcu_read_unlock(); return new_cpu; } @@ -1822,7 +1789,10 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se) * This is especially important for buddies when the leftmost * task is higher priority than the buddy. */ - return calc_delta_fair(gran, se); + if (unlikely(se->load.weight != NICE_0_LOAD)) + gran = calc_delta_fair(gran, se); + + return gran; } /* @@ -1856,26 +1826,26 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) static void set_last_buddy(struct sched_entity *se) { - if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) - return; - - for_each_sched_entity(se) - cfs_rq_of(se)->last = se; + if (likely(task_of(se)->policy != SCHED_IDLE)) { + for_each_sched_entity(se) + cfs_rq_of(se)->last = se; + } } static void set_next_buddy(struct sched_entity *se) { - if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) - return; - - for_each_sched_entity(se) - cfs_rq_of(se)->next = se; + if (likely(task_of(se)->policy != SCHED_IDLE)) { + for_each_sched_entity(se) + cfs_rq_of(se)->next = se; + } } static void set_skip_buddy(struct sched_entity *se) { - for_each_sched_entity(se) - cfs_rq_of(se)->skip = se; + if (likely(task_of(se)->policy != SCHED_IDLE)) { + for_each_sched_entity(se) + cfs_rq_of(se)->skip = se; + } } /* @@ -1887,15 +1857,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ struct sched_entity *se = &curr->se, *pse = &p->se; struct cfs_rq *cfs_rq = task_cfs_rq(curr); int scale = cfs_rq->nr_running >= sched_nr_latency; - int next_buddy_marked = 0; if (unlikely(se == pse)) return; - if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { + if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) set_next_buddy(pse); - next_buddy_marked = 1; - } /* * We can come here with TIF_NEED_RESCHED already set from new task @@ -1923,15 +1890,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ update_curr(cfs_rq); find_matching_se(&se, &pse); BUG_ON(!pse); - if (wakeup_preempt_entity(se, pse) == 1) { - /* - * Bias pick_next to pick the sched entity that is - * triggering this preemption. 
- */ - if (!next_buddy_marked) - set_next_buddy(pse); + if (wakeup_preempt_entity(se, pse) == 1) goto preempt; - } return; @@ -2142,7 +2102,7 @@ static unsigned long balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned, - struct cfs_rq *busiest_cfs_rq) + int *this_best_prio, struct cfs_rq *busiest_cfs_rq) { int loops = 0, pulled = 0; long rem_load_move = max_load_move; @@ -2180,6 +2140,9 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, */ if (rem_load_move <= 0) break; + + if (p->prio < *this_best_prio) + *this_best_prio = p->prio; } out: /* @@ -2239,7 +2202,7 @@ static unsigned long load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned) + int *all_pinned, int *this_best_prio) { long rem_load_move = max_load_move; int busiest_cpu = cpu_of(busiest); @@ -2264,7 +2227,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, rem_load = div_u64(rem_load, busiest_h_load + 1); moved_load = balance_tasks(this_rq, this_cpu, busiest, - rem_load, sd, idle, all_pinned, + rem_load, sd, idle, all_pinned, this_best_prio, busiest_cfs_rq); if (!moved_load) @@ -2290,11 +2253,11 @@ static unsigned long load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned) + int *all_pinned, int *this_best_prio) { return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd, idle, all_pinned, - &busiest->cfs); + this_best_prio, &busiest->cfs); } #endif @@ -2311,11 +2274,12 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, int *all_pinned) { unsigned long total_load_moved = 0, load_moved; + int this_best_prio = this_rq->curr->prio; do { load_moved = load_balance_fair(this_rq, this_cpu, busiest, max_load_move - total_load_moved, - sd, idle, all_pinned); + sd, idle, all_pinned, &this_best_prio); total_load_moved += load_moved; @@ -2684,7 +2648,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) /* * Only siblings can have significantly less than SCHED_LOAD_SCALE */ - if (!(sd->flags & SD_SHARE_CPUPOWER)) + if (sd->level != SD_LV_SIBLING) return 0; /* @@ -3501,7 +3465,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq) raw_spin_unlock(&this_rq->lock); update_shares(this_cpu); - rcu_read_lock(); for_each_domain(this_cpu, sd) { unsigned long interval; int balance = 1; @@ -3523,7 +3486,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq) break; } } - rcu_read_unlock(); raw_spin_lock(&this_rq->lock); @@ -3572,7 +3534,6 @@ static int active_load_balance_cpu_stop(void *data) double_lock_balance(busiest_rq, target_rq); /* Search for an sd spanning us and the target CPU. 
*/ - rcu_read_lock(); for_each_domain(target_cpu, sd) { if ((sd->flags & SD_LOAD_BALANCE) && cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) @@ -3588,7 +3549,6 @@ static int active_load_balance_cpu_stop(void *data) else schedstat_inc(sd, alb_failed); } - rcu_read_unlock(); double_unlock_balance(busiest_rq, target_rq); out_unlock: busiest_rq->active_balance = 0; @@ -3715,7 +3675,6 @@ static int find_new_ilb(int cpu) { struct sched_domain *sd; struct sched_group *ilb_group; - int ilb = nr_cpu_ids; /* * Have idle load balancer selection from semi-idle packages only @@ -3731,25 +3690,20 @@ static int find_new_ilb(int cpu) if (cpumask_weight(nohz.idle_cpus_mask) < 2) goto out_done; - rcu_read_lock(); for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { ilb_group = sd->groups; do { - if (is_semi_idle_group(ilb_group)) { - ilb = cpumask_first(nohz.grp_idle_mask); - goto unlock; - } + if (is_semi_idle_group(ilb_group)) + return cpumask_first(nohz.grp_idle_mask); ilb_group = ilb_group->next; } while (ilb_group != sd->groups); } -unlock: - rcu_read_unlock(); out_done: - return ilb; + return nr_cpu_ids; } #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ static inline int find_new_ilb(int call_cpu) @@ -3894,7 +3848,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) update_shares(cpu); - rcu_read_lock(); for_each_domain(cpu, sd) { if (!(sd->flags & SD_LOAD_BALANCE)) continue; @@ -3940,7 +3893,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) if (!balance) break; } - rcu_read_unlock(); /* * next_balance will be updated only when there is a need. diff --git a/trunk/kernel/sched_features.h b/trunk/kernel/sched_features.h index be40f7371ee1..68e69acc29b9 100644 --- a/trunk/kernel/sched_features.h +++ b/trunk/kernel/sched_features.h @@ -64,9 +64,3 @@ SCHED_FEAT(OWNER_SPIN, 1) * Decrement CPU power based on irq activity */ SCHED_FEAT(NONIRQ_POWER, 1) - -/* - * Queue remote wakeups on the target CPU and process them - * using the scheduler IPI. Reduces rq->lock contention/bounces. 
- */ -SCHED_FEAT(TTWU_QUEUE, 1) diff --git a/trunk/kernel/sched_idletask.c b/trunk/kernel/sched_idletask.c index 0a51882534ea..a776a6396427 100644 --- a/trunk/kernel/sched_idletask.c +++ b/trunk/kernel/sched_idletask.c @@ -7,7 +7,7 @@ #ifdef CONFIG_SMP static int -select_task_rq_idle(struct task_struct *p, int sd_flag, int flags) +select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags) { return task_cpu(p); /* IDLE tasks as never migrated */ } diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c index 64b2a37c07d0..e7cebdc65f82 100644 --- a/trunk/kernel/sched_rt.c +++ b/trunk/kernel/sched_rt.c @@ -183,14 +183,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); } -typedef struct task_group *rt_rq_iter_t; - -#define for_each_rt_rq(rt_rq, iter, rq) \ - for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \ - (&iter->list != &task_groups) && \ - (rt_rq = iter->rt_rq[cpu_of(rq)]); \ - iter = list_entry_rcu(iter->list.next, typeof(*iter), list)) - static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) { list_add_rcu(&rt_rq->leaf_rt_rq_list, @@ -296,11 +288,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) return ktime_to_ns(def_rt_bandwidth.rt_period); } -typedef struct rt_rq *rt_rq_iter_t; - -#define for_each_rt_rq(rt_rq, iter, rq) \ - for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) - static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) { } @@ -415,13 +402,12 @@ static int do_balance_runtime(struct rt_rq *rt_rq) static void __disable_runtime(struct rq *rq) { struct root_domain *rd = rq->rd; - rt_rq_iter_t iter; struct rt_rq *rt_rq; if (unlikely(!scheduler_running)) return; - for_each_rt_rq(rt_rq, iter, rq) { + for_each_leaf_rt_rq(rt_rq, rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); s64 want; int i; @@ -501,7 +487,6 @@ static void disable_runtime(struct rq *rq) static void __enable_runtime(struct rq *rq) { - rt_rq_iter_t iter; struct rt_rq *rt_rq; if (unlikely(!scheduler_running)) @@ -510,7 +495,7 @@ static void __enable_runtime(struct rq *rq) /* * Reset each runqueue's bandwidth settings */ - for_each_rt_rq(rt_rq, iter, rq) { + for_each_leaf_rt_rq(rt_rq, rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); raw_spin_lock(&rt_b->rt_runtime_lock); @@ -577,13 +562,6 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { rt_rq->rt_throttled = 0; enqueue = 1; - - /* - * Force a clock update if the CPU was idle, - * lest wakeup -> unthrottle time accumulate. - */ - if (rt_rq->rt_nr_running && rq->curr == rq->idle) - rq->skip_clock_update = -1; } if (rt_rq->rt_time || rt_rq->rt_nr_running) idle = 0; @@ -999,23 +977,13 @@ static void yield_task_rt(struct rq *rq) static int find_lowest_rq(struct task_struct *task); static int -select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) +select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) { - struct task_struct *curr; - struct rq *rq; - int cpu; - if (sd_flag != SD_BALANCE_WAKE) return smp_processor_id(); - cpu = task_cpu(p); - rq = cpu_rq(cpu); - - rcu_read_lock(); - curr = ACCESS_ONCE(rq->curr); /* unlocked access */ - /* - * If the current task on @p's runqueue is an RT task, then + * If the current task is an RT task, then * try to see if we can wake this RT task up on another * runqueue. Otherwise simply start this RT task * on its current runqueue. 
@@ -1029,25 +997,21 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) * lock? * * For equal prio tasks, we just let the scheduler sort it out. - * - * Otherwise, just let it ride on the affined RQ and the - * post-schedule router will push the preempted task away - * - * This test is optimistic, if we get it wrong the load-balancer - * will have to sort it out. */ - if (curr && unlikely(rt_task(curr)) && - (curr->rt.nr_cpus_allowed < 2 || - curr->prio < p->prio) && + if (unlikely(rt_task(rq->curr)) && + (rq->curr->rt.nr_cpus_allowed < 2 || + rq->curr->prio < p->prio) && (p->rt.nr_cpus_allowed > 1)) { - int target = find_lowest_rq(p); + int cpu = find_lowest_rq(p); - if (target != -1) - cpu = target; + return (cpu == -1) ? task_cpu(p) : cpu; } - rcu_read_unlock(); - return cpu; + /* + * Otherwise, just let it ride on the affined RQ and the + * post-schedule router will push the preempted task away + */ + return task_cpu(p); } static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) @@ -1172,7 +1136,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) * The previous task needs to be made eligible for pushing * if it is still active */ - if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1) + if (p->se.on_rq && p->rt.nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); } @@ -1323,7 +1287,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) || task_running(rq, task) || - !task->on_rq)) { + !task->se.on_rq)) { raw_spin_unlock(&lowest_rq->lock); lowest_rq = NULL; @@ -1357,7 +1321,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq) BUG_ON(task_current(rq, p)); BUG_ON(p->rt.nr_cpus_allowed <= 1); - BUG_ON(!p->on_rq); + BUG_ON(!p->se.on_rq); BUG_ON(!rt_task(p)); return p; @@ -1503,7 +1467,7 @@ static int pull_rt_task(struct rq *this_rq) */ if (p && (p->prio < this_rq->rt.highest_prio.curr)) { WARN_ON(p == src_rq->curr); - WARN_ON(!p->on_rq); + WARN_ON(!p->se.on_rq); /* * There's a chance that p is higher in priority @@ -1574,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, * Update the migration status of the RQ if we have an RT task * which is running AND changing its weight value. */ - if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) { + if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { struct rq *rq = task_rq(p); if (!task_current(rq, p)) { @@ -1644,7 +1608,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p) * we may need to handle the pulling of RT tasks * now. */ - if (p->on_rq && !rq->rt.rt_nr_running) + if (p->se.on_rq && !rq->rt.rt_nr_running) pull_rt_task(rq); } @@ -1674,7 +1638,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) * If that current running task is also an RT task * then see if we can move to another run queue. 
*/ - if (p->on_rq && rq->curr != p) { + if (p->se.on_rq && rq->curr != p) { #ifdef CONFIG_SMP if (rq->rt.overloaded && push_rt_task(rq) && /* Don't resched if we changed runqueues */ @@ -1693,7 +1657,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) static void prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) { - if (!p->on_rq) + if (!p->se.on_rq) return; if (rq->curr == p) { @@ -1832,11 +1796,10 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); static void print_rt_stats(struct seq_file *m, int cpu) { - rt_rq_iter_t iter; struct rt_rq *rt_rq; rcu_read_lock(); - for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) + for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu)) print_rt_rq(m, cpu, rt_rq); rcu_read_unlock(); } diff --git a/trunk/kernel/sched_stoptask.c b/trunk/kernel/sched_stoptask.c index 6f437632afab..1ba2bd40fdac 100644 --- a/trunk/kernel/sched_stoptask.c +++ b/trunk/kernel/sched_stoptask.c @@ -9,7 +9,8 @@ #ifdef CONFIG_SMP static int -select_task_rq_stop(struct task_struct *p, int sd_flag, int flags) +select_task_rq_stop(struct rq *rq, struct task_struct *p, + int sd_flag, int flags) { return task_cpu(p); /* stop tasks as never migrate */ } @@ -25,7 +26,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq) { struct task_struct *stop = rq->stop; - if (stop && stop->on_rq) + if (stop && stop->se.on_rq) return stop; return NULL; diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c index f0c10385f30c..af468edf096a 100644 --- a/trunk/kernel/sys.c +++ b/trunk/kernel/sys.c @@ -315,6 +315,7 @@ void kernel_restart_prepare(char *cmd) blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); system_state = SYSTEM_RESTART; device_shutdown(); + sysdev_shutdown(); syscore_shutdown(); } @@ -353,6 +354,7 @@ static void kernel_shutdown_prepare(enum system_states state) void kernel_halt(void) { kernel_shutdown_prepare(SYSTEM_HALT); + sysdev_shutdown(); syscore_shutdown(); printk(KERN_EMERG "System halted.\n"); kmsg_dump(KMSG_DUMP_HALT); @@ -372,6 +374,7 @@ void kernel_power_off(void) if (pm_power_off_prepare) pm_power_off_prepare(); disable_nonboot_cpus(); + sysdev_shutdown(); syscore_shutdown(); printk(KERN_EMERG "Power down.\n"); kmsg_dump(KMSG_DUMP_POWEROFF); diff --git a/trunk/kernel/time/clockevents.c b/trunk/kernel/time/clockevents.c index 22a9da9a9c96..0d74b9ba90c8 100644 --- a/trunk/kernel/time/clockevents.c +++ b/trunk/kernel/time/clockevents.c @@ -194,70 +194,6 @@ void clockevents_register_device(struct clock_event_device *dev) } EXPORT_SYMBOL_GPL(clockevents_register_device); -static void clockevents_config(struct clock_event_device *dev, - u32 freq) -{ - unsigned long sec; - - if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) - return; - - /* - * Calculate the maximum number of seconds we can sleep. Limit - * to 10 minutes for hardware which can program more than - * 32bit ticks so we still get reasonable conversion values. 
- */ - sec = dev->max_delta_ticks; - do_div(sec, freq); - if (!sec) - sec = 1; - else if (sec > 600 && dev->max_delta_ticks > UINT_MAX) - sec = 600; - - clockevents_calc_mult_shift(dev, freq, sec); - dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); - dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); -} - -/** - * clockevents_config_and_register - Configure and register a clock event device - * @dev: device to register - * @freq: The clock frequency - * @min_delta: The minimum clock ticks to program in oneshot mode - * @max_delta: The maximum clock ticks to program in oneshot mode - * - * min/max_delta can be 0 for devices which do not support oneshot mode. - */ -void clockevents_config_and_register(struct clock_event_device *dev, - u32 freq, unsigned long min_delta, - unsigned long max_delta) -{ - dev->min_delta_ticks = min_delta; - dev->max_delta_ticks = max_delta; - clockevents_config(dev, freq); - clockevents_register_device(dev); -} - -/** - * clockevents_update_freq - Update frequency and reprogram a clock event device. - * @dev: device to modify - * @freq: new device frequency - * - * Reconfigure and reprogram a clock event device in oneshot - * mode. Must be called on the cpu for which the device delivers per - * cpu timer events with interrupts disabled! Returns 0 on success, - * -ETIME when the event is in the past. - */ -int clockevents_update_freq(struct clock_event_device *dev, u32 freq) -{ - clockevents_config(dev, freq); - - if (dev->mode != CLOCK_EVT_MODE_ONESHOT) - return 0; - - return clockevents_program_event(dev, dev->next_event, ktime_get()); -} - /* * Noop handler when we shut down an event device */ diff --git a/trunk/kernel/time/clocksource.c b/trunk/kernel/time/clocksource.c index d9d5f8c885f6..6519cf62d9cd 100644 --- a/trunk/kernel/time/clocksource.c +++ b/trunk/kernel/time/clocksource.c @@ -626,6 +626,19 @@ static void clocksource_enqueue(struct clocksource *cs) list_add(&cs->list, entry); } + +/* + * Maximum time we expect to go between ticks. This includes idle + * tickless time. It provides the trade off between selecting a + * mult/shift pair that is very precise but can only handle a short + * period of time, vs. a mult/shift pair that can handle long periods + * of time but isn't as precise. + * + * This is a subsystem constant, and actual hardware limitations + * may override it (ie: clocksources that wrap every 3 seconds). + */ +#define MAX_UPDATE_LENGTH 5 /* Seconds */ + /** * __clocksource_updatefreq_scale - Used update clocksource with new freq * @t: clocksource to be registered @@ -639,28 +652,15 @@ static void clocksource_enqueue(struct clocksource *cs) */ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) { - unsigned long sec; - /* - * Calc the maximum number of seconds which we can run before - * wrapping around. For clocksources which have a mask > 32bit - * we need to limit the max sleep time to have a good - * conversion precision. 10 minutes is still a reasonable - * amount. That results in a shift value of 24 for a - * clocksource with mask >= 40bit and f >= 4GHz. That maps to - * ~ 0.06ppm granularity for NTP. We apply the same 12.5% - * margin as we do in clocksource_max_deferment() + * Ideally we want to use some of the limits used in + * clocksource_max_deferment, to provide a more informed + * MAX_UPDATE_LENGTH. But for now this just gets the + * register interface working properly. 
*/ - sec = (cs->mask - (cs->mask >> 5)); - do_div(sec, freq); - do_div(sec, scale); - if (!sec) - sec = 1; - else if (sec > 600 && cs->mask > UINT_MAX) - sec = 600; - clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, - NSEC_PER_SEC / scale, sec * scale); + NSEC_PER_SEC/scale, + MAX_UPDATE_LENGTH*scale); cs->max_idle_ns = clocksource_max_deferment(cs); } EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); @@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) /* Add clocksource to the clcoksource list */ mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); - clocksource_enqueue_watchdog(cs); clocksource_select(); + clocksource_enqueue_watchdog(cs); mutex_unlock(&clocksource_mutex); return 0; } @@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs) mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); - clocksource_enqueue_watchdog(cs); clocksource_select(); + clocksource_enqueue_watchdog(cs); mutex_unlock(&clocksource_mutex); return 0; } diff --git a/trunk/kernel/time/tick-broadcast.c b/trunk/kernel/time/tick-broadcast.c index 723c7637e55a..da800ffa810c 100644 --- a/trunk/kernel/time/tick-broadcast.c +++ b/trunk/kernel/time/tick-broadcast.c @@ -522,11 +522,10 @@ static void tick_broadcast_init_next_event(struct cpumask *mask, */ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { - int cpu = smp_processor_id(); - /* Set it up only once ! */ if (bc->event_handler != tick_handle_oneshot_broadcast) { int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; + int cpu = smp_processor_id(); bc->event_handler = tick_handle_oneshot_broadcast; clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); @@ -552,15 +551,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) tick_broadcast_set_event(tick_next_period, 1); } else bc->next_event.tv64 = KTIME_MAX; - } else { - /* - * The first cpu which switches to oneshot mode sets - * the bit for all other cpus which are in the general - * (periodic) broadcast mask. So the bit is set and - * would prevent the first broadcast enter after this - * to program the bc device. - */ - tick_broadcast_clear_oneshot(cpu); } } diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c index 8ad5d576755e..8e6a05a5915a 100644 --- a/trunk/kernel/time/timekeeping.c +++ b/trunk/kernel/time/timekeeping.c @@ -595,6 +595,58 @@ void __init timekeeping_init(void) /* time in seconds when suspend began */ static struct timespec timekeeping_suspend_time; +/** + * __timekeeping_inject_sleeptime - Internal function to add sleep interval + * @delta: pointer to a timespec delta value + * + * Takes a timespec offset measuring a suspend interval and properly + * adds the sleep offset to the timekeeping variables. + */ +static void __timekeeping_inject_sleeptime(struct timespec *delta) +{ + xtime = timespec_add(xtime, *delta); + wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta); + total_sleep_time = timespec_add(total_sleep_time, *delta); +} + + +/** + * timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values + * @delta: pointer to a timespec delta value + * + * This hook is for architectures that cannot support read_persistent_clock + * because their RTC/persistent clock is only accessible when irqs are enabled. + * + * This function should only be called by rtc_resume(), and allows + * a suspend offset to be injected into the timekeeping values. 
+ */ +void timekeeping_inject_sleeptime(struct timespec *delta) +{ + unsigned long flags; + struct timespec ts; + + /* Make sure we don't set the clock twice */ + read_persistent_clock(&ts); + if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) + return; + + write_seqlock_irqsave(&xtime_lock, flags); + timekeeping_forward_now(); + + __timekeeping_inject_sleeptime(delta); + + timekeeper.ntp_error = 0; + ntp_clear(); + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); + + write_sequnlock_irqrestore(&xtime_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +} + + /** * timekeeping_resume - Resumes the generic timekeeping subsystem. * @@ -615,9 +667,7 @@ static void timekeeping_resume(void) if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { ts = timespec_sub(ts, timekeeping_suspend_time); - xtime = timespec_add(xtime, ts); - wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); - total_sleep_time = timespec_add(total_sleep_time, ts); + __timekeeping_inject_sleeptime(&ts); } /* re-base the last cycle value */ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); diff --git a/trunk/kernel/trace/Kconfig b/trunk/kernel/trace/Kconfig index 2ad39e556cb4..61d7d59f4a1a 100644 --- a/trunk/kernel/trace/Kconfig +++ b/trunk/kernel/trace/Kconfig @@ -141,7 +141,7 @@ if FTRACE config FUNCTION_TRACER bool "Kernel Function Tracer" depends on HAVE_FUNCTION_TRACER - select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE + select FRAME_POINTER if !ARM_UNWIND && !S390 select KALLSYMS select GENERIC_TRACER select CONTEXT_SWITCH_TRACER diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c index d017c2c82c44..ee24fa1935ac 100644 --- a/trunk/kernel/trace/ftrace.c +++ b/trunk/kernel/trace/ftrace.c @@ -39,26 +39,20 @@ #include "trace_stat.h" #define FTRACE_WARN_ON(cond) \ - ({ \ - int ___r = cond; \ - if (WARN_ON(___r)) \ + do { \ + if (WARN_ON(cond)) \ ftrace_kill(); \ - ___r; \ - }) + } while (0) #define FTRACE_WARN_ON_ONCE(cond) \ - ({ \ - int ___r = cond; \ - if (WARN_ON_ONCE(___r)) \ + do { \ + if (WARN_ON_ONCE(cond)) \ ftrace_kill(); \ - ___r; \ - }) + } while (0) /* hash bits for specific function selection */ #define FTRACE_HASH_BITS 7 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) -#define FTRACE_HASH_DEFAULT_BITS 10 -#define FTRACE_HASH_MAX_BITS 12 /* ftrace_enabled is a method to turn ftrace on or off */ int ftrace_enabled __read_mostly; @@ -87,29 +81,23 @@ static struct ftrace_ops ftrace_list_end __read_mostly = .func = ftrace_stub, }; -static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; -static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; +static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; -static struct ftrace_ops global_ops; - -static void -ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); /* - * Traverse the ftrace_global_list, invoking all entries. The reason that we + * Traverse the ftrace_list, invoking all entries. The reason that we * can use rcu_dereference_raw() is that elements removed from this list * are simply leaked, so there is no need to interact with a grace-period * mechanism. The rcu_dereference_raw() calls are needed to handle - * concurrent insertions into the ftrace_global_list. 
+ * concurrent insertions into the ftrace_list. * * Silly Alpha and silly pointer-speculation compiler optimizations! */ -static void ftrace_global_list_func(unsigned long ip, - unsigned long parent_ip) +static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) { - struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/ + struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/ while (op != &ftrace_list_end) { op->func(ip, parent_ip); @@ -159,69 +147,46 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) } #endif -static void update_global_ops(void) +static int __register_ftrace_function(struct ftrace_ops *ops) { - ftrace_func_t func; - + ops->next = ftrace_list; /* - * If there's only one function registered, then call that - * function directly. Otherwise, we need to iterate over the - * registered callers. + * We are entering ops into the ftrace_list but another + * CPU might be walking that list. We need to make sure + * the ops->next pointer is valid before another CPU sees + * the ops pointer included into the ftrace_list. */ - if (ftrace_global_list == &ftrace_list_end || - ftrace_global_list->next == &ftrace_list_end) - func = ftrace_global_list->func; - else - func = ftrace_global_list_func; - - /* If we filter on pids, update to use the pid function */ - if (!list_empty(&ftrace_pids)) { - set_ftrace_pid_function(func); - func = ftrace_pid_func; - } - - global_ops.func = func; -} + rcu_assign_pointer(ftrace_list, ops); -static void update_ftrace_function(void) -{ - ftrace_func_t func; + if (ftrace_enabled) { + ftrace_func_t func; - update_global_ops(); + if (ops->next == &ftrace_list_end) + func = ops->func; + else + func = ftrace_list_func; - /* - * If we are at the end of the list and this ops is - * not dynamic, then have the mcount trampoline call - * the function directly - */ - if (ftrace_ops_list == &ftrace_list_end || - (ftrace_ops_list->next == &ftrace_list_end && - !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC))) - func = ftrace_ops_list->func; - else - func = ftrace_ops_list_func; + if (!list_empty(&ftrace_pids)) { + set_ftrace_pid_function(func); + func = ftrace_pid_func; + } + /* + * For one func, simply call it directly. + * For more than one func, call the chain. + */ #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST - ftrace_trace_function = func; + ftrace_trace_function = func; #else - __ftrace_trace_function = func; - ftrace_trace_function = ftrace_test_stop_func; + __ftrace_trace_function = func; + ftrace_trace_function = ftrace_test_stop_func; #endif -} + } -static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) -{ - ops->next = *list; - /* - * We are entering ops into the list but another - * CPU might be walking that list. We need to make sure - * the ops->next pointer is valid before another CPU sees - * the ops pointer included into the list. - */ - rcu_assign_pointer(*list, ops); + return 0; } -static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) +static int __unregister_ftrace_function(struct ftrace_ops *ops) { struct ftrace_ops **p; @@ -229,12 +194,13 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) * If we are removing the last function, then simply point * to the ftrace_stub. 
*/ - if (*list == ops && ops->next == &ftrace_list_end) { - *list = &ftrace_list_end; + if (ftrace_list == ops && ops->next == &ftrace_list_end) { + ftrace_trace_function = ftrace_stub; + ftrace_list = &ftrace_list_end; return 0; } - for (p = list; *p != &ftrace_list_end; p = &(*p)->next) + for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) if (*p == ops) break; @@ -242,83 +208,53 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) return -1; *p = (*p)->next; - return 0; -} -static int __register_ftrace_function(struct ftrace_ops *ops) -{ - if (ftrace_disabled) - return -ENODEV; - - if (FTRACE_WARN_ON(ops == &global_ops)) - return -EINVAL; - - if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) - return -EBUSY; - - if (!core_kernel_data((unsigned long)ops)) - ops->flags |= FTRACE_OPS_FL_DYNAMIC; - - if (ops->flags & FTRACE_OPS_FL_GLOBAL) { - int first = ftrace_global_list == &ftrace_list_end; - add_ftrace_ops(&ftrace_global_list, ops); - ops->flags |= FTRACE_OPS_FL_ENABLED; - if (first) - add_ftrace_ops(&ftrace_ops_list, &global_ops); - } else - add_ftrace_ops(&ftrace_ops_list, ops); - - if (ftrace_enabled) - update_ftrace_function(); - - return 0; -} - -static int __unregister_ftrace_function(struct ftrace_ops *ops) -{ - int ret; - - if (ftrace_disabled) - return -ENODEV; - - if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) - return -EBUSY; - - if (FTRACE_WARN_ON(ops == &global_ops)) - return -EINVAL; - - if (ops->flags & FTRACE_OPS_FL_GLOBAL) { - ret = remove_ftrace_ops(&ftrace_global_list, ops); - if (!ret && ftrace_global_list == &ftrace_list_end) - ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops); - if (!ret) - ops->flags &= ~FTRACE_OPS_FL_ENABLED; - } else - ret = remove_ftrace_ops(&ftrace_ops_list, ops); - - if (ret < 0) - return ret; - - if (ftrace_enabled) - update_ftrace_function(); + if (ftrace_enabled) { + /* If we only have one func left, then call that directly */ + if (ftrace_list->next == &ftrace_list_end) { + ftrace_func_t func = ftrace_list->func; - /* - * Dynamic ops may be freed, we must make sure that all - * callers are done before leaving this function. 
- */ - if (ops->flags & FTRACE_OPS_FL_DYNAMIC) - synchronize_sched(); + if (!list_empty(&ftrace_pids)) { + set_ftrace_pid_function(func); + func = ftrace_pid_func; + } +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + ftrace_trace_function = func; +#else + __ftrace_trace_function = func; +#endif + } + } return 0; } static void ftrace_update_pid_func(void) { - /* Only do something if we are tracing something */ + ftrace_func_t func; + if (ftrace_trace_function == ftrace_stub) return; - update_ftrace_function(); +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + func = ftrace_trace_function; +#else + func = __ftrace_trace_function; +#endif + + if (!list_empty(&ftrace_pids)) { + set_ftrace_pid_function(func); + func = ftrace_pid_func; + } else { + if (func == ftrace_pid_func) + func = ftrace_pid_function; + } + +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + ftrace_trace_function = func; +#else + __ftrace_trace_function = func; +#endif } #ifdef CONFIG_FUNCTION_PROFILER @@ -952,35 +888,8 @@ enum { FTRACE_START_FUNC_RET = (1 << 3), FTRACE_STOP_FUNC_RET = (1 << 4), }; -struct ftrace_func_entry { - struct hlist_node hlist; - unsigned long ip; -}; - -struct ftrace_hash { - unsigned long size_bits; - struct hlist_head *buckets; - unsigned long count; - struct rcu_head rcu; -}; -/* - * We make these constant because no one should touch them, - * but they are used as the default "empty hash", to avoid allocating - * it all the time. These are in a read only section such that if - * anyone does try to modify it, it will cause an exception. - */ -static const struct hlist_head empty_buckets[1]; -static const struct ftrace_hash empty_hash = { - .buckets = (struct hlist_head *)empty_buckets, -}; -#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) - -static struct ftrace_ops global_ops = { - .func = ftrace_stub, - .notrace_hash = EMPTY_HASH, - .filter_hash = EMPTY_HASH, -}; +static int ftrace_filtered; static struct dyn_ftrace *ftrace_new_addrs; @@ -1003,269 +912,6 @@ static struct ftrace_page *ftrace_pages; static struct dyn_ftrace *ftrace_free_records; -static struct ftrace_func_entry * -ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) -{ - unsigned long key; - struct ftrace_func_entry *entry; - struct hlist_head *hhd; - struct hlist_node *n; - - if (!hash->count) - return NULL; - - if (hash->size_bits > 0) - key = hash_long(ip, hash->size_bits); - else - key = 0; - - hhd = &hash->buckets[key]; - - hlist_for_each_entry_rcu(entry, n, hhd, hlist) { - if (entry->ip == ip) - return entry; - } - return NULL; -} - -static void __add_hash_entry(struct ftrace_hash *hash, - struct ftrace_func_entry *entry) -{ - struct hlist_head *hhd; - unsigned long key; - - if (hash->size_bits) - key = hash_long(entry->ip, hash->size_bits); - else - key = 0; - - hhd = &hash->buckets[key]; - hlist_add_head(&entry->hlist, hhd); - hash->count++; -} - -static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) -{ - struct ftrace_func_entry *entry; - - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - entry->ip = ip; - __add_hash_entry(hash, entry); - - return 0; -} - -static void -free_hash_entry(struct ftrace_hash *hash, - struct ftrace_func_entry *entry) -{ - hlist_del(&entry->hlist); - kfree(entry); - hash->count--; -} - -static void -remove_hash_entry(struct ftrace_hash *hash, - struct ftrace_func_entry *entry) -{ - hlist_del(&entry->hlist); - hash->count--; -} - -static void ftrace_hash_clear(struct ftrace_hash *hash) -{ - struct hlist_head *hhd; - struct hlist_node *tp, 
*tn; - struct ftrace_func_entry *entry; - int size = 1 << hash->size_bits; - int i; - - if (!hash->count) - return; - - for (i = 0; i < size; i++) { - hhd = &hash->buckets[i]; - hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) - free_hash_entry(hash, entry); - } - FTRACE_WARN_ON(hash->count); -} - -static void free_ftrace_hash(struct ftrace_hash *hash) -{ - if (!hash || hash == EMPTY_HASH) - return; - ftrace_hash_clear(hash); - kfree(hash->buckets); - kfree(hash); -} - -static void __free_ftrace_hash_rcu(struct rcu_head *rcu) -{ - struct ftrace_hash *hash; - - hash = container_of(rcu, struct ftrace_hash, rcu); - free_ftrace_hash(hash); -} - -static void free_ftrace_hash_rcu(struct ftrace_hash *hash) -{ - if (!hash || hash == EMPTY_HASH) - return; - call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); -} - -static struct ftrace_hash *alloc_ftrace_hash(int size_bits) -{ - struct ftrace_hash *hash; - int size; - - hash = kzalloc(sizeof(*hash), GFP_KERNEL); - if (!hash) - return NULL; - - size = 1 << size_bits; - hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL); - - if (!hash->buckets) { - kfree(hash); - return NULL; - } - - hash->size_bits = size_bits; - - return hash; -} - -static struct ftrace_hash * -alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) -{ - struct ftrace_func_entry *entry; - struct ftrace_hash *new_hash; - struct hlist_node *tp; - int size; - int ret; - int i; - - new_hash = alloc_ftrace_hash(size_bits); - if (!new_hash) - return NULL; - - /* Empty hash? */ - if (!hash || !hash->count) - return new_hash; - - size = 1 << hash->size_bits; - for (i = 0; i < size; i++) { - hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) { - ret = add_hash_entry(new_hash, entry->ip); - if (ret < 0) - goto free_hash; - } - } - - FTRACE_WARN_ON(new_hash->count != hash->count); - - return new_hash; - - free_hash: - free_ftrace_hash(new_hash); - return NULL; -} - -static int -ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) -{ - struct ftrace_func_entry *entry; - struct hlist_node *tp, *tn; - struct hlist_head *hhd; - struct ftrace_hash *old_hash; - struct ftrace_hash *new_hash; - unsigned long key; - int size = src->count; - int bits = 0; - int i; - - /* - * If the new source is empty, just free dst and assign it - * the empty_hash. - */ - if (!src->count) { - free_ftrace_hash_rcu(*dst); - rcu_assign_pointer(*dst, EMPTY_HASH); - return 0; - } - - /* - * Make the hash size about 1/2 the # found - */ - for (size /= 2; size; size >>= 1) - bits++; - - /* Don't allocate too much */ - if (bits > FTRACE_HASH_MAX_BITS) - bits = FTRACE_HASH_MAX_BITS; - - new_hash = alloc_ftrace_hash(bits); - if (!new_hash) - return -ENOMEM; - - size = 1 << src->size_bits; - for (i = 0; i < size; i++) { - hhd = &src->buckets[i]; - hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) { - if (bits > 0) - key = hash_long(entry->ip, bits); - else - key = 0; - remove_hash_entry(src, entry); - __add_hash_entry(new_hash, entry); - } - } - - old_hash = *dst; - rcu_assign_pointer(*dst, new_hash); - free_ftrace_hash_rcu(old_hash); - - return 0; -} - -/* - * Test the hashes for this ops to see if we want to call - * the ops->func or not. - * - * It's a match if the ip is in the ops->filter_hash or - * the filter_hash does not exist or is empty, - * AND - * the ip is not in the ops->notrace_hash. - * - * This needs to be called with preemption disabled as - * the hashes are freed with call_rcu_sched(). 
- */ -static int -ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) -{ - struct ftrace_hash *filter_hash; - struct ftrace_hash *notrace_hash; - int ret; - - filter_hash = rcu_dereference_raw(ops->filter_hash); - notrace_hash = rcu_dereference_raw(ops->notrace_hash); - - if ((!filter_hash || !filter_hash->count || - ftrace_lookup_ip(filter_hash, ip)) && - (!notrace_hash || !notrace_hash->count || - !ftrace_lookup_ip(notrace_hash, ip))) - ret = 1; - else - ret = 0; - - return ret; -} - /* * This is a double for. Do not use 'break' to break out of the loop, * you must use a goto. @@ -1280,105 +926,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) } \ } -static void __ftrace_hash_rec_update(struct ftrace_ops *ops, - int filter_hash, - bool inc) -{ - struct ftrace_hash *hash; - struct ftrace_hash *other_hash; - struct ftrace_page *pg; - struct dyn_ftrace *rec; - int count = 0; - int all = 0; - - /* Only update if the ops has been registered */ - if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) - return; - - /* - * In the filter_hash case: - * If the count is zero, we update all records. - * Otherwise we just update the items in the hash. - * - * In the notrace_hash case: - * We enable the update in the hash. - * As disabling notrace means enabling the tracing, - * and enabling notrace means disabling, the inc variable - * gets inversed. - */ - if (filter_hash) { - hash = ops->filter_hash; - other_hash = ops->notrace_hash; - if (!hash || !hash->count) - all = 1; - } else { - inc = !inc; - hash = ops->notrace_hash; - other_hash = ops->filter_hash; - /* - * If the notrace hash has no items, - * then there's nothing to do. - */ - if (hash && !hash->count) - return; - } - - do_for_each_ftrace_rec(pg, rec) { - int in_other_hash = 0; - int in_hash = 0; - int match = 0; - - if (all) { - /* - * Only the filter_hash affects all records. - * Update if the record is not in the notrace hash. - */ - if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) - match = 1; - } else { - in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip); - in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip); - - /* - * - */ - if (filter_hash && in_hash && !in_other_hash) - match = 1; - else if (!filter_hash && in_hash && - (in_other_hash || !other_hash->count)) - match = 1; - } - if (!match) - continue; - - if (inc) { - rec->flags++; - if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) - return; - } else { - if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) - return; - rec->flags--; - } - count++; - /* Shortcut, if we handled all records, we are done. */ - if (!all && count == hash->count) - return; - } while_for_each_ftrace_rec(); -} - -static void ftrace_hash_rec_disable(struct ftrace_ops *ops, - int filter_hash) -{ - __ftrace_hash_rec_update(ops, filter_hash, 0); -} - -static void ftrace_hash_rec_enable(struct ftrace_ops *ops, - int filter_hash) -{ - __ftrace_hash_rec_update(ops, filter_hash, 1); -} - static void ftrace_free_rec(struct dyn_ftrace *rec) { rec->freelist = ftrace_free_records; @@ -1500,18 +1047,18 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) ftrace_addr = (unsigned long)FTRACE_ADDR; /* - * If we are enabling tracing: - * - * If the record has a ref count, then we need to enable it - * because someone is using it. + * If this record is not to be traced or we want to disable it, + * then disable it. * - * Otherwise we make sure its disabled. + * If we want to enable it and filtering is off, then enable it. 
* - * If we are disabling tracing, then disable all records that - * are enabled. + * If we want to enable it and filtering is on, enable it only if + * it's filtered */ - if (enable && (rec->flags & ~FTRACE_FL_MASK)) - flag = FTRACE_FL_ENABLED; + if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) { + if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER)) + flag = FTRACE_FL_ENABLED; + } /* If the state of this record hasn't changed, then do nothing */ if ((rec->flags & FTRACE_FL_ENABLED) == flag) @@ -1532,16 +1079,19 @@ static void ftrace_replace_code(int enable) struct ftrace_page *pg; int failed; - if (unlikely(ftrace_disabled)) - return; - do_for_each_ftrace_rec(pg, rec) { - /* Skip over free records */ - if (rec->flags & FTRACE_FL_FREE) + /* + * Skip over free records, records that have + * failed and not converted. + */ + if (rec->flags & FTRACE_FL_FREE || + rec->flags & FTRACE_FL_FAILED || + !(rec->flags & FTRACE_FL_CONVERTED)) continue; failed = __ftrace_replace_code(rec, enable); if (failed) { + rec->flags |= FTRACE_FL_FAILED; ftrace_bug(failed, rec->ip); /* Stop processing */ return; @@ -1557,12 +1107,10 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) ip = rec->ip; - if (unlikely(ftrace_disabled)) - return 0; - ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); if (ret) { ftrace_bug(ret, ip); + rec->flags |= FTRACE_FL_FAILED; return 0; } return 1; @@ -1623,7 +1171,6 @@ static void ftrace_run_update_code(int command) static ftrace_func_t saved_ftrace_func; static int ftrace_start_up; -static int global_start_up; static void ftrace_startup_enable(int command) { @@ -1638,36 +1185,19 @@ static void ftrace_startup_enable(int command) ftrace_run_update_code(command); } -static void ftrace_startup(struct ftrace_ops *ops, int command) +static void ftrace_startup(int command) { - bool hash_enable = true; - if (unlikely(ftrace_disabled)) return; ftrace_start_up++; command |= FTRACE_ENABLE_CALLS; - /* ops marked global share the filter hashes */ - if (ops->flags & FTRACE_OPS_FL_GLOBAL) { - ops = &global_ops; - /* Don't update hash if global is already set */ - if (global_start_up) - hash_enable = false; - global_start_up++; - } - - ops->flags |= FTRACE_OPS_FL_ENABLED; - if (hash_enable) - ftrace_hash_rec_enable(ops, 1); - ftrace_startup_enable(command); } -static void ftrace_shutdown(struct ftrace_ops *ops, int command) +static void ftrace_shutdown(int command) { - bool hash_disable = true; - if (unlikely(ftrace_disabled)) return; @@ -1679,23 +1209,6 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command) */ WARN_ON_ONCE(ftrace_start_up < 0); - if (ops->flags & FTRACE_OPS_FL_GLOBAL) { - ops = &global_ops; - global_start_up--; - WARN_ON_ONCE(global_start_up < 0); - /* Don't update hash if global still has users */ - if (global_start_up) { - WARN_ON_ONCE(!ftrace_start_up); - hash_disable = false; - } - } - - if (hash_disable) - ftrace_hash_rec_disable(ops, 1); - - if (ops != &global_ops || !global_start_up) - ops->flags &= ~FTRACE_OPS_FL_ENABLED; - if (!ftrace_start_up) command |= FTRACE_DISABLE_CALLS; @@ -1760,10 +1273,10 @@ static int ftrace_update_code(struct module *mod) */ if (!ftrace_code_disable(mod, p)) { ftrace_free_rec(p); - /* Game over */ - break; + continue; } + p->flags |= FTRACE_FL_CONVERTED; ftrace_update_cnt++; /* @@ -1838,9 +1351,9 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) enum { FTRACE_ITER_FILTER = (1 << 0), FTRACE_ITER_NOTRACE = (1 << 1), - FTRACE_ITER_PRINTALL = (1 << 2), - FTRACE_ITER_HASH = (1 << 3), - 
FTRACE_ITER_ENABLED = (1 << 4), + FTRACE_ITER_FAILURES = (1 << 2), + FTRACE_ITER_PRINTALL = (1 << 3), + FTRACE_ITER_HASH = (1 << 4), }; #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ @@ -1852,8 +1365,6 @@ struct ftrace_iterator { struct dyn_ftrace *func; struct ftrace_func_probe *probe; struct trace_parser parser; - struct ftrace_hash *hash; - struct ftrace_ops *ops; int hidx; int idx; unsigned flags; @@ -1950,12 +1461,8 @@ static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_iterator *iter = m->private; - struct ftrace_ops *ops = &global_ops; struct dyn_ftrace *rec = NULL; - if (unlikely(ftrace_disabled)) - return NULL; - if (iter->flags & FTRACE_ITER_HASH) return t_hash_next(m, pos); @@ -1976,15 +1483,17 @@ t_next(struct seq_file *m, void *v, loff_t *pos) rec = &iter->pg->records[iter->idx++]; if ((rec->flags & FTRACE_FL_FREE) || - ((iter->flags & FTRACE_ITER_FILTER) && - !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || + (!(iter->flags & FTRACE_ITER_FAILURES) && + (rec->flags & FTRACE_FL_FAILED)) || - ((iter->flags & FTRACE_ITER_NOTRACE) && - !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || + ((iter->flags & FTRACE_ITER_FAILURES) && + !(rec->flags & FTRACE_FL_FAILED)) || - ((iter->flags & FTRACE_ITER_ENABLED) && - !(rec->flags & ~FTRACE_FL_MASK))) { + ((iter->flags & FTRACE_ITER_FILTER) && + !(rec->flags & FTRACE_FL_FILTER)) || + ((iter->flags & FTRACE_ITER_NOTRACE) && + !(rec->flags & FTRACE_FL_NOTRACE))) { rec = NULL; goto retry; } @@ -2008,15 +1517,10 @@ static void reset_iter_read(struct ftrace_iterator *iter) static void *t_start(struct seq_file *m, loff_t *pos) { struct ftrace_iterator *iter = m->private; - struct ftrace_ops *ops = &global_ops; void *p = NULL; loff_t l; mutex_lock(&ftrace_lock); - - if (unlikely(ftrace_disabled)) - return NULL; - /* * If an lseek was done, then reset and start from beginning. */ @@ -2028,7 +1532,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) * off, we can short cut and just print out that all * functions are enabled. 
*/ - if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) { + if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { if (*pos > 0) return t_hash_start(m, pos); iter->flags |= FTRACE_ITER_PRINTALL; @@ -2086,11 +1590,7 @@ static int t_show(struct seq_file *m, void *v) if (!rec) return 0; - seq_printf(m, "%ps", (void *)rec->ip); - if (iter->flags & FTRACE_ITER_ENABLED) - seq_printf(m, " (%ld)", - rec->flags & ~FTRACE_FL_MASK); - seq_printf(m, "\n"); + seq_printf(m, "%ps\n", (void *)rec->ip); return 0; } @@ -2099,38 +1599,11 @@ static const struct seq_operations show_ftrace_seq_ops = { .start = t_start, .next = t_next, .stop = t_stop, - .show = t_show, -}; - -static int -ftrace_avail_open(struct inode *inode, struct file *file) -{ - struct ftrace_iterator *iter; - int ret; - - if (unlikely(ftrace_disabled)) - return -ENODEV; - - iter = kzalloc(sizeof(*iter), GFP_KERNEL); - if (!iter) - return -ENOMEM; - - iter->pg = ftrace_pages_start; - - ret = seq_open(file, &show_ftrace_seq_ops); - if (!ret) { - struct seq_file *m = file->private_data; - - m->private = iter; - } else { - kfree(iter); - } - - return ret; -} + .show = t_show, +}; static int -ftrace_enabled_open(struct inode *inode, struct file *file) +ftrace_avail_open(struct inode *inode, struct file *file) { struct ftrace_iterator *iter; int ret; @@ -2143,7 +1616,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file) return -ENOMEM; iter->pg = ftrace_pages_start; - iter->flags = FTRACE_ITER_ENABLED; ret = seq_open(file, &show_ftrace_seq_ops); if (!ret) { @@ -2157,19 +1629,45 @@ ftrace_enabled_open(struct inode *inode, struct file *file) return ret; } -static void ftrace_filter_reset(struct ftrace_hash *hash) +static int +ftrace_failures_open(struct inode *inode, struct file *file) +{ + int ret; + struct seq_file *m; + struct ftrace_iterator *iter; + + ret = ftrace_avail_open(inode, file); + if (!ret) { + m = file->private_data; + iter = m->private; + iter->flags = FTRACE_ITER_FAILURES; + } + + return ret; +} + + +static void ftrace_filter_reset(int enable) { + struct ftrace_page *pg; + struct dyn_ftrace *rec; + unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; + mutex_lock(&ftrace_lock); - ftrace_hash_clear(hash); + if (enable) + ftrace_filtered = 0; + do_for_each_ftrace_rec(pg, rec) { + if (rec->flags & FTRACE_FL_FAILED) + continue; + rec->flags &= ~type; + } while_for_each_ftrace_rec(); mutex_unlock(&ftrace_lock); } static int -ftrace_regex_open(struct ftrace_ops *ops, int flag, - struct inode *inode, struct file *file) +ftrace_regex_open(struct inode *inode, struct file *file, int enable) { struct ftrace_iterator *iter; - struct ftrace_hash *hash; int ret = 0; if (unlikely(ftrace_disabled)) @@ -2184,42 +1682,21 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, return -ENOMEM; } - if (flag & FTRACE_ITER_NOTRACE) - hash = ops->notrace_hash; - else - hash = ops->filter_hash; - - iter->ops = ops; - iter->flags = flag; - - if (file->f_mode & FMODE_WRITE) { - mutex_lock(&ftrace_lock); - iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); - mutex_unlock(&ftrace_lock); - - if (!iter->hash) { - trace_parser_put(&iter->parser); - kfree(iter); - return -ENOMEM; - } - } - mutex_lock(&ftrace_regex_lock); - if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) - ftrace_filter_reset(iter->hash); + ftrace_filter_reset(enable); if (file->f_mode & FMODE_READ) { iter->pg = ftrace_pages_start; + iter->flags = enable ? 
FTRACE_ITER_FILTER : + FTRACE_ITER_NOTRACE; ret = seq_open(file, &show_ftrace_seq_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = iter; } else { - /* Failed */ - free_ftrace_hash(iter->hash); trace_parser_put(&iter->parser); kfree(iter); } @@ -2233,15 +1710,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, static int ftrace_filter_open(struct inode *inode, struct file *file) { - return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER, - inode, file); + return ftrace_regex_open(inode, file, 1); } static int ftrace_notrace_open(struct inode *inode, struct file *file) { - return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE, - inode, file); + return ftrace_regex_open(inode, file, 0); } static loff_t @@ -2286,99 +1761,86 @@ static int ftrace_match(char *str, char *regex, int len, int type) } static int -enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) -{ - struct ftrace_func_entry *entry; - int ret = 0; - - entry = ftrace_lookup_ip(hash, rec->ip); - if (not) { - /* Do nothing if it doesn't exist */ - if (!entry) - return 0; - - free_hash_entry(hash, entry); - } else { - /* Do nothing if it exists */ - if (entry) - return 0; - - ret = add_hash_entry(hash, rec->ip); - } - return ret; -} - -static int -ftrace_match_record(struct dyn_ftrace *rec, char *mod, - char *regex, int len, int type) +ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) { char str[KSYM_SYMBOL_LEN]; - char *modname; - - kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); - - if (mod) { - /* module lookup requires matching the module */ - if (!modname || strcmp(modname, mod)) - return 0; - - /* blank search means to match all funcs in the mod */ - if (!len) - return 1; - } + kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); return ftrace_match(str, regex, len, type); } -static int -match_records(struct ftrace_hash *hash, char *buff, - int len, char *mod, int not) +static int ftrace_match_records(char *buff, int len, int enable) { - unsigned search_len = 0; + unsigned int search_len; struct ftrace_page *pg; struct dyn_ftrace *rec; - int type = MATCH_FULL; - char *search = buff; + unsigned long flag; + char *search; + int type; + int not; int found = 0; - int ret; - - if (len) { - type = filter_parse_regex(buff, len, &search, ¬); - search_len = strlen(search); - } - mutex_lock(&ftrace_lock); + flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; + type = filter_parse_regex(buff, len, &search, ¬); - if (unlikely(ftrace_disabled)) - goto out_unlock; + search_len = strlen(search); + mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { - if (ftrace_match_record(rec, mod, search, search_len, type)) { - ret = enter_record(hash, rec, not); - if (ret < 0) { - found = ret; - goto out_unlock; - } + if (rec->flags & FTRACE_FL_FAILED) + continue; + + if (ftrace_match_record(rec, search, search_len, type)) { + if (not) + rec->flags &= ~flag; + else + rec->flags |= flag; found = 1; } + /* + * Only enable filtering if we have a function that + * is filtered on. 
+ */ + if (enable && (rec->flags & FTRACE_FL_FILTER)) + ftrace_filtered = 1; } while_for_each_ftrace_rec(); - out_unlock: mutex_unlock(&ftrace_lock); return found; } static int -ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) +ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, + char *regex, int len, int type) { - return match_records(hash, buff, len, NULL, 0); + char str[KSYM_SYMBOL_LEN]; + char *modname; + + kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); + + if (!modname || strcmp(modname, mod)) + return 0; + + /* blank search means to match all funcs in the mod */ + if (len) + return ftrace_match(str, regex, len, type); + else + return 1; } -static int -ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) +static int ftrace_match_module_records(char *buff, char *mod, int enable) { + unsigned search_len = 0; + struct ftrace_page *pg; + struct dyn_ftrace *rec; + int type = MATCH_FULL; + char *search = buff; + unsigned long flag; int not = 0; + int found = 0; + + flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; /* blank or '*' mean the same */ if (strcmp(buff, "*") == 0) @@ -2390,7 +1852,32 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) not = 1; } - return match_records(hash, buff, strlen(buff), mod, not); + if (strlen(buff)) { + type = filter_parse_regex(buff, strlen(buff), &search, ¬); + search_len = strlen(search); + } + + mutex_lock(&ftrace_lock); + do_for_each_ftrace_rec(pg, rec) { + + if (rec->flags & FTRACE_FL_FAILED) + continue; + + if (ftrace_match_module_record(rec, mod, + search, search_len, type)) { + if (not) + rec->flags &= ~flag; + else + rec->flags |= flag; + found = 1; + } + if (enable && (rec->flags & FTRACE_FL_FILTER)) + ftrace_filtered = 1; + + } while_for_each_ftrace_rec(); + mutex_unlock(&ftrace_lock); + + return found; } /* @@ -2401,10 +1888,7 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) static int ftrace_mod_callback(char *func, char *cmd, char *param, int enable) { - struct ftrace_ops *ops = &global_ops; - struct ftrace_hash *hash; char *mod; - int ret = -EINVAL; /* * cmd == 'mod' because we only registered this func @@ -2416,24 +1900,15 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable) /* we must have a module name */ if (!param) - return ret; + return -EINVAL; mod = strsep(¶m, ":"); if (!strlen(mod)) - return ret; - - if (enable) - hash = ops->filter_hash; - else - hash = ops->notrace_hash; - - ret = ftrace_match_module_records(hash, func, mod); - if (!ret) - ret = -EINVAL; - if (ret < 0) - return ret; + return -EINVAL; - return 0; + if (ftrace_match_module_records(func, mod, enable)) + return 0; + return -EINVAL; } static struct ftrace_func_command ftrace_mod_cmd = { @@ -2484,7 +1959,6 @@ static int ftrace_probe_registered; static void __enable_ftrace_function_probe(void) { - int ret; int i; if (ftrace_probe_registered) @@ -2499,16 +1973,13 @@ static void __enable_ftrace_function_probe(void) if (i == FTRACE_FUNC_HASHSIZE) return; - ret = __register_ftrace_function(&trace_probe_ops); - if (!ret) - ftrace_startup(&trace_probe_ops, 0); - + __register_ftrace_function(&trace_probe_ops); + ftrace_startup(0); ftrace_probe_registered = 1; } static void __disable_ftrace_function_probe(void) { - int ret; int i; if (!ftrace_probe_registered) @@ -2521,10 +1992,8 @@ static void __disable_ftrace_function_probe(void) } /* no more funcs left */ - ret = __unregister_ftrace_function(&trace_probe_ops); - if (!ret) - 
ftrace_shutdown(&trace_probe_ops, 0); - + __unregister_ftrace_function(&trace_probe_ops); + ftrace_shutdown(0); ftrace_probe_registered = 0; } @@ -2560,13 +2029,12 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, return -EINVAL; mutex_lock(&ftrace_lock); - - if (unlikely(ftrace_disabled)) - goto out_unlock; - do_for_each_ftrace_rec(pg, rec) { - if (!ftrace_match_record(rec, NULL, search, len, type)) + if (rec->flags & FTRACE_FL_FAILED) + continue; + + if (!ftrace_match_record(rec, search, len, type)) continue; entry = kmalloc(sizeof(*entry), GFP_KERNEL); @@ -2727,22 +2195,18 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd) return ret; } -static int ftrace_process_regex(struct ftrace_hash *hash, - char *buff, int len, int enable) +static int ftrace_process_regex(char *buff, int len, int enable) { char *func, *command, *next = buff; struct ftrace_func_command *p; - int ret; + int ret = -EINVAL; func = strsep(&next, ":"); if (!next) { - ret = ftrace_match_records(hash, func, len); - if (!ret) - ret = -EINVAL; - if (ret < 0) - return ret; - return 0; + if (ftrace_match_records(func, len, enable)) + return 0; + return ret; } /* command found */ @@ -2775,10 +2239,6 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, mutex_lock(&ftrace_regex_lock); - ret = -ENODEV; - if (unlikely(ftrace_disabled)) - goto out_unlock; - if (file->f_mode & FMODE_READ) { struct seq_file *m = file->private_data; iter = m->private; @@ -2790,7 +2250,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, if (read >= 0 && trace_parser_loaded(parser) && !trace_parser_cont(parser)) { - ret = ftrace_process_regex(iter->hash, parser->buffer, + ret = ftrace_process_regex(parser->buffer, parser->idx, enable); trace_parser_clear(parser); if (ret) @@ -2818,83 +2278,22 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, return ftrace_regex_write(file, ubuf, cnt, ppos, 0); } -static int -ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, - int reset, int enable) +static void +ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) { - struct ftrace_hash **orig_hash; - struct ftrace_hash *hash; - int ret; - - /* All global ops uses the global ops filters */ - if (ops->flags & FTRACE_OPS_FL_GLOBAL) - ops = &global_ops; - if (unlikely(ftrace_disabled)) - return -ENODEV; - - if (enable) - orig_hash = &ops->filter_hash; - else - orig_hash = &ops->notrace_hash; - - hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); - if (!hash) - return -ENOMEM; + return; mutex_lock(&ftrace_regex_lock); if (reset) - ftrace_filter_reset(hash); + ftrace_filter_reset(enable); if (buf) - ftrace_match_records(hash, buf, len); - - mutex_lock(&ftrace_lock); - ret = ftrace_hash_move(orig_hash, hash); - mutex_unlock(&ftrace_lock); - + ftrace_match_records(buf, len, enable); mutex_unlock(&ftrace_regex_lock); - - free_ftrace_hash(hash); - return ret; -} - -/** - * ftrace_set_filter - set a function to filter on in ftrace - * @ops - the ops to set the filter with - * @buf - the string that holds the function filter text. - * @len - the length of the string. - * @reset - non zero to reset all filters before applying this filter. - * - * Filters denote which functions should be enabled when tracing is enabled. - * If @buf is NULL and reset is set, all functions will be enabled for tracing. 
- */ -void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, - int len, int reset) -{ - ftrace_set_regex(ops, buf, len, reset, 1); } -EXPORT_SYMBOL_GPL(ftrace_set_filter); -/** - * ftrace_set_notrace - set a function to not trace in ftrace - * @ops - the ops to set the notrace filter with - * @buf - the string that holds the function notrace text. - * @len - the length of the string. - * @reset - non zero to reset all filters before applying this filter. - * - * Notrace Filters denote which functions should not be enabled when tracing - * is enabled. If @buf is NULL and reset is set, all functions will be enabled - * for tracing. - */ -void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, - int len, int reset) -{ - ftrace_set_regex(ops, buf, len, reset, 0); -} -EXPORT_SYMBOL_GPL(ftrace_set_notrace); /** * ftrace_set_filter - set a function to filter on in ftrace - * @ops - the ops to set the filter with * @buf - the string that holds the function filter text. * @len - the length of the string. * @reset - non zero to reset all filters before applying this filter. @@ -2902,15 +2301,13 @@ EXPORT_SYMBOL_GPL(ftrace_set_notrace); * Filters denote which functions should be enabled when tracing is enabled. * If @buf is NULL and reset is set, all functions will be enabled for tracing. */ -void ftrace_set_global_filter(unsigned char *buf, int len, int reset) +void ftrace_set_filter(unsigned char *buf, int len, int reset) { - ftrace_set_regex(&global_ops, buf, len, reset, 1); + ftrace_set_regex(buf, len, reset, 1); } -EXPORT_SYMBOL_GPL(ftrace_set_global_filter); /** * ftrace_set_notrace - set a function to not trace in ftrace - * @ops - the ops to set the notrace filter with * @buf - the string that holds the function notrace text. * @len - the length of the string. * @reset - non zero to reset all filters before applying this filter. @@ -2919,11 +2316,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_filter); * is enabled. If @buf is NULL and reset is set, all functions will be enabled * for tracing. */ -void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) +void ftrace_set_notrace(unsigned char *buf, int len, int reset) { - ftrace_set_regex(&global_ops, buf, len, reset, 0); + ftrace_set_regex(buf, len, reset, 0); } -EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); /* * command line interface to allow users to set filters on boot up. 
@@ -2974,23 +2370,22 @@ static void __init set_ftrace_early_graph(char *buf) } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -static void __init -set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable) +static void __init set_ftrace_early_filter(char *buf, int enable) { char *func; while (buf) { func = strsep(&buf, ","); - ftrace_set_regex(ops, func, strlen(func), 0, enable); + ftrace_set_regex(func, strlen(func), 0, enable); } } static void __init set_ftrace_early_filters(void) { if (ftrace_filter_buf[0]) - set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1); + set_ftrace_early_filter(ftrace_filter_buf, 1); if (ftrace_notrace_buf[0]) - set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0); + set_ftrace_early_filter(ftrace_notrace_buf, 0); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (ftrace_graph_buf[0]) set_ftrace_early_graph(ftrace_graph_buf); @@ -2998,14 +2393,11 @@ static void __init set_ftrace_early_filters(void) } static int -ftrace_regex_release(struct inode *inode, struct file *file) +ftrace_regex_release(struct inode *inode, struct file *file, int enable) { struct seq_file *m = (struct seq_file *)file->private_data; struct ftrace_iterator *iter; - struct ftrace_hash **orig_hash; struct trace_parser *parser; - int filter_hash; - int ret; mutex_lock(&ftrace_regex_lock); if (file->f_mode & FMODE_READ) { @@ -3018,41 +2410,33 @@ ftrace_regex_release(struct inode *inode, struct file *file) parser = &iter->parser; if (trace_parser_loaded(parser)) { parser->buffer[parser->idx] = 0; - ftrace_match_records(iter->hash, parser->buffer, parser->idx); + ftrace_match_records(parser->buffer, parser->idx, enable); } - trace_parser_put(parser); - - if (file->f_mode & FMODE_WRITE) { - filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); - - if (filter_hash) - orig_hash = &iter->ops->filter_hash; - else - orig_hash = &iter->ops->notrace_hash; + mutex_lock(&ftrace_lock); + if (ftrace_start_up && ftrace_enabled) + ftrace_run_update_code(FTRACE_ENABLE_CALLS); + mutex_unlock(&ftrace_lock); - mutex_lock(&ftrace_lock); - /* - * Remove the current set, update the hash and add - * them back. 
- */ - ftrace_hash_rec_disable(iter->ops, filter_hash); - ret = ftrace_hash_move(orig_hash, iter->hash); - if (!ret) { - ftrace_hash_rec_enable(iter->ops, filter_hash); - if (iter->ops->flags & FTRACE_OPS_FL_ENABLED - && ftrace_enabled) - ftrace_run_update_code(FTRACE_ENABLE_CALLS); - } - mutex_unlock(&ftrace_lock); - } - free_ftrace_hash(iter->hash); + trace_parser_put(parser); kfree(iter); mutex_unlock(&ftrace_regex_lock); return 0; } +static int +ftrace_filter_release(struct inode *inode, struct file *file) +{ + return ftrace_regex_release(inode, file, 1); +} + +static int +ftrace_notrace_release(struct inode *inode, struct file *file) +{ + return ftrace_regex_release(inode, file, 0); +} + static const struct file_operations ftrace_avail_fops = { .open = ftrace_avail_open, .read = seq_read, @@ -3060,8 +2444,8 @@ static const struct file_operations ftrace_avail_fops = { .release = seq_release_private, }; -static const struct file_operations ftrace_enabled_fops = { - .open = ftrace_enabled_open, +static const struct file_operations ftrace_failures_fops = { + .open = ftrace_failures_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, @@ -3072,7 +2456,7 @@ static const struct file_operations ftrace_filter_fops = { .read = seq_read, .write = ftrace_filter_write, .llseek = ftrace_regex_lseek, - .release = ftrace_regex_release, + .release = ftrace_filter_release, }; static const struct file_operations ftrace_notrace_fops = { @@ -3080,7 +2464,7 @@ static const struct file_operations ftrace_notrace_fops = { .read = seq_read, .write = ftrace_notrace_write, .llseek = ftrace_regex_lseek, - .release = ftrace_regex_release, + .release = ftrace_notrace_release, }; #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -3189,6 +2573,9 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) bool exists; int i; + if (ftrace_disabled) + return -ENODEV; + /* decode regex */ type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) @@ -3197,18 +2584,12 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) search_len = strlen(search); mutex_lock(&ftrace_lock); - - if (unlikely(ftrace_disabled)) { - mutex_unlock(&ftrace_lock); - return -ENODEV; - } - do_for_each_ftrace_rec(pg, rec) { - if (rec->flags & FTRACE_FL_FREE) + if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) continue; - if (ftrace_match_record(rec, NULL, search, search_len, type)) { + if (ftrace_match_record(rec, search, search_len, type)) { /* if it is in the array */ exists = false; for (i = 0; i < *idx; i++) { @@ -3298,8 +2679,8 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) trace_create_file("available_filter_functions", 0444, d_tracer, NULL, &ftrace_avail_fops); - trace_create_file("enabled_functions", 0444, - d_tracer, NULL, &ftrace_enabled_fops); + trace_create_file("failures", 0444, + d_tracer, NULL, &ftrace_failures_fops); trace_create_file("set_ftrace_filter", 0644, d_tracer, NULL, &ftrace_filter_fops); @@ -3322,6 +2703,7 @@ static int ftrace_process_locs(struct module *mod, { unsigned long *p; unsigned long addr; + unsigned long flags; mutex_lock(&ftrace_lock); p = start; @@ -3338,7 +2720,10 @@ static int ftrace_process_locs(struct module *mod, ftrace_record_ip(addr); } + /* disable interrupts to prevent kstop machine */ + local_irq_save(flags); ftrace_update_code(mod); + local_irq_restore(flags); mutex_unlock(&ftrace_lock); return 0; @@ -3350,11 +2735,10 @@ void ftrace_release_mod(struct module *mod) struct dyn_ftrace *rec; struct 
ftrace_page *pg; - mutex_lock(&ftrace_lock); - if (ftrace_disabled) - goto out_unlock; + return; + mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (within_module_core(rec->ip, mod)) { /* @@ -3365,7 +2749,6 @@ void ftrace_release_mod(struct module *mod) ftrace_free_rec(rec); } } while_for_each_ftrace_rec(); - out_unlock: mutex_unlock(&ftrace_lock); } @@ -3452,10 +2835,6 @@ void __init ftrace_init(void) #else -static struct ftrace_ops global_ops = { - .func = ftrace_stub, -}; - static int __init ftrace_nodyn_init(void) { ftrace_enabled = 1; @@ -3466,38 +2845,12 @@ device_initcall(ftrace_nodyn_init); static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } static inline void ftrace_startup_enable(int command) { } /* Keep as macros so we do not need to define the commands */ -# define ftrace_startup(ops, command) do { } while (0) -# define ftrace_shutdown(ops, command) do { } while (0) +# define ftrace_startup(command) do { } while (0) +# define ftrace_shutdown(command) do { } while (0) # define ftrace_startup_sysctl() do { } while (0) # define ftrace_shutdown_sysctl() do { } while (0) - -static inline int -ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) -{ - return 1; -} - #endif /* CONFIG_DYNAMIC_FTRACE */ -static void -ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) -{ - struct ftrace_ops *op; - - /* - * Some of the ops may be dynamically allocated, - * they must be freed after a synchronize_sched(). - */ - preempt_disable_notrace(); - op = rcu_dereference_raw(ftrace_ops_list); - while (op != &ftrace_list_end) { - if (ftrace_ops_test(op, ip)) - op->func(ip, parent_ip); - op = rcu_dereference_raw(op->next); - }; - preempt_enable_notrace(); -} - static void clear_ftrace_swapper(void) { struct task_struct *p; @@ -3790,23 +3143,19 @@ void ftrace_kill(void) */ int register_ftrace_function(struct ftrace_ops *ops) { - int ret = -1; - - mutex_lock(&ftrace_lock); + int ret; if (unlikely(ftrace_disabled)) - goto out_unlock; + return -1; - ret = __register_ftrace_function(ops); - if (!ret) - ftrace_startup(ops, 0); + mutex_lock(&ftrace_lock); + ret = __register_ftrace_function(ops); + ftrace_startup(0); - out_unlock: mutex_unlock(&ftrace_lock); return ret; } -EXPORT_SYMBOL_GPL(register_ftrace_function); /** * unregister_ftrace_function - unregister a function for profiling. 
@@ -3820,27 +3169,25 @@ int unregister_ftrace_function(struct ftrace_ops *ops) mutex_lock(&ftrace_lock); ret = __unregister_ftrace_function(ops); - if (!ret) - ftrace_shutdown(ops, 0); + ftrace_shutdown(0); mutex_unlock(&ftrace_lock); return ret; } -EXPORT_SYMBOL_GPL(unregister_ftrace_function); int ftrace_enable_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int ret = -ENODEV; - - mutex_lock(&ftrace_lock); + int ret; if (unlikely(ftrace_disabled)) - goto out; + return -ENODEV; + + mutex_lock(&ftrace_lock); - ret = proc_dointvec(table, write, buffer, lenp, ppos); + ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) goto out; @@ -3852,11 +3199,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, ftrace_startup_sysctl(); /* we are starting ftrace again */ - if (ftrace_ops_list != &ftrace_list_end) { - if (ftrace_ops_list->next == &ftrace_list_end) - ftrace_trace_function = ftrace_ops_list->func; + if (ftrace_list != &ftrace_list_end) { + if (ftrace_list->next == &ftrace_list_end) + ftrace_trace_function = ftrace_list->func; else - ftrace_trace_function = ftrace_ops_list_func; + ftrace_trace_function = ftrace_list_func; } } else { @@ -4045,7 +3392,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, ftrace_graph_return = retfunc; ftrace_graph_entry = entryfunc; - ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); + ftrace_startup(FTRACE_START_FUNC_RET); out: mutex_unlock(&ftrace_lock); @@ -4062,7 +3409,7 @@ void unregister_ftrace_graph(void) ftrace_graph_active--; ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; ftrace_graph_entry = ftrace_graph_entry_stub; - ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); + ftrace_shutdown(FTRACE_STOP_FUNC_RET); unregister_pm_notifier(&ftrace_suspend_notifier); unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index ee9c921d7f21..d38c16a06a6f 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -1110,7 +1110,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; - entry->padding = 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | @@ -2014,10 +2013,9 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) { enum print_line_t ret; - if (iter->lost_events && - !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", - iter->cpu, iter->lost_events)) - return TRACE_TYPE_PARTIAL_LINE; + if (iter->lost_events) + trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", + iter->cpu, iter->lost_events); if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); @@ -3231,14 +3229,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, if (iter->seq.len >= cnt) break; - - /* - * Setting the full flag means we reached the trace_seq buffer - * size and we should leave by partial output condition above. - * One of the trace_seq_* functions is not used properly. 
- */ - WARN_ONCE(iter->seq.full, "full flag set for trace type %d", - iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h index 6b69c4bd306f..5e9dfc6286dd 100644 --- a/trunk/kernel/trace/trace.h +++ b/trunk/kernel/trace/trace.h @@ -419,8 +419,6 @@ extern void trace_find_cmdline(int pid, char comm[]); extern unsigned long ftrace_update_tot_cnt; #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func extern int DYN_FTRACE_TEST_NAME(void); -#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 -extern int DYN_FTRACE_TEST_NAME2(void); #endif extern int ring_buffer_expanded; diff --git a/trunk/kernel/trace/trace_events.c b/trunk/kernel/trace/trace_events.c index 2fe110341359..e88f74fe1d4c 100644 --- a/trunk/kernel/trace/trace_events.c +++ b/trunk/kernel/trace/trace_events.c @@ -116,7 +116,6 @@ static int trace_define_common_fields(void) __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); - __common_field(int, padding); return ret; } diff --git a/trunk/kernel/trace/trace_functions.c b/trunk/kernel/trace/trace_functions.c index 8d0e1cc4e974..16aee4d44e8f 100644 --- a/trunk/kernel/trace/trace_functions.c +++ b/trunk/kernel/trace/trace_functions.c @@ -149,13 +149,11 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = function_trace_call, - .flags = FTRACE_OPS_FL_GLOBAL, }; static struct ftrace_ops trace_stack_ops __read_mostly = { .func = function_stack_trace_call, - .flags = FTRACE_OPS_FL_GLOBAL, }; /* Our two options */ diff --git a/trunk/kernel/trace/trace_irqsoff.c b/trunk/kernel/trace/trace_irqsoff.c index c77424be284d..a4969b47afc1 100644 --- a/trunk/kernel/trace/trace_irqsoff.c +++ b/trunk/kernel/trace/trace_irqsoff.c @@ -153,7 +153,6 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = irqsoff_tracer_call, - .flags = FTRACE_OPS_FL_GLOBAL, }; #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/trunk/kernel/trace/trace_kprobe.c b/trunk/kernel/trace/trace_kprobe.c index f925c45f0afa..35d55a386145 100644 --- a/trunk/kernel/trace/trace_kprobe.c +++ b/trunk/kernel/trace/trace_kprobe.c @@ -53,6 +53,7 @@ const char *reserved_field_names[] = { "common_preempt_count", "common_pid", "common_tgid", + "common_lock_depth", FIELD_STRING_IP, FIELD_STRING_RETIP, FIELD_STRING_FUNC, diff --git a/trunk/kernel/trace/trace_output.c b/trunk/kernel/trace/trace_output.c index cf535ccedc86..456be9063c2d 100644 --- a/trunk/kernel/trace/trace_output.c +++ b/trunk/kernel/trace/trace_output.c @@ -830,9 +830,6 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, struct trace_event *event) { - if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) - return TRACE_TYPE_PARTIAL_LINE; - return TRACE_TYPE_HANDLED; } diff --git a/trunk/kernel/trace/trace_printk.c b/trunk/kernel/trace/trace_printk.c index dff763b7baf1..2547d8813cf0 100644 --- a/trunk/kernel/trace/trace_printk.c +++ b/trunk/kernel/trace/trace_printk.c @@ -32,7 +32,7 @@ static DEFINE_MUTEX(btrace_mutex); struct trace_bprintk_fmt { struct list_head list; - const char *fmt; + char fmt[0]; }; static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) @@ -49,7 +49,6 @@ static void hold_module_trace_bprintk_format(const char **start, const char **end) { const 
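The struct trace_bprintk_fmt hunk just above swaps a separately allocated const char *fmt for a flexible array member, so the format string lives in the same allocation as the header (see the offsetof()-based kmalloc() in the hunk that continues below). A standalone sketch of that allocation pattern; the struct and helper names are invented:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

struct fmt_rec {
	struct fmt_rec *next;
	char fmt[];		/* flexible array member, the C99 spelling of fmt[0] */
};

static struct fmt_rec *fmt_rec_alloc(const char *s)
{
	/* one allocation covers the header and the string */
	struct fmt_rec *r = malloc(offsetof(struct fmt_rec, fmt) + strlen(s) + 1);

	if (!r)
		return NULL;
	r->next = NULL;
	strcpy(r->fmt, s);	/* string lives inline, no second allocation to free */
	return r;
}

int main(void)
{
	struct fmt_rec *r = fmt_rec_alloc("hello %d\n");

	if (r) {
		printf("%s", r->fmt);
		free(r);	/* frees the string too */
	}
	return 0;
}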
char **iter; - char *fmt; mutex_lock(&btrace_mutex); for (iter = start; iter < end; iter++) { @@ -59,18 +58,14 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) continue; } - tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL); - if (tb_fmt) - fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); - if (tb_fmt && fmt) { + tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt) + + strlen(*iter) + 1, GFP_KERNEL); + if (tb_fmt) { list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); - strcpy(fmt, *iter); - tb_fmt->fmt = fmt; + strcpy(tb_fmt->fmt, *iter); *iter = tb_fmt->fmt; - } else { - kfree(tb_fmt); + } else *iter = NULL; - } } mutex_unlock(&btrace_mutex); } @@ -89,76 +84,6 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, return 0; } -/* - * The debugfs/tracing/printk_formats file maps the addresses with - * the ASCII formats that are used in the bprintk events in the - * buffer. For userspace tools to be able to decode the events from - * the buffer, they need to be able to map the address with the format. - * - * The addresses of the bprintk formats are in their own section - * __trace_printk_fmt. But for modules we copy them into a link list. - * The code to print the formats and their addresses passes around the - * address of the fmt string. If the fmt address passed into the seq - * functions is within the kernel core __trace_printk_fmt section, then - * it simply uses the next pointer in the list. - * - * When the fmt pointer is outside the kernel core __trace_printk_fmt - * section, then we need to read the link list pointers. The trick is - * we pass the address of the string to the seq function just like - * we do for the kernel core formats. To get back the structure that - * holds the format, we simply use containerof() and then go to the - * next format in the list. - */ -static const char ** -find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) -{ - struct trace_bprintk_fmt *mod_fmt; - - if (list_empty(&trace_bprintk_fmt_list)) - return NULL; - - /* - * v will point to the address of the fmt record from t_next - * v will be NULL from t_start. - * If this is the first pointer or called from start - * then we need to walk the list. - */ - if (!v || start_index == *pos) { - struct trace_bprintk_fmt *p; - - /* search the module list */ - list_for_each_entry(p, &trace_bprintk_fmt_list, list) { - if (start_index == *pos) - return &p->fmt; - start_index++; - } - /* pos > index */ - return NULL; - } - - /* - * v points to the address of the fmt field in the mod list - * structure that holds the module print format. 
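The long comment above explains the trick the removed find_next_mod_format() depends on: the seq code only passes around the address of the fmt member, and container_of() recovers the enclosing trace_bprintk_fmt so the walk can step to the next list entry. A self-contained illustration of container_of(), redefined here so it builds outside the kernel; the record type is invented:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct record {
	int id;
	const char *fmt;
};

int main(void)
{
	struct record rec = { .id = 42, .fmt = "fmt string" };
	const char **fmt_ptr = &rec.fmt;	/* only the member address is passed around */

	/* recover the whole record from the member address */
	struct record *back = container_of(fmt_ptr, struct record, fmt);

	printf("id=%d fmt=%s\n", back->id, back->fmt);
	return 0;
}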
- */ - mod_fmt = container_of(v, typeof(*mod_fmt), fmt); - if (mod_fmt->list.next == &trace_bprintk_fmt_list) - return NULL; - - mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list); - - return &mod_fmt->fmt; -} - -static void format_mod_start(void) -{ - mutex_lock(&btrace_mutex); -} - -static void format_mod_stop(void) -{ - mutex_unlock(&btrace_mutex); -} - #else /* !CONFIG_MODULES */ __init static int module_trace_bprintk_format_notify(struct notifier_block *self, @@ -166,13 +91,6 @@ module_trace_bprintk_format_notify(struct notifier_block *self, { return 0; } -static inline const char ** -find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) -{ - return NULL; -} -static inline void format_mod_start(void) { } -static inline void format_mod_stop(void) { } #endif /* CONFIG_MODULES */ @@ -235,33 +153,20 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) } EXPORT_SYMBOL_GPL(__ftrace_vprintk); -static const char **find_next(void *v, loff_t *pos) -{ - const char **fmt = v; - int start_index; - - if (!fmt) - fmt = __start___trace_bprintk_fmt + *pos; - - start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; - - if (*pos < start_index) - return fmt; - - return find_next_mod_format(start_index, v, fmt, pos); -} - static void * t_start(struct seq_file *m, loff_t *pos) { - format_mod_start(); - return find_next(NULL, pos); + const char **fmt = __start___trace_bprintk_fmt + *pos; + + if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) + return NULL; + return fmt; } static void *t_next(struct seq_file *m, void * v, loff_t *pos) { (*pos)++; - return find_next(v, pos); + return t_start(m, pos); } static int t_show(struct seq_file *m, void *v) @@ -300,7 +205,6 @@ static int t_show(struct seq_file *m, void *v) static void t_stop(struct seq_file *m, void *p) { - format_mod_stop(); } static const struct seq_operations show_format_seq_ops = { diff --git a/trunk/kernel/trace/trace_sched_wakeup.c b/trunk/kernel/trace/trace_sched_wakeup.c index f029dd4fd2ca..7319559ed59f 100644 --- a/trunk/kernel/trace/trace_sched_wakeup.c +++ b/trunk/kernel/trace/trace_sched_wakeup.c @@ -129,7 +129,6 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = wakeup_tracer_call, - .flags = FTRACE_OPS_FL_GLOBAL, }; #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/trunk/kernel/trace/trace_selftest.c b/trunk/kernel/trace/trace_selftest.c index 288541f977fb..659732eba07c 100644 --- a/trunk/kernel/trace/trace_selftest.c +++ b/trunk/kernel/trace/trace_selftest.c @@ -101,206 +101,6 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) #ifdef CONFIG_DYNAMIC_FTRACE -static int trace_selftest_test_probe1_cnt; -static void trace_selftest_test_probe1_func(unsigned long ip, - unsigned long pip) -{ - trace_selftest_test_probe1_cnt++; -} - -static int trace_selftest_test_probe2_cnt; -static void trace_selftest_test_probe2_func(unsigned long ip, - unsigned long pip) -{ - trace_selftest_test_probe2_cnt++; -} - -static int trace_selftest_test_probe3_cnt; -static void trace_selftest_test_probe3_func(unsigned long ip, - unsigned long pip) -{ - trace_selftest_test_probe3_cnt++; -} - -static int trace_selftest_test_global_cnt; -static void trace_selftest_test_global_func(unsigned long ip, - unsigned long pip) -{ - trace_selftest_test_global_cnt++; -} - -static int trace_selftest_test_dyn_cnt; -static void trace_selftest_test_dyn_func(unsigned long ip, - unsigned 
long pip) -{ - trace_selftest_test_dyn_cnt++; -} - -static struct ftrace_ops test_probe1 = { - .func = trace_selftest_test_probe1_func, -}; - -static struct ftrace_ops test_probe2 = { - .func = trace_selftest_test_probe2_func, -}; - -static struct ftrace_ops test_probe3 = { - .func = trace_selftest_test_probe3_func, -}; - -static struct ftrace_ops test_global = { - .func = trace_selftest_test_global_func, - .flags = FTRACE_OPS_FL_GLOBAL, -}; - -static void print_counts(void) -{ - printk("(%d %d %d %d %d) ", - trace_selftest_test_probe1_cnt, - trace_selftest_test_probe2_cnt, - trace_selftest_test_probe3_cnt, - trace_selftest_test_global_cnt, - trace_selftest_test_dyn_cnt); -} - -static void reset_counts(void) -{ - trace_selftest_test_probe1_cnt = 0; - trace_selftest_test_probe2_cnt = 0; - trace_selftest_test_probe3_cnt = 0; - trace_selftest_test_global_cnt = 0; - trace_selftest_test_dyn_cnt = 0; -} - -static int trace_selftest_ops(int cnt) -{ - int save_ftrace_enabled = ftrace_enabled; - struct ftrace_ops *dyn_ops; - char *func1_name; - char *func2_name; - int len1; - int len2; - int ret = -1; - - printk(KERN_CONT "PASSED\n"); - pr_info("Testing dynamic ftrace ops #%d: ", cnt); - - ftrace_enabled = 1; - reset_counts(); - - /* Handle PPC64 '.' name */ - func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME); - func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2); - len1 = strlen(func1_name); - len2 = strlen(func2_name); - - /* - * Probe 1 will trace function 1. - * Probe 2 will trace function 2. - * Probe 3 will trace functions 1 and 2. - */ - ftrace_set_filter(&test_probe1, func1_name, len1, 1); - ftrace_set_filter(&test_probe2, func2_name, len2, 1); - ftrace_set_filter(&test_probe3, func1_name, len1, 1); - ftrace_set_filter(&test_probe3, func2_name, len2, 0); - - register_ftrace_function(&test_probe1); - register_ftrace_function(&test_probe2); - register_ftrace_function(&test_probe3); - register_ftrace_function(&test_global); - - DYN_FTRACE_TEST_NAME(); - - print_counts(); - - if (trace_selftest_test_probe1_cnt != 1) - goto out; - if (trace_selftest_test_probe2_cnt != 0) - goto out; - if (trace_selftest_test_probe3_cnt != 1) - goto out; - if (trace_selftest_test_global_cnt == 0) - goto out; - - DYN_FTRACE_TEST_NAME2(); - - print_counts(); - - if (trace_selftest_test_probe1_cnt != 1) - goto out; - if (trace_selftest_test_probe2_cnt != 1) - goto out; - if (trace_selftest_test_probe3_cnt != 2) - goto out; - - /* Add a dynamic probe */ - dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL); - if (!dyn_ops) { - printk("MEMORY ERROR "); - goto out; - } - - dyn_ops->func = trace_selftest_test_dyn_func; - - register_ftrace_function(dyn_ops); - - trace_selftest_test_global_cnt = 0; - - DYN_FTRACE_TEST_NAME(); - - print_counts(); - - if (trace_selftest_test_probe1_cnt != 2) - goto out_free; - if (trace_selftest_test_probe2_cnt != 1) - goto out_free; - if (trace_selftest_test_probe3_cnt != 3) - goto out_free; - if (trace_selftest_test_global_cnt == 0) - goto out; - if (trace_selftest_test_dyn_cnt == 0) - goto out_free; - - DYN_FTRACE_TEST_NAME2(); - - print_counts(); - - if (trace_selftest_test_probe1_cnt != 2) - goto out_free; - if (trace_selftest_test_probe2_cnt != 2) - goto out_free; - if (trace_selftest_test_probe3_cnt != 4) - goto out_free; - - ret = 0; - out_free: - unregister_ftrace_function(dyn_ops); - kfree(dyn_ops); - - out: - /* Purposely unregister in the same order */ - unregister_ftrace_function(&test_probe1); - unregister_ftrace_function(&test_probe2); - unregister_ftrace_function(&test_probe3); 
- unregister_ftrace_function(&test_global); - - /* Make sure everything is off */ - reset_counts(); - DYN_FTRACE_TEST_NAME(); - DYN_FTRACE_TEST_NAME(); - - if (trace_selftest_test_probe1_cnt || - trace_selftest_test_probe2_cnt || - trace_selftest_test_probe3_cnt || - trace_selftest_test_global_cnt || - trace_selftest_test_dyn_cnt) - ret = -1; - - ftrace_enabled = save_ftrace_enabled; - - return ret; -} - /* Test dynamic code modification and ftrace filters */ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, struct trace_array *tr, @@ -331,7 +131,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); /* filter only on our function */ - ftrace_set_global_filter(func_name, strlen(func_name), 1); + ftrace_set_filter(func_name, strlen(func_name), 1); /* enable tracing */ ret = tracer_init(trace, tr); @@ -366,30 +166,22 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, /* check the trace buffer */ ret = trace_test_buffer(tr, &count); + trace->reset(tr); tracing_start(); /* we should only have one item */ if (!ret && count != 1) { - trace->reset(tr); printk(KERN_CONT ".. filter failed count=%ld ..", count); ret = -1; goto out; } - /* Test the ops with global tracing running */ - ret = trace_selftest_ops(1); - trace->reset(tr); - out: ftrace_enabled = save_ftrace_enabled; tracer_enabled = save_tracer_enabled; /* Enable tracing on all functions again */ - ftrace_set_global_filter(NULL, 0, 1); - - /* Test the ops with global tracing off */ - if (!ret) - ret = trace_selftest_ops(2); + ftrace_set_filter(NULL, 0, 1); return ret; } diff --git a/trunk/kernel/trace/trace_selftest_dynamic.c b/trunk/kernel/trace/trace_selftest_dynamic.c index b4c475a0a48b..54dd77cce5bf 100644 --- a/trunk/kernel/trace/trace_selftest_dynamic.c +++ b/trunk/kernel/trace/trace_selftest_dynamic.c @@ -5,9 +5,3 @@ int DYN_FTRACE_TEST_NAME(void) /* used to call mcount */ return 0; } - -int DYN_FTRACE_TEST_NAME2(void) -{ - /* used to call mcount */ - return 0; -} diff --git a/trunk/kernel/trace/trace_stack.c b/trunk/kernel/trace/trace_stack.c index b0b53b8e4c25..4c5dead0c239 100644 --- a/trunk/kernel/trace/trace_stack.c +++ b/trunk/kernel/trace/trace_stack.c @@ -133,7 +133,6 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = stack_trace_call, - .flags = FTRACE_OPS_FL_GLOBAL, }; static ssize_t diff --git a/trunk/kernel/tracepoint.c b/trunk/kernel/tracepoint.c index b219f1449c54..68187af4889e 100644 --- a/trunk/kernel/tracepoint.c +++ b/trunk/kernel/tracepoint.c @@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, { WARN_ON(strcmp((*entry)->name, elem->name) != 0); - if (elem->regfunc && !jump_label_enabled(&elem->key) && active) + if (elem->regfunc && !elem->state && active) elem->regfunc(); - else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) + else if (elem->unregfunc && elem->state && !active) elem->unregfunc(); /* @@ -264,10 +264,13 @@ static void set_tracepoint(struct tracepoint_entry **entry, * is used. 
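The kernel/tracepoint.c hunks around this point replace the reference-counted jump_label_inc()/jump_label_dec() pair with a plain elem->state flag plus jump_label_enable()/jump_label_disable(). The counted form makes enabling idempotent when several users pin the same key; a userspace sketch of that counting idiom, with plain ints standing in for a static key (all names invented):

#include <stdio.h>

static int key_count;	/* stand-in for a jump-label key's reference count */
static int patched;	/* stand-in for "the call site is currently enabled" */

static void key_inc(void)
{
	if (key_count++ == 0)
		patched = 1;	/* first user enables the site */
}

static void key_dec(void)
{
	if (--key_count == 0)
		patched = 0;	/* only the last user disables it */
}

int main(void)
{
	key_inc();	/* user A */
	key_inc();	/* user B */
	key_dec();	/* A goes away; B still needs the site */
	printf("patched=%d count=%d\n", patched, key_count);	/* patched=1 count=1 */
	key_dec();
	printf("patched=%d count=%d\n", patched, key_count);	/* patched=0 count=0 */
	return 0;
}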
*/ rcu_assign_pointer(elem->funcs, (*entry)->funcs); - if (active && !jump_label_enabled(&elem->key)) - jump_label_inc(&elem->key); - else if (!active && jump_label_enabled(&elem->key)) - jump_label_dec(&elem->key); + if (!elem->state && active) { + jump_label_enable(&elem->state); + elem->state = active; + } else if (elem->state && !active) { + jump_label_disable(&elem->state); + elem->state = active; + } } /* @@ -278,11 +281,13 @@ static void set_tracepoint(struct tracepoint_entry **entry, */ static void disable_tracepoint(struct tracepoint *elem) { - if (elem->unregfunc && jump_label_enabled(&elem->key)) + if (elem->unregfunc && elem->state) elem->unregfunc(); - if (jump_label_enabled(&elem->key)) - jump_label_dec(&elem->key); + if (elem->state) { + jump_label_disable(&elem->state); + elem->state = 0; + } rcu_assign_pointer(elem->funcs, NULL); } diff --git a/trunk/kernel/watchdog.c b/trunk/kernel/watchdog.c index 14733d4d156b..140dce750450 100644 --- a/trunk/kernel/watchdog.c +++ b/trunk/kernel/watchdog.c @@ -430,12 +430,9 @@ static int watchdog_enable(int cpu) p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); if (IS_ERR(p)) { printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); - if (!err) { + if (!err) /* if hardlockup hasn't already set this */ err = PTR_ERR(p); - /* and disable the perf event */ - watchdog_nmi_disable(cpu); - } goto out; } kthread_bind(p, cpu); diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index e3378e8d3a5c..8859a41806dd 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -1291,14 +1291,8 @@ __acquires(&gcwq->lock) return true; spin_unlock_irq(&gcwq->lock); - /* - * We've raced with CPU hot[un]plug. Give it a breather - * and retry migration. cond_resched() is required here; - * otherwise, we might deadlock against cpu_stop trying to - * bring down the CPU on non-preemptive kernel. - */ + /* CPU has come up in between, retry migration */ cpu_relax(); - cond_resched(); } } diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug index 9b1707b5f646..c768bcdda1b7 100644 --- a/trunk/lib/Kconfig.debug +++ b/trunk/lib/Kconfig.debug @@ -238,21 +238,6 @@ config DETECT_HUNG_TASK enabled then all held locks will also be reported. This feature has negligible overhead. -config DEFAULT_HUNG_TASK_TIMEOUT - int "Default timeout for hung task detection (in seconds)" - depends on DETECT_HUNG_TASK - default 120 - help - This option controls the default timeout (in seconds) used - to determine when a task has become non-responsive and should - be considered hung. - - It can be adjusted at runtime via the kernel.hung_task_timeout - sysctl or by writing a value to /proc/sys/kernel/hung_task_timeout. - - A timeout of 0 disables the check. The default is two minutes. - Keeping the default should be fine in most cases. 
- config BOOTPARAM_HUNG_TASK_PANIC bool "Panic (Reboot) On Hung Tasks" depends on DETECT_HUNG_TASK @@ -413,9 +398,9 @@ config SLUB_STATS config DEBUG_KMEMLEAK bool "Kernel memory leak detector" depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ - (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) + (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) - select DEBUG_FS + select DEBUG_FS if SYSFS select STACKTRACE if STACKTRACE_SUPPORT select KALLSYMS select CRC32 diff --git a/trunk/lib/Makefile b/trunk/lib/Makefile index 4b49a249064b..ef0f28571156 100644 --- a/trunk/lib/Makefile +++ b/trunk/lib/Makefile @@ -21,8 +21,7 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ - bsearch.o + string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o obj-y += kstrtox.o obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o diff --git a/trunk/lib/bsearch.c b/trunk/lib/bsearch.c deleted file mode 100644 index 5b54758e2afb..000000000000 --- a/trunk/lib/bsearch.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * A generic implementation of binary search for the Linux kernel - * - * Copyright (C) 2008-2009 Ksplice, Inc. - * Author: Tim Abbott - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2. - */ - -#include -#include - -/* - * bsearch - binary search an array of elements - * @key: pointer to item being searched for - * @base: pointer to first element to search - * @num: number of elements - * @size: size of each element - * @cmp: pointer to comparison function - * - * This function does a binary search on the given array. The - * contents of the array should already be in ascending sorted order - * under the provided comparison function. - * - * Note that the key need not have the same type as the elements in - * the array, e.g. key could be a string and the comparison function - * could compare the string with the struct's name field. However, if - * the key and elements in the array are of the same type, you can use - * the same comparison function for both sort() and bsearch(). 
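The lib/bsearch.c removal starting above (its body follows) spells out the contract: a sorted array, a key that may have a different type from the elements, and a cmp(key, elt) callback. A short usage sketch of the same contract written against the C library's bsearch(), which takes the identical comparator shape:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sym {
	const char *name;
	unsigned long addr;
};

/* key is a plain string, elements are struct sym: they need not match */
static int cmp_name(const void *key, const void *elt)
{
	return strcmp(key, ((const struct sym *)elt)->name);
}

int main(void)
{
	/* must already be sorted by the comparison function (here: by name) */
	struct sym table[] = {
		{ "alpha", 0x1000 },
		{ "bravo", 0x2000 },
		{ "delta", 0x3000 },
	};
	struct sym *hit = bsearch("bravo", table,
				  sizeof(table) / sizeof(table[0]),
				  sizeof(table[0]), cmp_name);

	if (hit)
		printf("%s -> %#lx\n", hit->name, hit->addr);
	return 0;
}

Because the key and the elements may differ in type, the same comparator cannot always be reused for sorting; when they do share a type, one comparator serves both qsort() and bsearch().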
- */ -void *bsearch(const void *key, const void *base, size_t num, size_t size, - int (*cmp)(const void *key, const void *elt)) -{ - size_t start = 0, end = num; - int result; - - while (start < end) { - size_t mid = start + (end - start) / 2; - - result = cmp(key, base + mid * size); - if (result < 0) - end = mid; - else if (result > 0) - start = mid + 1; - else - return (void *)base + mid * size; - } - - return NULL; -} -EXPORT_SYMBOL(bsearch); diff --git a/trunk/lib/dma-debug.c b/trunk/lib/dma-debug.c index db07bfd9298e..4bfb0471f106 100644 --- a/trunk/lib/dma-debug.c +++ b/trunk/lib/dma-debug.c @@ -649,7 +649,7 @@ static int dma_debug_fs_init(void) return -ENOMEM; } -static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) +static int device_dma_allocations(struct device *dev) { struct dma_debug_entry *entry; unsigned long flags; @@ -660,10 +660,8 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o for (i = 0; i < HASH_SIZE; ++i) { spin_lock(&dma_entry_hash[i].lock); list_for_each_entry(entry, &dma_entry_hash[i].list, list) { - if (entry->dev == dev) { + if (entry->dev == dev) count += 1; - *out_entry = entry; - } } spin_unlock(&dma_entry_hash[i].lock); } @@ -676,7 +674,6 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; - struct dma_debug_entry *uninitialized_var(entry); int count; if (global_disable) @@ -684,17 +681,12 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: - count = device_dma_allocations(dev, &entry); + count = device_dma_allocations(dev); if (count == 0) break; - err_printk(dev, entry, "DMA-API: device driver has pending " + err_printk(dev, NULL, "DMA-API: device driver has pending " "DMA allocations while released from device " - "[count=%d]\n" - "One of leaked entries details: " - "[device address=0x%016llx] [size=%llu bytes] " - "[mapped with %s] [mapped as %s]\n", - count, entry->dev_addr, entry->size, - dir2name[entry->direction], type2name[entry->type]); + "[count=%d]\n", count); break; default: break; diff --git a/trunk/lib/flex_array.c b/trunk/lib/flex_array.c index 854b57bd7d9d..c0ea40ba2082 100644 --- a/trunk/lib/flex_array.c +++ b/trunk/lib/flex_array.c @@ -232,10 +232,10 @@ EXPORT_SYMBOL(flex_array_clear); /** * flex_array_prealloc - guarantee that array space exists - * @fa: the flex array for which to preallocate parts - * @start: index of first array element for which space is allocated - * @nr_elements: number of elements for which space is allocated - * @flags: page allocation flags + * @fa: the flex array for which to preallocate parts + * @start: index of first array element for which space is allocated + * @end: index of last (inclusive) element for which space is allocated + * @flags: page allocation flags * * This will guarantee that no future calls to flex_array_put() * will allocate memory. It can be used if you are expecting to @@ -245,24 +245,14 @@ EXPORT_SYMBOL(flex_array_clear); * Locking must be provided by the caller. 
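The flex_array_prealloc() hunk that follows reverts the interface from (start, nr_elements) back to (start, end) with an inclusive end; the removed lines show the conversion and the extra zero-count checks the counted form needs. A tiny sketch of that conversion and bounds check (names invented):

#include <stdio.h>

/* validate a (start, count) request against a table of 'total' slots */
static int range_ok(unsigned int start, unsigned int nr, unsigned int total)
{
	unsigned int end;

	if (nr == 0)
		return 1;	/* nothing to reserve is trivially fine */
	if (start >= total)
		return 0;
	end = start + nr - 1;	/* inclusive index of the last requested slot */
	return end < total && end >= start;	/* reject out-of-range (and wrapped) requests */
}

int main(void)
{
	printf("%d %d %d\n",
	       range_ok(0, 10, 10),	/* 1: exactly fills the table */
	       range_ok(5, 6, 10),	/* 0: would need slot index 10 */
	       range_ok(9, 0, 10));	/* 1: empty request */
	return 0;
}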
*/ int flex_array_prealloc(struct flex_array *fa, unsigned int start, - unsigned int nr_elements, gfp_t flags) + unsigned int end, gfp_t flags) { int start_part; int end_part; int part_nr; - unsigned int end; struct flex_array_part *part; - if (!start && !nr_elements) - return 0; - if (start >= fa->total_nr_elements) - return -ENOSPC; - if (!nr_elements) - return 0; - - end = start + nr_elements - 1; - - if (end >= fa->total_nr_elements) + if (start >= fa->total_nr_elements || end >= fa->total_nr_elements) return -ENOSPC; if (elements_fit_in_base(fa)) return 0; @@ -353,8 +343,6 @@ int flex_array_shrink(struct flex_array *fa) int part_nr; int ret = 0; - if (!fa->total_nr_elements) - return 0; if (elements_fit_in_base(fa)) return ret; for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) { diff --git a/trunk/lib/string.c b/trunk/lib/string.c index 01fad9b203e1..f71bead1be3e 100644 --- a/trunk/lib/string.c +++ b/trunk/lib/string.c @@ -535,35 +535,6 @@ bool sysfs_streq(const char *s1, const char *s2) } EXPORT_SYMBOL(sysfs_streq); -/** - * strtobool - convert common user inputs into boolean values - * @s: input string - * @res: result - * - * This routine returns 0 iff the first character is one of 'Yy1Nn0'. - * Otherwise it will return -EINVAL. Value pointed to by res is - * updated upon finding a match. - */ -int strtobool(const char *s, bool *res) -{ - switch (s[0]) { - case 'y': - case 'Y': - case '1': - *res = true; - break; - case 'n': - case 'N': - case '0': - *res = false; - break; - default: - return -EINVAL; - } - return 0; -} -EXPORT_SYMBOL(strtobool); - #ifndef __HAVE_ARCH_MEMSET /** * memset - Fill a region of memory with the given value diff --git a/trunk/lib/vsprintf.c b/trunk/lib/vsprintf.c index dfd60192bc2e..bc0ac6b333dc 100644 --- a/trunk/lib/vsprintf.c +++ b/trunk/lib/vsprintf.c @@ -797,7 +797,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, return string(buf, end, uuid, spec); } -int kptr_restrict __read_mostly; +int kptr_restrict = 1; /* * Show a '%p' thing. A kernel extension is that the '%p' is followed diff --git a/trunk/lib/xz/xz_dec_lzma2.c b/trunk/lib/xz/xz_dec_lzma2.c index a6cdc969ea42..ea5fa4fe9d67 100644 --- a/trunk/lib/xz/xz_dec_lzma2.c +++ b/trunk/lib/xz/xz_dec_lzma2.c @@ -969,9 +969,6 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, */ tmp = b->in[b->in_pos++]; - if (tmp == 0x00) - return XZ_STREAM_END; - if (tmp >= 0xE0 || tmp == 0x01) { s->lzma2.need_props = true; s->lzma2.need_dict_reset = false; @@ -1004,6 +1001,9 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, lzma_reset(s); } } else { + if (tmp == 0x00) + return XZ_STREAM_END; + if (tmp > 0x02) return XZ_DATA_ERROR; diff --git a/trunk/mm/huge_memory.c b/trunk/mm/huge_memory.c index 83326ad66d9b..470dcda10add 100644 --- a/trunk/mm/huge_memory.c +++ b/trunk/mm/huge_memory.c @@ -1408,9 +1408,6 @@ int split_huge_page(struct page *page) return ret; } -#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \ - VM_HUGETLB|VM_SHARED|VM_MAYSHARE) - int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { @@ -1419,7 +1416,11 @@ int hugepage_madvise(struct vm_area_struct *vma, /* * Be somewhat over-protective like KSM for now! 
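The mm/huge_memory.c hunks here trade the single VM_NO_THP mask back for the spelled-out flag list at each madvise branch; the removed #define is the usual way to keep such a test in one place. A generic sketch of the named-mask idiom (the bit values are made up, not the kernel's VM_* flags):

#include <stdio.h>

#define F_SHARED	0x01
#define F_PFNMAP	0x02
#define F_HUGETLB	0x04
#define F_MIXEDMAP	0x08

/* one named mask instead of repeating the list at every call site */
#define F_NO_THP	(F_SHARED | F_PFNMAP | F_HUGETLB | F_MIXEDMAP)

static int thp_allowed(unsigned long flags)
{
	return !(flags & F_NO_THP);	/* same shape as the open-coded flag tests above */
}

int main(void)
{
	printf("%d %d\n", thp_allowed(0), thp_allowed(F_PFNMAP));	/* 1 0 */
	return 0;
}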
*/ - if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) + if (*vm_flags & (VM_HUGEPAGE | + VM_SHARED | VM_MAYSHARE | + VM_PFNMAP | VM_IO | VM_DONTEXPAND | + VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | + VM_MIXEDMAP | VM_SAO)) return -EINVAL; *vm_flags &= ~VM_NOHUGEPAGE; *vm_flags |= VM_HUGEPAGE; @@ -1435,7 +1436,11 @@ int hugepage_madvise(struct vm_area_struct *vma, /* * Be somewhat over-protective like KSM for now! */ - if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) + if (*vm_flags & (VM_NOHUGEPAGE | + VM_SHARED | VM_MAYSHARE | + VM_PFNMAP | VM_IO | VM_DONTEXPAND | + VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | + VM_MIXEDMAP | VM_SAO)) return -EINVAL; *vm_flags &= ~VM_HUGEPAGE; *vm_flags |= VM_NOHUGEPAGE; @@ -1569,14 +1574,10 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma) * page fault if needed. */ return 0; - if (vma->vm_ops) + if (vma->vm_file || vma->vm_ops) /* khugepaged not yet working on file or special mappings */ return 0; - /* - * If is_pfn_mapping() is true is_learn_pfn_mapping() must be - * true too, verify it here. - */ - VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); + VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart < hend) @@ -1827,15 +1828,12 @@ static void collapse_huge_page(struct mm_struct *mm, (vma->vm_flags & VM_NOHUGEPAGE)) goto out; - if (!vma->anon_vma || vma->vm_ops) + /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ + if (!vma->anon_vma || vma->vm_ops || vma->vm_file) goto out; if (is_vma_temporary_stack(vma)) goto out; - /* - * If is_pfn_mapping() is true is_learn_pfn_mapping() must be - * true too, verify it here. - */ - VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); + VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) @@ -2068,16 +2066,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, progress++; continue; } - if (!vma->anon_vma || vma->vm_ops) + /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ + if (!vma->anon_vma || vma->vm_ops || vma->vm_file) goto skip; if (is_vma_temporary_stack(vma)) goto skip; - /* - * If is_pfn_mapping() is true is_learn_pfn_mapping() - * must be true too, verify it here. 
- */ - VM_BUG_ON(is_linear_pfn_mapping(vma) || - vma->vm_flags & VM_NO_THP); + + VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; diff --git a/trunk/mm/kmemleak.c b/trunk/mm/kmemleak.c index aacee45616fc..c1d5867543e4 100644 --- a/trunk/mm/kmemleak.c +++ b/trunk/mm/kmemleak.c @@ -1414,12 +1414,9 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++(*pos); list_for_each_continue_rcu(n, &object_list) { - struct kmemleak_object *obj = - list_entry(n, struct kmemleak_object, object_list); - if (get_object(obj)) { - next_obj = obj; + next_obj = list_entry(n, struct kmemleak_object, object_list); + if (get_object(next_obj)) break; - } } put_object(prev_obj); diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c index 61e66f026563..ce22a250926f 100644 --- a/trunk/mm/memory.c +++ b/trunk/mm/memory.c @@ -1359,7 +1359,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, */ mark_page_accessed(page); } - if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { + if (flags & FOLL_MLOCK) { /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE @@ -1412,8 +1412,9 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) { - return stack_guard_page_start(vma, addr) || - stack_guard_page_end(vma, addr+PAGE_SIZE); + return (vma->vm_flags & VM_GROWSDOWN) && + (vma->vm_start == addr) && + !vma_stack_continue(vma->vm_prev, addr); } /** @@ -1550,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, continue; } + /* + * If we don't actually want the page itself, + * and it's the stack guard page, just skip it. + */ + if (!pages && stack_guard_page(vma, start)) + goto next_page; + do { struct page *page; unsigned int foll_flags = gup_flags; @@ -1566,11 +1574,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int ret; unsigned int fault_flags = 0; - /* For mlock, just skip the stack guard page. */ - if (foll_flags & FOLL_MLOCK) { - if (stack_guard_page(vma, start)) - goto next_page; - } if (foll_flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) @@ -3393,7 +3396,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, * run pte_offset_map on the pmd, if an huge pmd could * materialize from under us from a different thread. 
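The handle_mm_fault() hunk just below drops the pmd_none() test in front of __pte_alloc(). That pre-check is the familiar check-then-allocate-then-recheck shape: a cheap unlocked test skips the allocation path when the entry already exists, and the allocator rechecks under its lock because another thread may have populated it meanwhile. A userspace sketch of the same shape using a mutex (names invented; a production version would make the unlocked read an atomic load):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int *table;	/* lazily allocated, like a pte page hanging off a pmd */

static int *table_get(void)
{
	if (table)	/* cheap unlocked check, like testing pmd_none() first */
		return table;

	pthread_mutex_lock(&table_lock);
	if (!table)	/* recheck: another thread may have won the race */
		table = calloc(1024, sizeof(*table));
	pthread_mutex_unlock(&table_lock);
	return table;
}

int main(void)
{
	int *t = table_get();

	if (!t)
		return 1;
	t[0] = 42;
	printf("%d\n", t[0]);
	return 0;
}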
*/ - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address)) + if (unlikely(__pte_alloc(mm, vma, pmd, address))) return VM_FAULT_OOM; /* if an huge pmd materialized from under us just retry later */ if (unlikely(pmd_trans_huge(*pmd))) diff --git a/trunk/mm/mlock.c b/trunk/mm/mlock.c index 516b2c2ddd5a..6b55e3efe0df 100644 --- a/trunk/mm/mlock.c +++ b/trunk/mm/mlock.c @@ -162,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, VM_BUG_ON(end > vma->vm_end); VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); - gup_flags = FOLL_TOUCH | FOLL_MLOCK; + gup_flags = FOLL_TOUCH; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW @@ -178,6 +178,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; + if (vma->vm_flags & VM_LOCKED) + gup_flags |= FOLL_MLOCK; + return __get_user_pages(current, mm, addr, nr_pages, gup_flags, NULL, NULL, nonblocking); } diff --git a/trunk/mm/mmap.c b/trunk/mm/mmap.c index 772140c53ab1..e27e0cf0de03 100644 --- a/trunk/mm/mmap.c +++ b/trunk/mm/mmap.c @@ -1767,13 +1767,10 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; - error = -ENOMEM; - if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { - error = acct_stack_growth(vma, size, grow); - if (!error) { - vma->vm_end = address; - perf_event_mmap(vma); - } + error = acct_stack_growth(vma, size, grow); + if (!error) { + vma->vm_end = address; + perf_event_mmap(vma); } } vma_unlock_anon_vma(vma); diff --git a/trunk/mm/oom_kill.c b/trunk/mm/oom_kill.c index f52e85c80e8d..83fb72c108b7 100644 --- a/trunk/mm/oom_kill.c +++ b/trunk/mm/oom_kill.c @@ -172,13 +172,10 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, /* * The baseline for the badness score is the proportion of RAM that each - * task's rss, pagetable and swap space use. + * task's rss and swap space use. */ - points = get_mm_rss(p->mm) + p->mm->nr_ptes; - points += get_mm_counter(p->mm, MM_SWAPENTS); - - points *= 1000; - points /= totalpages; + points = (get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS)) * 1000 / + totalpages; task_unlock(p); /* diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index 3f8bce264df6..9f8a97b9a350 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -2317,21 +2317,6 @@ void free_pages(unsigned long addr, unsigned int order) EXPORT_SYMBOL(free_pages); -static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) -{ - if (addr) { - unsigned long alloc_end = addr + (PAGE_SIZE << order); - unsigned long used = addr + PAGE_ALIGN(size); - - split_page(virt_to_page((void *)addr), order); - while (used < alloc_end) { - free_page(used); - used += PAGE_SIZE; - } - } - return (void *)addr; -} - /** * alloc_pages_exact - allocate an exact number physically-contiguous pages. * @size: the number of bytes to allocate @@ -2351,31 +2336,20 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) unsigned long addr; addr = __get_free_pages(gfp_mask, order); - return make_alloc_exact(addr, order, size); -} -EXPORT_SYMBOL(alloc_pages_exact); + if (addr) { + unsigned long alloc_end = addr + (PAGE_SIZE << order); + unsigned long used = addr + PAGE_ALIGN(size); -/** - * alloc_pages_exact_nid - allocate an exact number of physically-contiguous - * pages on a node. 
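The removed make_alloc_exact() above captures the arithmetic behind alloc_pages_exact(): allocate the next power-of-two block of pages, split it, and free every page beyond PAGE_ALIGN(size). A standalone sketch that only computes those numbers (PAGE_SIZE, PAGE_ALIGN and get_order below are simplified reimplementations for the sketch, not the kernel's):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* smallest order such that (PAGE_SIZE << order) >= size */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE + 123;	/* needs 6 pages */
	unsigned int order = get_order(size);		/* order 3 = 8 pages allocated */
	unsigned long alloc = PAGE_SIZE << order;
	unsigned long used = PAGE_ALIGN(size);
	unsigned long freed_tail = (alloc - used) / PAGE_SIZE;

	printf("order=%u alloc=%lu pages used=%lu pages freed tail=%lu pages\n",
	       order, alloc / PAGE_SIZE, used / PAGE_SIZE, freed_tail);
	return 0;
}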
- * @nid: the preferred node ID where memory should be allocated - * @size: the number of bytes to allocate - * @gfp_mask: GFP flags for the allocation - * - * Like alloc_pages_exact(), but try to allocate on node nid first before falling - * back. - * Note this is not alloc_pages_exact_node() which allocates on a specific node, - * but is not exact. - */ -void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) -{ - unsigned order = get_order(size); - struct page *p = alloc_pages_node(nid, gfp_mask, order); - if (!p) - return NULL; - return make_alloc_exact((unsigned long)page_address(p), order, size); + split_page(virt_to_page((void *)addr), order); + while (used < alloc_end) { + free_page(used); + used += PAGE_SIZE; + } + } + + return (void *)addr; } -EXPORT_SYMBOL(alloc_pages_exact_nid); +EXPORT_SYMBOL(alloc_pages_exact); /** * free_pages_exact - release memory allocated via alloc_pages_exact() @@ -3590,7 +3564,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) if (!slab_is_available()) { zone->wait_table = (wait_queue_head_t *) - alloc_bootmem_node_nopanic(pgdat, alloc_size); + alloc_bootmem_node(pgdat, alloc_size); } else { /* * This case means that a zone whose size was 0 gets new memory @@ -4167,8 +4141,7 @@ static void __init setup_usemap(struct pglist_data *pgdat, unsigned long usemapsize = usemap_size(zonesize); zone->pageblock_flags = NULL; if (usemapsize) - zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, - usemapsize); + zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); } #else static inline void setup_usemap(struct pglist_data *pgdat, @@ -4334,7 +4307,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) size = (end - start) * sizeof(struct page); map = alloc_remap(pgdat->node_id, size); if (!map) - map = alloc_bootmem_node_nopanic(pgdat, size); + map = alloc_bootmem_node(pgdat, size); pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); } #ifndef CONFIG_NEED_MULTIPLE_NODES diff --git a/trunk/mm/page_cgroup.c b/trunk/mm/page_cgroup.c index 2daadc322ba6..99055010cece 100644 --- a/trunk/mm/page_cgroup.c +++ b/trunk/mm/page_cgroup.c @@ -134,7 +134,7 @@ static void *__init_refok alloc_page_cgroup(size_t size, int nid) { void *addr = NULL; - addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN); + addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN); if (addr) return addr; diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c index dfc7069102ee..8fa27e4e582a 100644 --- a/trunk/mm/shmem.c +++ b/trunk/mm/shmem.c @@ -852,7 +852,7 @@ static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) { - struct address_space *mapping; + struct inode *inode; unsigned long idx; unsigned long size; unsigned long limit; @@ -875,10 +875,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s if (size > SHMEM_NR_DIRECT) size = SHMEM_NR_DIRECT; offset = shmem_find_swp(entry, ptr, ptr+size); - if (offset >= 0) { - shmem_swp_balance_unmap(); + if (offset >= 0) goto found; - } if (!info->i_indirect) goto lost2; @@ -916,11 +914,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s if (size > ENTRIES_PER_PAGE) size = ENTRIES_PER_PAGE; offset = shmem_find_swp(entry, ptr, ptr+size); + shmem_swp_unmap(ptr); if (offset >= 0) { shmem_dir_unmap(dir); goto found; } - shmem_swp_unmap(ptr); } } lost1: @@ -930,7 +928,8 @@ static int 
shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s return 0; found: idx += offset; - ptr += offset; + inode = igrab(&info->vfs_inode); + spin_unlock(&info->lock); /* * Move _head_ to start search for next from here. @@ -941,18 +940,37 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s */ if (shmem_swaplist.next != &info->swaplist) list_move_tail(&shmem_swaplist, &info->swaplist); + mutex_unlock(&shmem_swaplist_mutex); + error = 1; + if (!inode) + goto out; /* - * We rely on shmem_swaplist_mutex, not only to protect the swaplist, - * but also to hold up shmem_evict_inode(): so inode cannot be freed - * beneath us (pagelock doesn't help until the page is in pagecache). + * Charge page using GFP_KERNEL while we can wait. + * Charged back to the user(not to caller) when swap account is used. + * add_to_page_cache() will be called with GFP_NOWAIT. */ - mapping = info->vfs_inode.i_mapping; - error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); - /* which does mem_cgroup_uncharge_cache_page on error */ + error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); + if (error) + goto out; + error = radix_tree_preload(GFP_KERNEL); + if (error) { + mem_cgroup_uncharge_cache_page(page); + goto out; + } + error = 1; + + spin_lock(&info->lock); + ptr = shmem_swp_entry(info, idx, NULL); + if (ptr && ptr->val == entry.val) { + error = add_to_page_cache_locked(page, inode->i_mapping, + idx, GFP_NOWAIT); + /* does mem_cgroup_uncharge_cache_page on error */ + } else /* we must compensate for our precharge above */ + mem_cgroup_uncharge_cache_page(page); if (error == -EEXIST) { - struct page *filepage = find_get_page(mapping, idx); + struct page *filepage = find_get_page(inode->i_mapping, idx); error = 1; if (filepage) { /* @@ -972,8 +990,14 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s swap_free(entry); error = 1; /* not an error, but entry was found */ } - shmem_swp_unmap(ptr); + if (ptr) + shmem_swp_unmap(ptr); spin_unlock(&info->lock); + radix_tree_preload_end(); +out: + unlock_page(page); + page_cache_release(page); + iput(inode); /* allows for NULL */ return error; } @@ -985,26 +1009,6 @@ int shmem_unuse(swp_entry_t entry, struct page *page) struct list_head *p, *next; struct shmem_inode_info *info; int found = 0; - int error; - - /* - * Charge page using GFP_KERNEL while we can wait, before taking - * the shmem_swaplist_mutex which might hold up shmem_writepage(). - * Charged back to the user (not to caller) when swap account is used. - * add_to_page_cache() will be called with GFP_NOWAIT. - */ - error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); - if (error) - goto out; - /* - * Try to preload while we can wait, to not make a habit of - * draining atomic reserves; but don't latch on to this cpu, - * it's okay if sometimes we get rescheduled after this. - */ - error = radix_tree_preload(GFP_KERNEL); - if (error) - goto uncharge; - radix_tree_preload_end(); mutex_lock(&shmem_swaplist_mutex); list_for_each_safe(p, next, &shmem_swaplist) { @@ -1012,19 +1016,17 @@ int shmem_unuse(swp_entry_t entry, struct page *page) found = shmem_unuse_inode(info, entry, page); cond_resched(); if (found) - break; + goto out; } mutex_unlock(&shmem_swaplist_mutex); - -uncharge: - if (!found) - mem_cgroup_uncharge_cache_page(page); - if (found < 0) - error = found; -out: + /* + * Can some race bring us here? 
We've been holding page lock, + * so I think not; but would rather try again later than BUG() + */ unlock_page(page); page_cache_release(page); - return error; +out: + return (found < 0) ? found : 0; } /* @@ -1062,25 +1064,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) else swap.val = 0; - /* - * Add inode to shmem_unuse()'s list of swapped-out inodes, - * if it's not already there. Do it now because we cannot take - * mutex while holding spinlock, and must do so before the page - * is moved to swap cache, when its pagelock no longer protects - * the inode from eviction. But don't unlock the mutex until - * we've taken the spinlock, because shmem_unuse_inode() will - * prune a !swapped inode from the swaplist under both locks. - */ - if (swap.val) { - mutex_lock(&shmem_swaplist_mutex); - if (list_empty(&info->swaplist)) - list_add_tail(&info->swaplist, &shmem_swaplist); - } - spin_lock(&info->lock); - if (swap.val) - mutex_unlock(&shmem_swaplist_mutex); - if (index >= info->next_index) { BUG_ON(!(info->flags & SHMEM_TRUNCATE)); goto unlock; @@ -1100,10 +1084,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) delete_from_page_cache(page); shmem_swp_set(info, entry, swap.val); shmem_swp_unmap(entry); + if (list_empty(&info->swaplist)) + inode = igrab(inode); + else + inode = NULL; spin_unlock(&info->lock); swap_shmem_alloc(swap); BUG_ON(page_mapped(page)); swap_writepage(page, wbc); + if (inode) { + mutex_lock(&shmem_swaplist_mutex); + /* move instead of add in case we're racing */ + list_move_tail(&info->swaplist, &shmem_swaplist); + mutex_unlock(&shmem_swaplist_mutex); + iput(inode); + } return 0; } @@ -1405,14 +1400,20 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, if (sbinfo->max_blocks) { if (percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) >= 0 || - shmem_acct_block(info->flags)) - goto nospace; + shmem_acct_block(info->flags)) { + spin_unlock(&info->lock); + error = -ENOSPC; + goto failed; + } percpu_counter_inc(&sbinfo->used_blocks); spin_lock(&inode->i_lock); inode->i_blocks += BLOCKS_PER_PAGE; spin_unlock(&inode->i_lock); - } else if (shmem_acct_block(info->flags)) - goto nospace; + } else if (shmem_acct_block(info->flags)) { + spin_unlock(&info->lock); + error = -ENOSPC; + goto failed; + } if (!filepage) { int ret; @@ -1492,24 +1493,6 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, error = 0; goto out; -nospace: - /* - * Perhaps the page was brought in from swap between find_lock_page - * and taking info->lock? We allow for that at add_to_page_cache_lru, - * but must also avoid reporting a spurious ENOSPC while working on a - * full tmpfs. (When filepage has been passed in to shmem_getpage, it - * is already in page cache, which prevents this race from occurring.) - */ - if (!filepage) { - struct page *page = find_get_page(mapping, idx); - if (page) { - spin_unlock(&info->lock); - page_cache_release(page); - goto repeat; - } - } - spin_unlock(&info->lock); - error = -ENOSPC; failed: if (*pagep != filepage) { unlock_page(filepage); diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c index 9d2e5e46bf09..94d2a33a866e 100644 --- a/trunk/mm/slub.c +++ b/trunk/mm/slub.c @@ -1940,7 +1940,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, * Since this is without lock semantics the protection is only against * code executing on this cpu *not* from access by other cpus. 
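The mm/slub.c hunks around this point switch between irqsafe_cpu_cmpxchg_double() and this_cpu_cmpxchg_double(); either way the fast path commits a new freelist pointer together with a bumped transaction id in one compare-and-exchange, so a concurrent update (or migration to another CPU) makes the commit fail and the operation retries. A userspace sketch of the pointer-plus-generation-counter commit using C11 atomics; this is a simplified analogue, not the SLUB code, it sidesteps reclamation by using static nodes, and on some toolchains the 16-byte atomic is provided by libatomic:

#include <stdio.h>
#include <stdatomic.h>

struct node {
	struct node *next;
	int val;
};

/* pointer and generation counter are read and committed together,
 * the role the SLUB tid plays in the hunks above */
struct head {
	struct node *first;
	unsigned long gen;
};

static _Atomic struct head stack_head;

static void push(struct node *n)
{
	struct head old = atomic_load(&stack_head);
	struct head upd;

	do {
		n->next = old.first;
		upd.first = n;
		upd.gen = old.gen + 1;	/* every successful commit bumps the counter */
	} while (!atomic_compare_exchange_weak(&stack_head, &old, upd));
}

static struct node *pop(void)
{
	struct head old = atomic_load(&stack_head);
	struct head upd;

	do {
		if (!old.first)
			return NULL;
		upd.first = old.first->next;
		upd.gen = old.gen + 1;	/* a racing push/pop changes gen, forcing a retry */
	} while (!atomic_compare_exchange_weak(&stack_head, &old, upd));
	return old.first;
}

int main(void)
{
	static struct node a = { .val = 1 }, b = { .val = 2 };
	struct node *x, *y;

	push(&a);
	push(&b);
	x = pop();
	y = pop();
	printf("%d %d\n", x ? x->val : -1, y ? y->val : -1);	/* 2 1 */
	return 0;
}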
*/ - if (unlikely(!irqsafe_cpu_cmpxchg_double( + if (unlikely(!this_cpu_cmpxchg_double( s->cpu_slab->freelist, s->cpu_slab->tid, object, tid, get_freepointer(s, object), next_tid(tid)))) { @@ -2145,7 +2145,7 @@ static __always_inline void slab_free(struct kmem_cache *s, set_freepointer(s, object, c->freelist); #ifdef CONFIG_CMPXCHG_LOCAL - if (unlikely(!irqsafe_cpu_cmpxchg_double( + if (unlikely(!this_cpu_cmpxchg_double( s->cpu_slab->freelist, s->cpu_slab->tid, c->freelist, tid, object, next_tid(tid)))) { diff --git a/trunk/mm/swap.c b/trunk/mm/swap.c index 5602f1a1b1e7..a448db377cb0 100644 --- a/trunk/mm/swap.c +++ b/trunk/mm/swap.c @@ -396,9 +396,6 @@ static void lru_deactivate_fn(struct page *page, void *arg) if (!PageLRU(page)) return; - if (PageUnevictable(page)) - return; - /* Some processes are using the page */ if (page_mapped(page)) return; diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c index 8bfd45050a61..f6b435c80079 100644 --- a/trunk/mm/vmscan.c +++ b/trunk/mm/vmscan.c @@ -937,7 +937,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, * back off and wait for congestion to clear because further reclaim * will encounter the same problem */ - if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc)) + if (nr_dirty == nr_congested && nr_dirty != 0) zone_set_flag(zone, ZONE_CONGESTED); free_page_list(&free_pages); diff --git a/trunk/net/8021q/vlan.c b/trunk/net/8021q/vlan.c index 0eb1a886b370..7850412f52b7 100644 --- a/trunk/net/8021q/vlan.c +++ b/trunk/net/8021q/vlan.c @@ -124,9 +124,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) grp->nr_vlans--; - if (vlan->flags & VLAN_FLAG_GVRP) - vlan_gvrp_request_leave(dev); - vlan_group_set_device(grp, vlan_id, NULL); if (!grp->killall) synchronize_net(); diff --git a/trunk/net/8021q/vlan_dev.c b/trunk/net/8021q/vlan_dev.c index b2ff6c8d3603..e34ea9e5e28b 100644 --- a/trunk/net/8021q/vlan_dev.c +++ b/trunk/net/8021q/vlan_dev.c @@ -487,6 +487,9 @@ static int vlan_dev_stop(struct net_device *dev) struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; + if (vlan->flags & VLAN_FLAG_GVRP) + vlan_gvrp_request_leave(dev); + dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); if (dev->flags & IFF_ALLMULTI) diff --git a/trunk/net/9p/client.c b/trunk/net/9p/client.c index a9aa2dd66482..77367745be9b 100644 --- a/trunk/net/9p/client.c +++ b/trunk/net/9p/client.c @@ -614,7 +614,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) 
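The net/9p/trans_common.c hunk a little further below pins the user buffer with get_user_pages_fast() and needs the offset of the buffer within its first page, how many payload bytes that first page holds, and how many pages the buffer spans. The arithmetic is worth seeing on its own (PAGE_SIZE and the variable names are assumptions of this sketch):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	uintptr_t buf = 0x10001f00UL;	/* example user address */
	size_t len = 10000;

	size_t off = buf & (PAGE_SIZE - 1);	/* offset inside the first page */
	size_t first_page_bytes = PAGE_SIZE - off;	/* payload that fits in page 0 */
	size_t nr_pages = (off + len + PAGE_SIZE - 1) / PAGE_SIZE;	/* pages spanned */

	if (first_page_bytes > len)
		first_page_bytes = len;	/* short request: the first page is enough */

	printf("off=%zu first=%zu pages=%zu\n", off, first_page_bytes, nr_pages);
	return 0;
}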
err = c->trans_mod->request(c, req); if (err < 0) { - if (err != -ERESTARTSYS && err != -EFAULT) + if (err != -ERESTARTSYS) c->status = Disconnected; goto reterr; } diff --git a/trunk/net/9p/protocol.c b/trunk/net/9p/protocol.c index a873277cb996..b58a501cf3d1 100644 --- a/trunk/net/9p/protocol.c +++ b/trunk/net/9p/protocol.c @@ -674,7 +674,6 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, } strcpy(dirent->d_name, nameptr); - kfree(nameptr); out: return fake_pdu.offset; diff --git a/trunk/net/9p/trans_common.c b/trunk/net/9p/trans_common.c index 9a70ebdec56e..e883172f9aa2 100644 --- a/trunk/net/9p/trans_common.c +++ b/trunk/net/9p/trans_common.c @@ -63,7 +63,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, int nr_pages, u8 rw) { uint32_t first_page_bytes = 0; - int32_t pdata_mapped_pages; + uint32_t pdata_mapped_pages; struct trans_rpage_info *rpinfo; *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); @@ -75,9 +75,14 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, rpinfo = req->tc->private; pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, nr_pages, rw, &rpinfo->rp_data[0]); - if (pdata_mapped_pages <= 0) - return pdata_mapped_pages; + if (pdata_mapped_pages < 0) { + printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p" + "nr_pages:%d\n", pdata_mapped_pages, + req->tc->pubuf, nr_pages); + pdata_mapped_pages = 0; + return -EIO; + } rpinfo->rp_nr_pages = pdata_mapped_pages; if (*pdata_off) { *pdata_len = first_page_bytes; diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c index b5a8afc2be33..c83f618282f7 100644 --- a/trunk/net/bluetooth/hci_core.c +++ b/trunk/net/bluetooth/hci_core.c @@ -587,8 +587,10 @@ static int hci_dev_do_close(struct hci_dev *hdev) hci_req_cancel(hdev, ENODEV); hci_req_lock(hdev); + /* Stop timer, it might be running */ + del_timer_sync(&hdev->cmd_timer); + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { - del_timer_sync(&hdev->cmd_timer); hci_req_unlock(hdev); return 0; } @@ -627,7 +629,6 @@ static int hci_dev_do_close(struct hci_dev *hdev) /* Drop last sent command */ if (hdev->sent_cmd) { - del_timer_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } diff --git a/trunk/net/bluetooth/hci_event.c b/trunk/net/bluetooth/hci_event.c index b2570159a044..cebe7588469f 100644 --- a/trunk/net/bluetooth/hci_event.c +++ b/trunk/net/bluetooth/hci_event.c @@ -2387,6 +2387,8 @@ static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *s if (!conn) goto unlock; + hci_conn_hold(conn); + conn->remote_cap = ev->capability; conn->remote_oob = ev->oob_data; conn->remote_auth = ev->authentication; diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index 2c8dd4494c63..ca27f3a41536 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -1051,7 +1051,6 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq) tx_skb = skb_clone(skb, GFP_ATOMIC); bt_cb(skb)->retries++; control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); - control &= L2CAP_CTRL_SAR; if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { control |= L2CAP_CTRL_FINAL; diff --git a/trunk/net/bridge/br_input.c b/trunk/net/bridge/br_input.c index 0c7badad62af..e2160792e1bc 100644 --- a/trunk/net/bridge/br_input.c +++ b/trunk/net/bridge/br_input.c @@ -164,7 +164,7 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb) goto drop; /* If STP is turned off, then forward */ - if 
(p->br->stp_enabled == BR_NO_STP && dest[5] == 0) + if (p->br->stp_enabled == BR_NO_STP) goto forward; if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, diff --git a/trunk/net/bridge/br_netfilter.c b/trunk/net/bridge/br_netfilter.c index 74ef4d4846a4..f3bc322c5891 100644 --- a/trunk/net/bridge/br_netfilter.c +++ b/trunk/net/bridge/br_netfilter.c @@ -737,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, nf_bridge->mask |= BRNF_PKT_TYPE; } - if (pf == PF_INET && br_parse_ip_options(skb)) + if (br_parse_ip_options(skb)) return NF_DROP; /* The physdev module checks on this */ diff --git a/trunk/net/bridge/netfilter/ebtables.c b/trunk/net/bridge/netfilter/ebtables.c index 1a92b369c820..893669caa8de 100644 --- a/trunk/net/bridge/netfilter/ebtables.c +++ b/trunk/net/bridge/netfilter/ebtables.c @@ -1766,7 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info, newinfo->entries_size = size; - xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); + xt_compat_init_offsets(AF_INET, info->nentries); return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); } @@ -1882,7 +1882,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, struct xt_match *match; struct xt_target *wt; void *dst = NULL; - int off, pad = 0; + int off, pad = 0, ret = 0; unsigned int size_kern, entry_offset, match_size = mwt->match_size; strlcpy(name, mwt->u.name, sizeof(name)); @@ -1935,6 +1935,13 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, break; } + if (!dst) { + ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, + off + ebt_compat_entry_padsize()); + if (ret < 0) + return ret; + } + state->buf_kern_offset += match_size + off; state->buf_user_offset += match_size; pad = XT_ALIGN(size_kern) - size_kern; @@ -2009,6 +2016,50 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, return growth; } +#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct compat_ebt_entry_mwt *__watcher; \ + \ + for (__i = e->watchers_offset; \ + __i < (e)->target_offset; \ + __i += __watcher->watcher_size + \ + sizeof(struct compat_ebt_entry_mwt)) { \ + __watcher = (void *)(e) + __i; \ + __ret = fn(__watcher , ## args); \ + if (__ret != 0) \ + break; \ + } \ + if (__ret == 0) { \ + if (__i != (e)->target_offset) \ + __ret = -EINVAL; \ + } \ + __ret; \ +}) + +#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \ +({ \ + unsigned int __i; \ + int __ret = 0; \ + struct compat_ebt_entry_mwt *__match; \ + \ + for (__i = sizeof(struct ebt_entry); \ + __i < (e)->watchers_offset; \ + __i += __match->match_size + \ + sizeof(struct compat_ebt_entry_mwt)) { \ + __match = (void *)(e) + __i; \ + __ret = fn(__match , ## args); \ + if (__ret != 0) \ + break; \ + } \ + if (__ret == 0) { \ + if (__i != (e)->watchers_offset) \ + __ret = -EINVAL; \ + } \ + __ret; \ +}) + /* called for all ebt_entry structures. 
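The EBT_COMPAT_WATCHER_ITERATE/EBT_COMPAT_MATCH_ITERATE macros added back above walk variable-sized records laid end to end inside an entry, advancing by each record's declared size plus its header and insisting the walk ends exactly on the expected offset. A plain-C sketch of that flat-buffer walk with an invented record layout:

#include <stdio.h>
#include <string.h>

struct rec_hdr {
	unsigned int size;	/* size of the payload that follows this header */
	char name[8];
};

/* walk [buf, buf + total) and fail unless the records tile it exactly */
static int walk_records(const unsigned char *buf, unsigned int total)
{
	unsigned int off = 0;

	while (off < total) {
		struct rec_hdr h;

		if (off + sizeof(h) > total)
			return -1;	/* truncated header */
		memcpy(&h, buf + off, sizeof(h));
		if (off + sizeof(h) + h.size > total)
			return -1;	/* payload runs past the buffer */
		printf("record '%s', payload %u bytes\n", h.name, h.size);
		off += sizeof(h) + h.size;	/* advance by header plus its payload */
	}
	return 0;	/* landed exactly on the end of the buffer */
}

int main(void)
{
	unsigned char buf[2 * (sizeof(struct rec_hdr) + 4)];
	struct rec_hdr a = { .size = 4 }, b = { .size = 4 };

	strcpy(a.name, "first");
	strcpy(b.name, "second");
	memset(buf, 0, sizeof(buf));
	memcpy(buf, &a, sizeof(a));
	memcpy(buf + sizeof(a) + a.size, &b, sizeof(b));

	return walk_records(buf, sizeof(buf)) ? 1 : 0;
}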
*/ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, unsigned int *total, @@ -2081,14 +2132,6 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, } } - if (state->buf_kern_start == NULL) { - unsigned int offset = buf_start - (char *) base; - - ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); - if (ret < 0) - return ret; - } - startoff = state->buf_user_offset - startoff; BUG_ON(*total < startoff); @@ -2197,7 +2240,6 @@ static int compat_do_replace(struct net *net, void __user *user, xt_compat_lock(NFPROTO_BRIDGE); - xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (ret < 0) goto out_unlock; diff --git a/trunk/net/can/bcm.c b/trunk/net/can/bcm.c index 8a6a05e7c3c8..57b1aed79014 100644 --- a/trunk/net/can/bcm.c +++ b/trunk/net/can/bcm.c @@ -1427,14 +1427,9 @@ static int bcm_init(struct sock *sk) static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; - struct bcm_sock *bo; + struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op, *next; - if (sk == NULL) - return 0; - - bo = bcm_sk(sk); - /* remove bcm_ops, timer, rx_unregister(), etc. */ unregister_netdevice_notifier(&bo->notifier); diff --git a/trunk/net/can/raw.c b/trunk/net/can/raw.c index 0eb39a7fdf64..649acfa7c70a 100644 --- a/trunk/net/can/raw.c +++ b/trunk/net/can/raw.c @@ -305,12 +305,7 @@ static int raw_init(struct sock *sk) static int raw_release(struct socket *sock) { struct sock *sk = sock->sk; - struct raw_sock *ro; - - if (!sk) - return 0; - - ro = raw_sk(sk); + struct raw_sock *ro = raw_sk(sk); unregister_netdevice_notifier(&ro->notifier); diff --git a/trunk/net/ceph/messenger.c b/trunk/net/ceph/messenger.c index e15a82ccc05f..05f357828a2f 100644 --- a/trunk/net/ceph/messenger.c +++ b/trunk/net/ceph/messenger.c @@ -2267,19 +2267,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) m->more_to_follow = false; m->pool = NULL; - /* middle */ - m->middle = NULL; - - /* data */ - m->nr_pages = 0; - m->page_alignment = 0; - m->pages = NULL; - m->pagelist = NULL; - m->bio = NULL; - m->bio_iter = NULL; - m->bio_seg = 0; - m->trail = NULL; - /* front */ if (front_len) { if (front_len > PAGE_CACHE_SIZE) { @@ -2299,6 +2286,19 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) } m->front.iov_len = front_len; + /* middle */ + m->middle = NULL; + + /* data */ + m->nr_pages = 0; + m->page_alignment = 0; + m->pages = NULL; + m->pagelist = NULL; + m->bio = NULL; + m->bio_iter = NULL; + m->bio_seg = 0; + m->trail = NULL; + dout("ceph_msg_new %p front %d\n", m, front_len); return m; diff --git a/trunk/net/ceph/osd_client.c b/trunk/net/ceph/osd_client.c index 6b5dda1cb5df..5a80f41c0cba 100644 --- a/trunk/net/ceph/osd_client.c +++ b/trunk/net/ceph/osd_client.c @@ -470,8 +470,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, snapc, ops, use_mempool, GFP_NOFS, NULL, NULL); - if (!req) - return NULL; + if (IS_ERR(req)) + return req; /* calculate max write size */ calc_layout(osdc, vino, layout, off, plen, req, ops); diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index b624fe4d9bd7..c2ac599fa0f6 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -1284,13 +1284,11 @@ static int dev_close_many(struct list_head *head) */ int dev_close(struct net_device *dev) { - if (dev->flags & IFF_UP) { - LIST_HEAD(single); + LIST_HEAD(single); - list_add(&dev->unreg_list, &single); - dev_close_many(&single); - 
list_del(&single); - } + list_add(&dev->unreg_list, &single); + dev_close_many(&single); + list_del(&single); return 0; } EXPORT_SYMBOL(dev_close); @@ -4775,7 +4773,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm * is never reached */ WARN_ON(1); - err = -ENOTTY; + err = -EINVAL; break; } @@ -5043,7 +5041,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) /* Set the per device memory buffer space. * Not applicable in our case */ case SIOCSIFLINK: - return -ENOTTY; + return -EINVAL; /* * Unknown or private ioctl. @@ -5064,7 +5062,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) /* Take care of Wireless Extensions */ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) return wext_handle_ioctl(net, &ifr, cmd, arg); - return -ENOTTY; + return -EINVAL; } } @@ -5186,27 +5184,27 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) /* Fix illegal checksum combinations */ if ((features & NETIF_F_HW_CSUM) && (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_warn(dev, "mixed HW and IP checksum settings.\n"); + netdev_info(dev, "mixed HW and IP checksum settings.\n"); features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } if ((features & NETIF_F_NO_CSUM) && (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_warn(dev, "mixed no checksumming and other settings.\n"); + netdev_info(dev, "mixed no checksumming and other settings.\n"); features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); } /* Fix illegal SG+CSUM combinations. */ if ((features & NETIF_F_SG) && !(features & NETIF_F_ALL_CSUM)) { - netdev_dbg(dev, - "Dropping NETIF_F_SG since no checksum feature.\n"); + netdev_info(dev, + "Dropping NETIF_F_SG since no checksum feature.\n"); features &= ~NETIF_F_SG; } /* TSO requires that SG is present as well. */ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { - netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); + netdev_info(dev, "Dropping TSO features since no SG feature.\n"); features &= ~NETIF_F_ALL_TSO; } @@ -5216,7 +5214,7 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) /* Software GSO depends on SG. */ if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { - netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); + netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); features &= ~NETIF_F_GSO; } @@ -5226,13 +5224,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) if (!((features & NETIF_F_GEN_CSUM) || (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_dbg(dev, + netdev_info(dev, "Dropping NETIF_F_UFO since no checksum offload features.\n"); features &= ~NETIF_F_UFO; } if (!(features & NETIF_F_SG)) { - netdev_dbg(dev, + netdev_info(dev, "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); features &= ~NETIF_F_UFO; } @@ -5414,6 +5412,12 @@ int register_netdevice(struct net_device *dev) dev->features |= NETIF_F_SOFT_FEATURES; dev->wanted_features = dev->features & dev->hw_features; + /* Avoid warning from netdev_fix_features() for GSO without SG */ + if (!(dev->wanted_features & NETIF_F_SG)) { + dev->wanted_features &= ~NETIF_F_GSO; + dev->features &= ~NETIF_F_GSO; + } + /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, * vlan_dev_init() will do the dev->features check, so these features * are enabled only if supported by underlying device. 
diff --git a/trunk/net/dccp/options.c b/trunk/net/dccp/options.c
index 4b2ab657ac8e..f06ffcfc8d71 100644
--- a/trunk/net/dccp/options.c
+++ b/trunk/net/dccp/options.c
@@ -123,8 +123,6 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
         case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R:
                 if (pkt_type == DCCP_PKT_DATA)      /* RFC 4340, 6 */
                         break;
-                if (len == 0)
-                        goto out_invalid_option;
                 rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,
                                              *value, value + 1, len - 1);
                 if (rc)
diff --git a/trunk/net/dsa/Kconfig b/trunk/net/dsa/Kconfig
index c53ded2a98df..87bb5f4de0e8 100644
--- a/trunk/net/dsa/Kconfig
+++ b/trunk/net/dsa/Kconfig
@@ -41,12 +41,12 @@ config NET_DSA_MV88E6XXX_NEED_PPU
         default n
 
 config NET_DSA_MV88E6131
-        bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+        bool "Marvell 88E6095/6095F/6131 ethernet switch chip support"
         select NET_DSA_MV88E6XXX
         select NET_DSA_MV88E6XXX_NEED_PPU
         select NET_DSA_TAG_DSA
         ---help---
-          This enables support for the Marvell 88E6085/6095/6095F/6131
+          This enables support for the Marvell 88E6095/6095F/6131
           ethernet switch chips.
 
 config NET_DSA_MV88E6123_61_65
diff --git a/trunk/net/dsa/mv88e6131.c b/trunk/net/dsa/mv88e6131.c
index 45f7411e90ba..3da418894efc 100644
--- a/trunk/net/dsa/mv88e6131.c
+++ b/trunk/net/dsa/mv88e6131.c
@@ -207,15 +207,8 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
          * mode, but do not enable forwarding of unknown unicasts.
          */
         val = 0x0433;
-        if (p == dsa_upstream_port(ds)) {
+        if (p == dsa_upstream_port(ds))
                 val |= 0x0104;
-                /*
-                 * On 6085, unknown multicast forward is controlled
-                 * here rather than in Port Control 2 register.
-                 */
-                if (ps->id == ID_6085)
-                        val |= 0x0008;
-        }
         if (ds->dsa_port_mask & (1 << p))
                 val |= 0x0100;
         REG_WRITE(addr, 0x04, val);
@@ -258,19 +251,10 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
          * If this is the upstream port for this switch, enable
          * forwarding of unknown multicast addresses.
          */
-        if (ps->id == ID_6085)
-                /*
-                 * on 6085, bits 3:0 are reserved, bit 6 control ARP
-                 * mirroring, and multicast forward is handled in
-                 * Port Control register.
-                 */
-                REG_WRITE(addr, 0x08, 0x0080);
-        else {
-                val = 0x0080 | dsa_upstream_port(ds);
-                if (p == dsa_upstream_port(ds))
-                        val |= 0x0040;
-                REG_WRITE(addr, 0x08, val);
-        }
+        val = 0x0080 | dsa_upstream_port(ds);
+        if (p == dsa_upstream_port(ds))
+                val |= 0x0040;
+        REG_WRITE(addr, 0x08, val);
 
         /*
          * Rate Control: disable ingress rate limiting.
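
For readers skimming the mv88e6131.c hunks above: the driver assembles a port-control word by OR-ing flag bits onto a base value and then issues a single register write. The following is an illustrative userspace sketch of that pattern only; the constants, the helper names, and the reg_write() stub are invented here and are not the driver's API or part of this patch.

    /* Illustrative sketch only: hypothetical constants and helpers, not the
     * mv88e6131 driver.  It mirrors the pattern in the hunk above: start from
     * a base value, OR in feature bits, then do one register write. */
    #include <stdint.h>
    #include <stdio.h>

    #define PORT_CTRL_BASE      0x0433  /* base port-control value, as in the hunk */
    #define PORT_CTRL_UPSTREAM  0x0104  /* hypothetical: upstream-port bits */
    #define PORT_CTRL_DSA_TAG   0x0100  /* hypothetical: DSA-tagged port bit */

    static void reg_write(int port, int reg, uint16_t val)
    {
            /* stand-in for the driver's REG_WRITE(); just print the access */
            printf("port %d reg 0x%02x <- 0x%04x\n", port, reg, (unsigned int)val);
    }

    static void setup_port_ctrl(int port, int upstream_port, uint32_t dsa_mask)
    {
            uint16_t val = PORT_CTRL_BASE;

            if (port == upstream_port)
                    val |= PORT_CTRL_UPSTREAM;
            if (dsa_mask & (1u << port))
                    val |= PORT_CTRL_DSA_TAG;
            reg_write(port, 0x04, val);
    }

    int main(void)
    {
            setup_port_ctrl(5, 5, 1u << 5);  /* upstream, DSA-tagged port */
            setup_port_ctrl(2, 5, 0);        /* ordinary downstream port  */
            return 0;
    }
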
diff --git a/trunk/net/ipv4/devinet.c b/trunk/net/ipv4/devinet.c index cd9ca0811cfa..5345b0bee6df 100644 --- a/trunk/net/ipv4/devinet.c +++ b/trunk/net/ipv4/devinet.c @@ -1680,7 +1680,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf) return; cnf->sysctl = NULL; - unregister_net_sysctl_table(t->sysctl_header); + unregister_sysctl_table(t->sysctl_header); kfree(t->dev_name); kfree(t); } diff --git a/trunk/net/ipv4/fib_trie.c b/trunk/net/ipv4/fib_trie.c index 5fe9b8b41df3..e9013d6c1f51 100644 --- a/trunk/net/ipv4/fib_trie.c +++ b/trunk/net/ipv4/fib_trie.c @@ -1978,6 +1978,9 @@ struct fib_table *fib_trie_table(u32 id) t = (struct trie *) tb->tb_data; memset(t, 0, sizeof(*t)); + if (id == RT_TABLE_LOCAL) + pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION); + return tb; } diff --git a/trunk/net/ipv4/ip_fragment.c b/trunk/net/ipv4/ip_fragment.c index b1d282f11be7..a1151b8adf3c 100644 --- a/trunk/net/ipv4/ip_fragment.c +++ b/trunk/net/ipv4/ip_fragment.c @@ -223,30 +223,31 @@ static void ip_expire(unsigned long arg) if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { struct sk_buff *head = qp->q.fragments; - const struct iphdr *iph; - int err; rcu_read_lock(); head->dev = dev_get_by_index_rcu(net, qp->iif); if (!head->dev) goto out_rcu_unlock; - /* skb dst is stale, drop it, and perform route lookup again */ - skb_dst_drop(head); - iph = ip_hdr(head); - err = ip_route_input_noref(head, iph->daddr, iph->saddr, - iph->tos, head->dev); - if (err) - goto out_rcu_unlock; - /* - * Only an end host needs to send an ICMP - * "Fragment Reassembly Timeout" message, per RFC792. + * Only search router table for the head fragment, + * when defraging timeout at PRE_ROUTING HOOK. */ - if (qp->user == IP_DEFRAG_CONNTRACK_IN && - skb_rtable(head)->rt_type != RTN_LOCAL) - goto out_rcu_unlock; + if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) { + const struct iphdr *iph = ip_hdr(head); + int err = ip_route_input(head, iph->daddr, iph->saddr, + iph->tos, head->dev); + if (unlikely(err)) + goto out_rcu_unlock; + + /* + * Only an end host needs to send an ICMP + * "Fragment Reassembly Timeout" message, per RFC792. + */ + if (skb_rtable(head)->rt_type != RTN_LOCAL) + goto out_rcu_unlock; + } /* Send an ICMP "Fragment Reassembly Timeout" message. 
*/ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index 99e6e4bb1c72..c1acf69858fd 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -2690,12 +2690,6 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) { } -static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst, - unsigned long old) -{ - return NULL; -} - static struct dst_ops ipv4_dst_blackhole_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), @@ -2704,7 +2698,6 @@ static struct dst_ops ipv4_dst_blackhole_ops = { .default_mtu = ipv4_blackhole_default_mtu, .default_advmss = ipv4_default_advmss, .update_pmtu = ipv4_rt_blackhole_update_pmtu, - .cow_metrics = ipv4_rt_blackhole_cow_metrics, }; struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) diff --git a/trunk/net/ipv4/tcp_cubic.c b/trunk/net/ipv4/tcp_cubic.c index f376b05cca81..34340c9c95fa 100644 --- a/trunk/net/ipv4/tcp_cubic.c +++ b/trunk/net/ipv4/tcp_cubic.c @@ -93,7 +93,6 @@ struct bictcp { u32 ack_cnt; /* number of acks */ u32 tcp_cwnd; /* estimated tcp cwnd */ #define ACK_RATIO_SHIFT 4 -#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT) u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */ u8 sample_cnt; /* number of samples to decide curr_rtt */ u8 found; /* the exit point is found? */ @@ -399,12 +398,8 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) u32 delay; if (icsk->icsk_ca_state == TCP_CA_Open) { - u32 ratio = ca->delayed_ack; - - ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT; - ratio += cnt; - - ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT); + cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; + ca->delayed_ack += cnt; } /* Some calls are for duplicates without timetamps */ diff --git a/trunk/net/ipv4/xfrm4_output.c b/trunk/net/ipv4/xfrm4_output.c index 2d51840e53a1..571aa96a175c 100644 --- a/trunk/net/ipv4/xfrm4_output.c +++ b/trunk/net/ipv4/xfrm4_output.c @@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) } EXPORT_SYMBOL(xfrm4_prepare_output); -int xfrm4_output_finish(struct sk_buff *skb) +static int xfrm4_output_finish(struct sk_buff *skb) { #ifdef CONFIG_NETFILTER if (!skb_dst(skb)->xfrm) { @@ -86,11 +86,7 @@ int xfrm4_output_finish(struct sk_buff *skb) int xfrm4_output(struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); - struct xfrm_state *x = dst->xfrm; - return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, - NULL, dst->dev, - x->outer_mode->afinfo->output_finish, + NULL, skb_dst(skb)->dev, xfrm4_output_finish, !(IPCB(skb)->flags & IPSKB_REROUTED)); } diff --git a/trunk/net/ipv4/xfrm4_state.c b/trunk/net/ipv4/xfrm4_state.c index 805d63ef4340..1717c64628d1 100644 --- a/trunk/net/ipv4/xfrm4_state.c +++ b/trunk/net/ipv4/xfrm4_state.c @@ -78,7 +78,6 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = { .init_tempsel = __xfrm4_init_tempsel, .init_temprop = xfrm4_init_temprop, .output = xfrm4_output, - .output_finish = xfrm4_output_finish, .extract_input = xfrm4_extract_input, .extract_output = xfrm4_extract_output, .transport_finish = xfrm4_transport_finish, diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index a7bda0757053..1493534116df 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -4537,7 +4537,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p) t = p->sysctl; p->sysctl = NULL; - unregister_net_sysctl_table(t->sysctl_header); + unregister_sysctl_table(t->sysctl_header); 
kfree(t->dev_name); kfree(t); } diff --git a/trunk/net/ipv6/esp6.c b/trunk/net/ipv6/esp6.c index 59dccfbb5b11..5aa8ec88f194 100644 --- a/trunk/net/ipv6/esp6.c +++ b/trunk/net/ipv6/esp6.c @@ -371,7 +371,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); - sg = asg + sglists; + sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; diff --git a/trunk/net/ipv6/netfilter/ip6t_REJECT.c b/trunk/net/ipv6/netfilter/ip6t_REJECT.c index a5a4c5dd5396..28e74488a329 100644 --- a/trunk/net/ipv6/netfilter/ip6t_REJECT.c +++ b/trunk/net/ipv6/netfilter/ip6t_REJECT.c @@ -45,8 +45,6 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) int tcphoff, needs_ack; const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); struct ipv6hdr *ip6h; -#define DEFAULT_TOS_VALUE 0x0U - const __u8 tclass = DEFAULT_TOS_VALUE; struct dst_entry *dst = NULL; u8 proto; struct flowi6 fl6; @@ -126,7 +124,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) skb_put(nskb, sizeof(struct ipv6hdr)); skb_reset_network_header(nskb); ip6h = ipv6_hdr(nskb); - *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); + ip6h->version = 6; ip6h->hop_limit = ip6_dst_hoplimit(dst); ip6h->nexthdr = IPPROTO_TCP; ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); diff --git a/trunk/net/ipv6/route.c b/trunk/net/ipv6/route.c index fd0eec6f88c6..843406f14d7b 100644 --- a/trunk/net/ipv6/route.c +++ b/trunk/net/ipv6/route.c @@ -153,12 +153,6 @@ static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) { } -static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst, - unsigned long old) -{ - return NULL; -} - static struct dst_ops ip6_dst_blackhole_ops = { .family = AF_INET6, .protocol = cpu_to_be16(ETH_P_IPV6), @@ -167,7 +161,6 @@ static struct dst_ops ip6_dst_blackhole_ops = { .default_mtu = ip6_blackhole_default_mtu, .default_advmss = ip6_default_advmss, .update_pmtu = ip6_rt_blackhole_update_pmtu, - .cow_metrics = ip6_rt_blackhole_cow_metrics, }; static const u32 ip6_template_metrics[RTAX_MAX] = { @@ -2019,6 +2012,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->dst.output = ip6_output; rt->rt6i_dev = net->loopback_dev; rt->rt6i_idev = idev; + dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1); rt->dst.obsolete = -1; rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; diff --git a/trunk/net/ipv6/udp.c b/trunk/net/ipv6/udp.c index 9e305d74b3d4..15c37746845e 100644 --- a/trunk/net/ipv6/udp.c +++ b/trunk/net/ipv6/udp.c @@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. 
*/ - if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && + if ((skb_headroom(skb) < frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out; diff --git a/trunk/net/ipv6/xfrm6_output.c b/trunk/net/ipv6/xfrm6_output.c index 49a91c5f5623..8e688b3de9ab 100644 --- a/trunk/net/ipv6/xfrm6_output.c +++ b/trunk/net/ipv6/xfrm6_output.c @@ -79,7 +79,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) } EXPORT_SYMBOL(xfrm6_prepare_output); -int xfrm6_output_finish(struct sk_buff *skb) +static int xfrm6_output_finish(struct sk_buff *skb) { #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; @@ -97,9 +97,9 @@ static int __xfrm6_output(struct sk_buff *skb) if ((x && x->props.mode == XFRM_MODE_TUNNEL) && ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb)))) { - return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); + return ip6_fragment(skb, xfrm6_output_finish); } - return x->outer_mode->afinfo->output_finish(skb); + return xfrm6_output_finish(skb); } int xfrm6_output(struct sk_buff *skb) diff --git a/trunk/net/ipv6/xfrm6_state.c b/trunk/net/ipv6/xfrm6_state.c index 248f0b2a7ee9..afe941e9415c 100644 --- a/trunk/net/ipv6/xfrm6_state.c +++ b/trunk/net/ipv6/xfrm6_state.c @@ -178,7 +178,6 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = { .tmpl_sort = __xfrm6_tmpl_sort, .state_sort = __xfrm6_state_sort, .output = xfrm6_output, - .output_finish = xfrm6_output_finish, .extract_input = xfrm6_extract_input, .extract_output = xfrm6_extract_output, .transport_finish = xfrm6_transport_finish, diff --git a/trunk/net/l2tp/l2tp_ip.c b/trunk/net/l2tp/l2tp_ip.c index 5c04f3e42704..fce9bd3bd3fe 100644 --- a/trunk/net/l2tp/l2tp_ip.c +++ b/trunk/net/l2tp/l2tp_ip.c @@ -667,7 +667,7 @@ MODULE_AUTHOR("James Chapman "); MODULE_DESCRIPTION("L2TP over IP"); MODULE_VERSION("1.0"); -/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like +/* Use the value of SOCK_DGRAM (2) directory, because __stringify does't like * enums */ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP); diff --git a/trunk/net/mac80211/cfg.c b/trunk/net/mac80211/cfg.c index 44049733c4ea..334213571ad0 100644 --- a/trunk/net/mac80211/cfg.c +++ b/trunk/net/mac80211/cfg.c @@ -1504,8 +1504,6 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode old_req; int err; - lockdep_assert_held(&sdata->u.mgd.mtx); - old_req = sdata->u.mgd.req_smps; sdata->u.mgd.req_smps = smps_mode; diff --git a/trunk/net/mac80211/debugfs_netdev.c b/trunk/net/mac80211/debugfs_netdev.c index 9ea7c0d0103f..dacace6b1393 100644 --- a/trunk/net/mac80211/debugfs_netdev.c +++ b/trunk/net/mac80211/debugfs_netdev.c @@ -177,9 +177,9 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; - mutex_lock(&sdata->u.mgd.mtx); + mutex_lock(&local->iflist_mtx); err = __ieee80211_request_smps(sdata, smps_mode); - mutex_unlock(&sdata->u.mgd.mtx); + mutex_unlock(&local->iflist_mtx); return err; } diff --git a/trunk/net/mac80211/tx.c b/trunk/net/mac80211/tx.c index bd1224fd216a..ce4596ed1268 100644 --- a/trunk/net/mac80211/tx.c +++ b/trunk/net/mac80211/tx.c @@ -237,10 +237,6 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) &local->dynamic_ps_disable_work); } - /* Don't restart the timer if we're not disassociated */ - if (!ifmgd->associated) - return TX_CONTINUE; - mod_timer(&local->dynamic_ps_timer, jiffies + 
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); diff --git a/trunk/net/netfilter/ipvs/ip_vs_app.c b/trunk/net/netfilter/ipvs/ip_vs_app.c index 059af3120be7..2dc6de13ac18 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_app.c +++ b/trunk/net/netfilter/ipvs/ip_vs_app.c @@ -572,11 +572,11 @@ static const struct file_operations ip_vs_app_fops = { .open = ip_vs_app_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_net, + .release = seq_release, }; #endif -int __net_init __ip_vs_app_init(struct net *net) +static int __net_init __ip_vs_app_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -585,17 +585,26 @@ int __net_init __ip_vs_app_init(struct net *net) return 0; } -void __net_exit __ip_vs_app_cleanup(struct net *net) +static void __net_exit __ip_vs_app_cleanup(struct net *net) { proc_net_remove(net, "ip_vs_app"); } +static struct pernet_operations ip_vs_app_ops = { + .init = __ip_vs_app_init, + .exit = __ip_vs_app_cleanup, +}; + int __init ip_vs_app_init(void) { - return 0; + int rv; + + rv = register_pernet_subsys(&ip_vs_app_ops); + return rv; } void ip_vs_app_cleanup(void) { + unregister_pernet_subsys(&ip_vs_app_ops); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_conn.c b/trunk/net/netfilter/ipvs/ip_vs_conn.c index bf28ac2fc99b..c97bd45975be 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_conn.c +++ b/trunk/net/netfilter/ipvs/ip_vs_conn.c @@ -1046,7 +1046,7 @@ static const struct file_operations ip_vs_conn_fops = { .open = ip_vs_conn_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_net, + .release = seq_release, }; static const char *ip_vs_origin_name(unsigned flags) @@ -1114,7 +1114,7 @@ static const struct file_operations ip_vs_conn_sync_fops = { .open = ip_vs_conn_sync_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_net, + .release = seq_release, }; #endif @@ -1258,17 +1258,22 @@ int __net_init __ip_vs_conn_init(struct net *net) return 0; } -void __net_exit __ip_vs_conn_cleanup(struct net *net) +static void __net_exit __ip_vs_conn_cleanup(struct net *net) { /* flush all the connection entries first */ ip_vs_conn_flush(net); proc_net_remove(net, "ip_vs_conn"); proc_net_remove(net, "ip_vs_conn_sync"); } +static struct pernet_operations ipvs_conn_ops = { + .init = __ip_vs_conn_init, + .exit = __ip_vs_conn_cleanup, +}; int __init ip_vs_conn_init(void) { int idx; + int retc; /* Compute size and mask */ ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; @@ -1304,14 +1309,17 @@ int __init ip_vs_conn_init(void) rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); } + retc = register_pernet_subsys(&ipvs_conn_ops); + /* calculate the random value for connection hash */ get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); - return 0; + return retc; } void ip_vs_conn_cleanup(void) { + unregister_pernet_subsys(&ipvs_conn_ops); /* Release the empty cache */ kmem_cache_destroy(ip_vs_conn_cachep); vfree(ip_vs_conn_tab); diff --git a/trunk/net/netfilter/ipvs/ip_vs_core.c b/trunk/net/netfilter/ipvs/ip_vs_core.c index a74dae6c5dbc..07accf6b2401 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_core.c +++ b/trunk/net/netfilter/ipvs/ip_vs_core.c @@ -1113,9 +1113,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) return NF_ACCEPT; net = skb_net(skb); - if (!net_ipvs(net)->enable) - return NF_ACCEPT; - ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { @@ -1346,7 +1343,6 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) return NF_ACCEPT; /* The packet 
looks wrong, ignore */ net = skb_net(skb); - pd = ip_vs_proto_data_get(net, cih->protocol); if (!pd) return NF_ACCEPT; @@ -1533,11 +1529,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) IP_VS_DBG_ADDR(af, &iph.daddr), hooknum); return NF_ACCEPT; } - /* ipvs enabled in this netns ? */ - net = skb_net(skb); - if (!net_ipvs(net)->enable) - return NF_ACCEPT; - ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); /* Bad... Do not break raw sockets */ @@ -1571,6 +1562,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); } + net = skb_net(skb); /* Protocol supported? */ pd = ip_vs_proto_data_get(net, iph.protocol); if (unlikely(!pd)) @@ -1596,6 +1588,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) } IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); + net = skb_net(skb); ipvs = net_ipvs(net); /* Check the server status */ if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { @@ -1750,16 +1743,10 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, int (*okfn)(struct sk_buff *)) { int r; - struct net *net; if (ip_hdr(skb)->protocol != IPPROTO_ICMP) return NF_ACCEPT; - /* ipvs enabled in this netns ? */ - net = skb_net(skb); - if (!net_ipvs(net)->enable) - return NF_ACCEPT; - return ip_vs_in_icmp(skb, &r, hooknum); } @@ -1770,16 +1757,10 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, int (*okfn)(struct sk_buff *)) { int r; - struct net *net; if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) return NF_ACCEPT; - /* ipvs enabled in this netns ? */ - net = skb_net(skb); - if (!net_ipvs(net)->enable) - return NF_ACCEPT; - return ip_vs_in_icmp_v6(skb, &r, hooknum); } #endif @@ -1903,70 +1884,19 @@ static int __net_init __ip_vs_init(struct net *net) pr_err("%s(): no memory.\n", __func__); return -ENOMEM; } - /* Hold the beast until a service is registerd */ - ipvs->enable = 0; ipvs->net = net; /* Counters used for creating unique names */ ipvs->gen = atomic_read(&ipvs_netns_cnt); atomic_inc(&ipvs_netns_cnt); net->ipvs = ipvs; - - if (__ip_vs_estimator_init(net) < 0) - goto estimator_fail; - - if (__ip_vs_control_init(net) < 0) - goto control_fail; - - if (__ip_vs_protocol_init(net) < 0) - goto protocol_fail; - - if (__ip_vs_app_init(net) < 0) - goto app_fail; - - if (__ip_vs_conn_init(net) < 0) - goto conn_fail; - - if (__ip_vs_sync_init(net) < 0) - goto sync_fail; - printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", sizeof(struct netns_ipvs), ipvs->gen); return 0; -/* - * Error handling - */ - -sync_fail: - __ip_vs_conn_cleanup(net); -conn_fail: - __ip_vs_app_cleanup(net); -app_fail: - __ip_vs_protocol_cleanup(net); -protocol_fail: - __ip_vs_control_cleanup(net); -control_fail: - __ip_vs_estimator_cleanup(net); -estimator_fail: - return -ENOMEM; } static void __net_exit __ip_vs_cleanup(struct net *net) { - __ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */ - __ip_vs_conn_cleanup(net); - __ip_vs_app_cleanup(net); - __ip_vs_protocol_cleanup(net); - __ip_vs_control_cleanup(net); - __ip_vs_estimator_cleanup(net); - IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); -} - -static void __net_exit __ip_vs_dev_cleanup(struct net *net) -{ - EnterFunction(2); - net_ipvs(net)->enable = 0; /* Disable packet reception */ - __ip_vs_sync_cleanup(net); - LeaveFunction(2); + IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen); } static struct pernet_operations ipvs_core_ops = { @@ -1976,10 +1906,6 @@ static struct pernet_operations ipvs_core_ops = 
{ .size = sizeof(struct netns_ipvs), }; -static struct pernet_operations ipvs_core_dev_ops = { - .exit = __ip_vs_dev_cleanup, -}; - /* * Initialize IP Virtual Server */ @@ -1987,6 +1913,10 @@ static int __init ip_vs_init(void) { int ret; + ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ + if (ret < 0) + return ret; + ip_vs_estimator_init(); ret = ip_vs_control_init(); if (ret < 0) { @@ -2014,28 +1944,15 @@ static int __init ip_vs_init(void) goto cleanup_conn; } - ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ - if (ret < 0) - goto cleanup_sync; - - ret = register_pernet_device(&ipvs_core_dev_ops); - if (ret < 0) - goto cleanup_sub; - ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); if (ret < 0) { pr_err("can't register hooks.\n"); - goto cleanup_dev; + goto cleanup_sync; } pr_info("ipvs loaded.\n"); - return ret; -cleanup_dev: - unregister_pernet_device(&ipvs_core_dev_ops); -cleanup_sub: - unregister_pernet_subsys(&ipvs_core_ops); cleanup_sync: ip_vs_sync_cleanup(); cleanup_conn: @@ -2047,20 +1964,20 @@ static int __init ip_vs_init(void) ip_vs_control_cleanup(); cleanup_estimator: ip_vs_estimator_cleanup(); + unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ return ret; } static void __exit ip_vs_cleanup(void) { nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); - unregister_pernet_device(&ipvs_core_dev_ops); - unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ ip_vs_sync_cleanup(); ip_vs_conn_cleanup(); ip_vs_app_cleanup(); ip_vs_protocol_cleanup(); ip_vs_control_cleanup(); ip_vs_estimator_cleanup(); + unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ pr_info("ipvs unloaded.\n"); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_ctl.c b/trunk/net/netfilter/ipvs/ip_vs_ctl.c index 37890f228b19..ae47090bf45f 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_ctl.c +++ b/trunk/net/netfilter/ipvs/ip_vs_ctl.c @@ -69,11 +69,6 @@ int ip_vs_get_debug_level(void) } #endif - -/* Protos */ -static void __ip_vs_del_service(struct ip_vs_service *svc); - - #ifdef CONFIG_IP_VS_IPV6 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ static int __ip_vs_addr_is_local_v6(struct net *net, @@ -1219,8 +1214,6 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, write_unlock_bh(&__ip_vs_svc_lock); *svc_p = svc; - /* Now there is a service - full throttle */ - ipvs->enable = 1; return 0; @@ -1479,84 +1472,6 @@ static int ip_vs_flush(struct net *net) return 0; } -/* - * Delete service by {netns} in the service table. - * Called by __ip_vs_cleanup() - */ -void __ip_vs_service_cleanup(struct net *net) -{ - EnterFunction(2); - /* Check for "full" addressed entries */ - mutex_lock(&__ip_vs_mutex); - ip_vs_flush(net); - mutex_unlock(&__ip_vs_mutex); - LeaveFunction(2); -} -/* - * Release dst hold by dst_cache - */ -static inline void -__ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev) -{ - spin_lock_bh(&dest->dst_lock); - if (dest->dst_cache && dest->dst_cache->dev == dev) { - IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n", - dev->name, - IP_VS_DBG_ADDR(dest->af, &dest->addr), - ntohs(dest->port), - atomic_read(&dest->refcnt)); - ip_vs_dst_reset(dest); - } - spin_unlock_bh(&dest->dst_lock); - -} -/* - * Netdev event receiver - * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to - * a device that is "unregister" it must be released. 
- */ -static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, - void *ptr) -{ - struct net_device *dev = ptr; - struct net *net = dev_net(dev); - struct ip_vs_service *svc; - struct ip_vs_dest *dest; - unsigned int idx; - - if (event != NETDEV_UNREGISTER) - return NOTIFY_DONE; - IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); - EnterFunction(2); - mutex_lock(&__ip_vs_mutex); - for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { - list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { - if (net_eq(svc->net, net)) { - list_for_each_entry(dest, &svc->destinations, - n_list) { - __ip_vs_dev_reset(dest, dev); - } - } - } - - list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { - if (net_eq(svc->net, net)) { - list_for_each_entry(dest, &svc->destinations, - n_list) { - __ip_vs_dev_reset(dest, dev); - } - } - - } - } - - list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { - __ip_vs_dev_reset(dest, dev); - } - mutex_unlock(&__ip_vs_mutex); - LeaveFunction(2); - return NOTIFY_DONE; -} /* * Zero counters in a service or all services @@ -2066,7 +1981,7 @@ static const struct file_operations ip_vs_info_fops = { .open = ip_vs_info_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_net, + .release = seq_release_private, }; #endif @@ -2109,7 +2024,7 @@ static const struct file_operations ip_vs_stats_fops = { .open = ip_vs_stats_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = single_release_net, + .release = single_release, }; static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v) @@ -2178,7 +2093,7 @@ static const struct file_operations ip_vs_stats_percpu_fops = { .open = ip_vs_stats_percpu_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = single_release_net, + .release = single_release, }; #endif @@ -3673,10 +3588,6 @@ void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { } #endif -static struct notifier_block ip_vs_dst_notifier = { - .notifier_call = ip_vs_dst_event, -}; - int __net_init __ip_vs_control_init(struct net *net) { int idx; @@ -3715,7 +3626,7 @@ int __net_init __ip_vs_control_init(struct net *net) return -ENOMEM; } -void __net_exit __ip_vs_control_cleanup(struct net *net) +static void __net_exit __ip_vs_control_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -3728,6 +3639,11 @@ void __net_exit __ip_vs_control_cleanup(struct net *net) free_percpu(ipvs->tot_stats.cpustats); } +static struct pernet_operations ipvs_control_ops = { + .init = __ip_vs_control_init, + .exit = __ip_vs_control_cleanup, +}; + int __init ip_vs_control_init(void) { int idx; @@ -3741,32 +3657,33 @@ int __init ip_vs_control_init(void) INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); } + ret = register_pernet_subsys(&ipvs_control_ops); + if (ret) { + pr_err("cannot register namespace.\n"); + goto err; + } + smp_wmb(); /* Do we really need it now ? 
*/ ret = nf_register_sockopt(&ip_vs_sockopts); if (ret) { pr_err("cannot register sockopt.\n"); - goto err_sock; + goto err_net; } ret = ip_vs_genl_register(); if (ret) { pr_err("cannot register Generic Netlink interface.\n"); - goto err_genl; + nf_unregister_sockopt(&ip_vs_sockopts); + goto err_net; } - ret = register_netdevice_notifier(&ip_vs_dst_notifier); - if (ret < 0) - goto err_notf; - LeaveFunction(2); return 0; -err_notf: - ip_vs_genl_unregister(); -err_genl: - nf_unregister_sockopt(&ip_vs_sockopts); -err_sock: +err_net: + unregister_pernet_subsys(&ipvs_control_ops); +err: return ret; } @@ -3774,6 +3691,7 @@ int __init ip_vs_control_init(void) void ip_vs_control_cleanup(void) { EnterFunction(2); + unregister_pernet_subsys(&ipvs_control_ops); ip_vs_genl_unregister(); nf_unregister_sockopt(&ip_vs_sockopts); LeaveFunction(2); diff --git a/trunk/net/netfilter/ipvs/ip_vs_est.c b/trunk/net/netfilter/ipvs/ip_vs_est.c index 508cce98777c..8c8766ca56ad 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_est.c +++ b/trunk/net/netfilter/ipvs/ip_vs_est.c @@ -192,7 +192,7 @@ void ip_vs_read_estimator(struct ip_vs_stats_user *dst, dst->outbps = (e->outbps + 0xF) >> 5; } -int __net_init __ip_vs_estimator_init(struct net *net) +static int __net_init __ip_vs_estimator_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -203,16 +203,24 @@ int __net_init __ip_vs_estimator_init(struct net *net) return 0; } -void __net_exit __ip_vs_estimator_cleanup(struct net *net) +static void __net_exit __ip_vs_estimator_exit(struct net *net) { del_timer_sync(&net_ipvs(net)->est_timer); } +static struct pernet_operations ip_vs_app_ops = { + .init = __ip_vs_estimator_init, + .exit = __ip_vs_estimator_exit, +}; int __init ip_vs_estimator_init(void) { - return 0; + int rv; + + rv = register_pernet_subsys(&ip_vs_app_ops); + return rv; } void ip_vs_estimator_cleanup(void) { + unregister_pernet_subsys(&ip_vs_app_ops); } diff --git a/trunk/net/netfilter/ipvs/ip_vs_proto.c b/trunk/net/netfilter/ipvs/ip_vs_proto.c index eb86028536fc..17484a4416ef 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_proto.c +++ b/trunk/net/netfilter/ipvs/ip_vs_proto.c @@ -316,7 +316,7 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, /* * per network name-space init */ -int __net_init __ip_vs_protocol_init(struct net *net) +static int __net_init __ip_vs_protocol_init(struct net *net) { #ifdef CONFIG_IP_VS_PROTO_TCP register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); @@ -336,7 +336,7 @@ int __net_init __ip_vs_protocol_init(struct net *net) return 0; } -void __net_exit __ip_vs_protocol_cleanup(struct net *net) +static void __net_exit __ip_vs_protocol_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_proto_data *pd; @@ -349,6 +349,11 @@ void __net_exit __ip_vs_protocol_cleanup(struct net *net) } } +static struct pernet_operations ipvs_proto_ops = { + .init = __ip_vs_protocol_init, + .exit = __ip_vs_protocol_cleanup, +}; + int __init ip_vs_protocol_init(void) { char protocols[64]; @@ -377,6 +382,7 @@ int __init ip_vs_protocol_init(void) REGISTER_PROTOCOL(&ip_vs_protocol_esp); #endif pr_info("Registered protocols (%s)\n", &protocols[2]); + return register_pernet_subsys(&ipvs_proto_ops); return 0; } @@ -387,6 +393,7 @@ void ip_vs_protocol_cleanup(void) struct ip_vs_protocol *pp; int i; + unregister_pernet_subsys(&ipvs_proto_ops); /* unregister all the ipvs protocols */ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { while ((pp = ip_vs_proto_table[i]) != NULL) diff --git 
a/trunk/net/netfilter/ipvs/ip_vs_sync.c b/trunk/net/netfilter/ipvs/ip_vs_sync.c index e292e5bddc70..3e7961e85e9c 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_sync.c +++ b/trunk/net/netfilter/ipvs/ip_vs_sync.c @@ -1303,18 +1303,13 @@ static struct socket *make_send_sock(struct net *net) struct socket *sock; int result; - /* First create a socket move it to right name space later */ - result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); + /* First create a socket */ + result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } - /* - * Kernel sockets that are a part of a namespace, should not - * hold a reference to a namespace in order to allow to stop it. - * After sk_change_net should be released using sk_release_kernel. - */ - sk_change_net(sock->sk, net); + result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); if (result < 0) { pr_err("Error setting outbound mcast interface\n"); @@ -1339,8 +1334,8 @@ static struct socket *make_send_sock(struct net *net) return sock; -error: - sk_release_kernel(sock->sk); + error: + sock_release(sock); return ERR_PTR(result); } @@ -1355,17 +1350,12 @@ static struct socket *make_receive_sock(struct net *net) int result; /* First create a socket */ - result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); + result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } - /* - * Kernel sockets that are a part of a namespace, should not - * hold a reference to a namespace in order to allow to stop it. - * After sk_change_net should be released using sk_release_kernel. 
- */ - sk_change_net(sock->sk, net); + /* it is equivalent to the REUSEADDR option in user-space */ sock->sk->sk_reuse = 1; @@ -1387,8 +1377,8 @@ static struct socket *make_receive_sock(struct net *net) return sock; -error: - sk_release_kernel(sock->sk); + error: + sock_release(sock); return ERR_PTR(result); } @@ -1483,7 +1473,7 @@ static int sync_thread_master(void *data) ip_vs_sync_buff_release(sb); /* release the sending multicast socket */ - sk_release_kernel(tinfo->sock->sk); + sock_release(tinfo->sock); kfree(tinfo); return 0; @@ -1523,7 +1513,7 @@ static int sync_thread_backup(void *data) } /* release the sending multicast socket */ - sk_release_kernel(tinfo->sock->sk); + sock_release(tinfo->sock); kfree(tinfo->buf); kfree(tinfo); @@ -1611,7 +1601,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) outbuf: kfree(buf); outsocket: - sk_release_kernel(sock->sk); + sock_release(sock); out: return result; } @@ -1620,7 +1610,6 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) int stop_sync_thread(struct net *net, int state) { struct netns_ipvs *ipvs = net_ipvs(net); - int retc = -EINVAL; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); @@ -1640,7 +1629,7 @@ int stop_sync_thread(struct net *net, int state) spin_lock_bh(&ipvs->sync_lock); ipvs->sync_state &= ~IP_VS_STATE_MASTER; spin_unlock_bh(&ipvs->sync_lock); - retc = kthread_stop(ipvs->master_thread); + kthread_stop(ipvs->master_thread); ipvs->master_thread = NULL; } else if (state == IP_VS_STATE_BACKUP) { if (!ipvs->backup_thread) @@ -1650,20 +1639,22 @@ int stop_sync_thread(struct net *net, int state) task_pid_nr(ipvs->backup_thread)); ipvs->sync_state &= ~IP_VS_STATE_BACKUP; - retc = kthread_stop(ipvs->backup_thread); + kthread_stop(ipvs->backup_thread); ipvs->backup_thread = NULL; + } else { + return -EINVAL; } /* decrease the module use count */ ip_vs_use_count_dec(); - return retc; + return 0; } /* * Initialize data struct for each netns */ -int __net_init __ip_vs_sync_init(struct net *net) +static int __net_init __ip_vs_sync_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -1677,24 +1668,24 @@ int __net_init __ip_vs_sync_init(struct net *net) return 0; } -void __ip_vs_sync_cleanup(struct net *net) +static void __ip_vs_sync_cleanup(struct net *net) { - int retc; + stop_sync_thread(net, IP_VS_STATE_MASTER); + stop_sync_thread(net, IP_VS_STATE_BACKUP); +} - retc = stop_sync_thread(net, IP_VS_STATE_MASTER); - if (retc && retc != -ESRCH) - pr_err("Failed to stop Master Daemon\n"); +static struct pernet_operations ipvs_sync_ops = { + .init = __ip_vs_sync_init, + .exit = __ip_vs_sync_cleanup, +}; - retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); - if (retc && retc != -ESRCH) - pr_err("Failed to stop Backup Daemon\n"); -} int __init ip_vs_sync_init(void) { - return 0; + return register_pernet_subsys(&ipvs_sync_ops); } void ip_vs_sync_cleanup(void) { + unregister_pernet_subsys(&ipvs_sync_ops); } diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index 482e90c61850..30bf8a167fc8 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -1334,7 +1334,6 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, struct nf_conn *ct; int err = -EINVAL; struct nf_conntrack_helper *helper; - struct nf_conn_tstamp *tstamp; ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); if (IS_ERR(ct)) @@ -1452,9 +1451,6 @@ 
ctnetlink_create_conntrack(struct net *net, u16 zone,
                 __set_bit(IPS_EXPECTED_BIT, &ct->status);
                 ct->master = master_ct;
         }
-        tstamp = nf_conn_tstamp_find(ct);
-        if (tstamp)
-                tstamp->start = ktime_to_ns(ktime_get_real());
 
         add_timer(&ct->timeout);
         nf_conntrack_hash_insert(ct);
diff --git a/trunk/net/netfilter/x_tables.c b/trunk/net/netfilter/x_tables.c
index 8a025a585d2f..a9adf4c6b299 100644
--- a/trunk/net/netfilter/x_tables.c
+++ b/trunk/net/netfilter/x_tables.c
@@ -455,7 +455,6 @@ void xt_compat_flush_offsets(u_int8_t af)
                 vfree(xt[af].compat_tab);
                 xt[af].compat_tab = NULL;
                 xt[af].number = 0;
-                xt[af].cur = 0;
         }
 }
 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
@@ -474,7 +473,8 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
                 else
                         return mid ? tmp[mid - 1].delta : 0;
         }
 
-        return left ? tmp[left - 1].delta : 0;
+        WARN_ON_ONCE(1);
+        return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
diff --git a/trunk/net/netfilter/xt_DSCP.c b/trunk/net/netfilter/xt_DSCP.c
index ae8271652efa..0a229191e55b 100644
--- a/trunk/net/netfilter/xt_DSCP.c
+++ b/trunk/net/netfilter/xt_DSCP.c
@@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
         u_int8_t orig, nv;
 
         orig = ipv6_get_dsfield(iph);
-        nv = (orig & ~info->tos_mask) ^ info->tos_value;
+        nv = (orig & info->tos_mask) ^ info->tos_value;
 
         if (orig != nv) {
                 if (!skb_make_writable(skb, sizeof(struct iphdr)))
diff --git a/trunk/net/netfilter/xt_conntrack.c b/trunk/net/netfilter/xt_conntrack.c
index 61805d7b38aa..481a86fdc409 100644
--- a/trunk/net/netfilter/xt_conntrack.c
+++ b/trunk/net/netfilter/xt_conntrack.c
@@ -272,6 +272,11 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
 {
         int ret;
 
+        if (strcmp(par->table, "raw") == 0) {
+                pr_info("state is undetermined at the time of raw table\n");
+                return -EINVAL;
+        }
+
         ret = nf_ct_l3proto_try_module_get(par->family);
         if (ret < 0)
                 pr_info("cannot load conntrack support for proto=%u\n",
diff --git a/trunk/net/sctp/ulpevent.c b/trunk/net/sctp/ulpevent.c
index 61b1f5ada96a..dff27d5e22fd 100644
--- a/trunk/net/sctp/ulpevent.c
+++ b/trunk/net/sctp/ulpevent.c
@@ -554,7 +554,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
         memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));
 
         /* Per TSVWG discussion with Randy. Allow the application to
-         * reassemble a fragmented message.
+         * resemble a fragmented message.
          */
         ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;
diff --git a/trunk/net/sunrpc/Kconfig b/trunk/net/sunrpc/Kconfig
index b2198e65d8bb..8873fd8ddacd 100644
--- a/trunk/net/sunrpc/Kconfig
+++ b/trunk/net/sunrpc/Kconfig
@@ -18,13 +18,14 @@ config SUNRPC_XPRT_RDMA
           If unsure, say N.
 
 config RPCSEC_GSS_KRB5
-        tristate "Secure RPC: Kerberos V mechanism"
+        tristate
         depends on SUNRPC && CRYPTO
-        depends on CRYPTO_MD5 && CRYPTO_DES && CRYPTO_CBC && CRYPTO_CTS
-        depends on CRYPTO_ECB && CRYPTO_HMAC && CRYPTO_SHA1 && CRYPTO_AES
-        depends on CRYPTO_ARC4
+        prompt "Secure RPC: Kerberos V mechanism" if !(NFS_V4 || NFSD_V4)
         default y
         select SUNRPC_GSS
+        select CRYPTO_MD5
+        select CRYPTO_DES
+        select CRYPTO_CBC
         help
           Choose Y here to enable Secure RPC using the Kerberos version 5
           GSS-API mechanism (RFC 1964).
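
As context for the x_tables.c hunk above (this note is not part of the patch): xt_compat_calc_jump() binary-searches a table of (offset, delta) pairs sorted by offset, and the change here only affects what happens when the search falls through without an exact match. A minimal standalone sketch of that kind of lookup, with invented types and sample data, could look like this:

    /* Self-contained sketch of a binary search over sorted (offset, delta)
     * pairs, in the spirit of xt_compat_calc_jump().  The struct and the
     * sample table are invented for illustration. */
    #include <stdio.h>

    struct compat_delta {
            unsigned int offset;
            int delta;
    };

    static int calc_jump(const struct compat_delta *tab, unsigned int n,
                         unsigned int offset)
    {
            unsigned int left = 0, right = n;

            while (left < right) {
                    unsigned int mid = (left + right) / 2;

                    if (offset > tab[mid].offset)
                            left = mid + 1;
                    else if (offset < tab[mid].offset)
                            right = mid;
                    else
                            return mid ? tab[mid - 1].delta : 0;
            }
            /* Not found: the '-' line in the hunk above falls back to the
             * nearest smaller entry's delta, while the '+' lines warn and
             * return 0 instead.  This sketch keeps the fallback. */
            return left ? tab[left - 1].delta : 0;
    }

    int main(void)
    {
            const struct compat_delta tab[] = {
                    { 16, 4 }, { 48, 12 }, { 96, 20 },
            };

            printf("%d\n", calc_jump(tab, 3, 48)); /* exact match: previous entry's delta, 4 */
            printf("%d\n", calc_jump(tab, 3, 60)); /* fall-through: nearest smaller entry, 12 */
            return 0;
    }
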
diff --git a/trunk/net/sunrpc/auth_gss/auth_gss.c b/trunk/net/sunrpc/auth_gss/auth_gss.c index 339ba64cce1e..f3914d0c5079 100644 --- a/trunk/net/sunrpc/auth_gss/auth_gss.c +++ b/trunk/net/sunrpc/auth_gss/auth_gss.c @@ -520,7 +520,7 @@ gss_refresh_upcall(struct rpc_task *task) warn_gssd(); task->tk_timeout = 15*HZ; rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL); - return -EAGAIN; + return 0; } if (IS_ERR(gss_msg)) { err = PTR_ERR(gss_msg); @@ -563,12 +563,10 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) if (PTR_ERR(gss_msg) == -EAGAIN) { err = wait_event_interruptible_timeout(pipe_version_waitqueue, pipe_version >= 0, 15*HZ); - if (pipe_version < 0) { - warn_gssd(); - err = -EACCES; - } if (err) goto out; + if (pipe_version < 0) + warn_gssd(); goto retry; } if (IS_ERR(gss_msg)) { diff --git a/trunk/net/sunrpc/clnt.c b/trunk/net/sunrpc/clnt.c index 8d83f9d48713..e7a96e478f63 100644 --- a/trunk/net/sunrpc/clnt.c +++ b/trunk/net/sunrpc/clnt.c @@ -1508,10 +1508,7 @@ call_timeout(struct rpc_task *task) if (clnt->cl_chatty) printk(KERN_NOTICE "%s: server %s not responding, timed out\n", clnt->cl_protname, clnt->cl_server); - if (task->tk_flags & RPC_TASK_TIMEOUT) - rpc_exit(task, -ETIMEDOUT); - else - rpc_exit(task, -EIO); + rpc_exit(task, -EIO); return; } diff --git a/trunk/net/sunrpc/xprt.c b/trunk/net/sunrpc/xprt.c index ce5eb68a9664..9494c3767356 100644 --- a/trunk/net/sunrpc/xprt.c +++ b/trunk/net/sunrpc/xprt.c @@ -906,7 +906,6 @@ void xprt_transmit(struct rpc_task *task) } dprintk("RPC: %5u xmit complete\n", task->tk_pid); - task->tk_flags |= RPC_TASK_SENT; spin_lock_bh(&xprt->transport_lock); xprt->ops->set_retrans_timeout(task); diff --git a/trunk/net/unix/af_unix.c b/trunk/net/unix/af_unix.c index b1d75beb7e20..3a43a8304768 100644 --- a/trunk/net/unix/af_unix.c +++ b/trunk/net/unix/af_unix.c @@ -524,8 +524,6 @@ static int unix_dgram_connect(struct socket *, struct sockaddr *, int, int); static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t); -static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, - struct msghdr *, size_t, int); static const struct proto_ops unix_stream_ops = { .family = PF_UNIX, @@ -585,7 +583,7 @@ static const struct proto_ops unix_seqpacket_ops = { .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = unix_seqpacket_sendmsg, - .recvmsg = unix_seqpacket_recvmsg, + .recvmsg = unix_dgram_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; @@ -1701,18 +1699,6 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock, return unix_dgram_sendmsg(kiocb, sock, msg, len); } -static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, - int flags) -{ - struct sock *sk = sock->sk; - - if (sk->sk_state != TCP_ESTABLISHED) - return -ENOTCONN; - - return unix_dgram_recvmsg(iocb, sock, msg, size, flags); -} - static void unix_copy_addr(struct msghdr *msg, struct sock *sk) { struct unix_sock *u = unix_sk(sk); diff --git a/trunk/net/xfrm/xfrm_policy.c b/trunk/net/xfrm/xfrm_policy.c index b4d745ea8ee1..15792d8b6272 100644 --- a/trunk/net/xfrm/xfrm_policy.c +++ b/trunk/net/xfrm/xfrm_policy.c @@ -1406,7 +1406,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, struct net *net = xp_net(policy); unsigned long now = jiffies; struct net_device *dev; - struct xfrm_mode *inner_mode; struct dst_entry *dst_prev = NULL; struct dst_entry *dst0 = NULL; int i = 0; @@ -1437,17 +1436,6 @@ 
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, goto put_states; } - if (xfrm[i]->sel.family == AF_UNSPEC) { - inner_mode = xfrm_ip2inner_mode(xfrm[i], - xfrm_af2proto(family)); - if (!inner_mode) { - err = -EAFNOSUPPORT; - dst_release(dst); - goto put_states; - } - } else - inner_mode = xfrm[i]->inner_mode; - if (!dst_prev) dst0 = dst1; else { @@ -1476,7 +1464,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, dst1->lastuse = now; dst1->input = dst_discard; - dst1->output = inner_mode->afinfo->output; + dst1->output = xfrm[i]->outer_mode->afinfo->output; dst1->next = dst_prev; dst_prev = dst1; diff --git a/trunk/net/xfrm/xfrm_replay.c b/trunk/net/xfrm/xfrm_replay.c index 47f1b8638df9..f218385950ca 100644 --- a/trunk/net/xfrm/xfrm_replay.c +++ b/trunk/net/xfrm/xfrm_replay.c @@ -532,12 +532,9 @@ int xfrm_init_replay(struct xfrm_state *x) if (replay_esn) { if (replay_esn->replay_window > - replay_esn->bmp_len * sizeof(__u32) * 8) + replay_esn->bmp_len * sizeof(__u32)) return -EINVAL; - if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0) - return -EINVAL; - if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) x->repl = &xfrm_replay_esn; else diff --git a/trunk/net/xfrm/xfrm_user.c b/trunk/net/xfrm/xfrm_user.c index c658cb3bc7c3..5d1d60d3ca83 100644 --- a/trunk/net/xfrm/xfrm_user.c +++ b/trunk/net/xfrm/xfrm_user.c @@ -124,9 +124,6 @@ static inline int verify_replay(struct xfrm_usersa_info *p, { struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; - if ((p->flags & XFRM_STATE_ESN) && !rt) - return -EINVAL; - if (!rt) return 0; diff --git a/trunk/scripts/Makefile.build b/trunk/scripts/Makefile.build index 6165622c3e29..d5f925abe4d2 100644 --- a/trunk/scripts/Makefile.build +++ b/trunk/scripts/Makefile.build @@ -244,19 +244,14 @@ endif ifdef CONFIG_FTRACE_MCOUNT_RECORD ifdef BUILD_C_RECORDMCOUNT -ifeq ("$(origin RECORDMCOUNT_WARN)", "command line") - RECORDMCOUNT_FLAGS = -w -endif # Due to recursion, we must skip empty.o. # The empty.o file is created in the make process in order to determine # the target endianness and word size. It is made before all other C # files, including recordmcount. 
sub_cmd_record_mcount = \ if [ $(@) != "scripts/mod/empty.o" ]; then \ - $(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)"; \ + $(objtree)/scripts/recordmcount "$(@)"; \ fi; -recordmcount_source := $(srctree)/scripts/recordmcount.c \ - $(srctree)/scripts/recordmcount.h else sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ @@ -264,7 +259,6 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \ "$(LD)" "$(NM)" "$(RM)" "$(MV)" \ "$(if $(part-of-module),1,0)" "$(@)"; -recordmcount_source := $(srctree)/scripts/recordmcount.pl endif cmd_record_mcount = \ if [ "$(findstring -pg,$(_c_flags))" = "-pg" ]; then \ @@ -285,13 +279,13 @@ define rule_cc_o_c endef # Built-in and composite module parts -$(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE +$(obj)/%.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) # Single-part modules are special since we need to mark them in $(MODVERDIR) -$(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE +$(single-used-m): $(obj)/%.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod) diff --git a/trunk/scripts/mod/modpost.c b/trunk/scripts/mod/modpost.c index 413c53693e62..cd104afcc5f2 100644 --- a/trunk/scripts/mod/modpost.c +++ b/trunk/scripts/mod/modpost.c @@ -420,10 +420,11 @@ static int parse_elf(struct elf_info *info, const char *filename) return 0; } - if (hdr->e_shnum == SHN_UNDEF) { + if (hdr->e_shnum == 0) { /* * There are more than 64k sections, * read count from .sh_size. + * note: it doesn't need shndx2secindex() */ info->num_sections = TO_NATIVE(sechdrs[0].sh_size); } @@ -431,7 +432,8 @@ static int parse_elf(struct elf_info *info, const char *filename) info->num_sections = hdr->e_shnum; } if (hdr->e_shstrndx == SHN_XINDEX) { - info->secindex_strings = TO_NATIVE(sechdrs[0].sh_link); + info->secindex_strings = + shndx2secindex(TO_NATIVE(sechdrs[0].sh_link)); } else { info->secindex_strings = hdr->e_shstrndx; @@ -487,7 +489,7 @@ static int parse_elf(struct elf_info *info, const char *filename) sechdrs[i].sh_offset; info->symtab_stop = (void *)hdr + sechdrs[i].sh_offset + sechdrs[i].sh_size; - sh_link_idx = sechdrs[i].sh_link; + sh_link_idx = shndx2secindex(sechdrs[i].sh_link); info->strtab = (void *)hdr + sechdrs[sh_link_idx].sh_offset; } @@ -514,9 +516,11 @@ static int parse_elf(struct elf_info *info, const char *filename) if (symtab_shndx_idx != ~0U) { Elf32_Word *p; - if (symtab_idx != sechdrs[symtab_shndx_idx].sh_link) + if (symtab_idx != + shndx2secindex(sechdrs[symtab_shndx_idx].sh_link)) fatal("%s: SYMTAB_SHNDX has bad sh_link: %u!=%u\n", - filename, sechdrs[symtab_shndx_idx].sh_link, + filename, + shndx2secindex(sechdrs[symtab_shndx_idx].sh_link), symtab_idx); /* Fix endianness */ for (p = info->symtab_shndx_start; p < info->symtab_shndx_stop; @@ -1442,7 +1446,7 @@ static unsigned int *reloc_location(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r) { Elf_Shdr *sechdrs = elf->sechdrs; - int section = sechdr->sh_info; + int section = shndx2secindex(sechdr->sh_info); return (void *)elf->hdr + sechdrs[section].sh_offset + r->r_offset; diff --git a/trunk/scripts/mod/modpost.h b/trunk/scripts/mod/modpost.h index 2031119080dc..0388cfccac8d 100644 --- a/trunk/scripts/mod/modpost.h +++ b/trunk/scripts/mod/modpost.h @@ -145,22 +145,33 @@ static inline int 
is_shndx_special(unsigned int i) return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE; } -/* - * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of - * the way to -256..-1, to avoid conflicting with real section - * indices. +/* shndx is in [0..SHN_LORESERVE) U (SHN_HIRESERVE, 0xfffffff], thus: + * shndx == 0 <=> sechdrs[0] + * ...... + * shndx == SHN_LORESERVE-1 <=> sechdrs[SHN_LORESERVE-1] + * shndx == SHN_HIRESERVE+1 <=> sechdrs[SHN_LORESERVE] + * shndx == SHN_HIRESERVE+2 <=> sechdrs[SHN_LORESERVE+1] + * ...... + * fyi: sym->st_shndx is uint16, SHN_LORESERVE = ff00, SHN_HIRESERVE = ffff, + * so basically we map 0000..feff -> 0000..feff + * ff00..ffff -> (you are a bad boy, dont do it) + * 10000..xxxx -> ff00..(xxxx-0x100) */ -#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1)) +static inline unsigned int shndx2secindex(unsigned int i) +{ + if (i <= SHN_HIRESERVE) + return i; + return i - (SHN_HIRESERVE + 1 - SHN_LORESERVE); +} /* Accessor for sym->st_shndx, hides ugliness of "64k sections" */ static inline unsigned int get_secindex(const struct elf_info *info, const Elf_Sym *sym) { - if (is_shndx_special(sym->st_shndx)) - return SPECIAL(sym->st_shndx); if (sym->st_shndx != SHN_XINDEX) return sym->st_shndx; - return info->symtab_shndx_start[sym - info->symtab_start]; + return shndx2secindex(info->symtab_shndx_start[sym - + info->symtab_start]); } /* file2alias.c */ diff --git a/trunk/scripts/module-common.lds b/trunk/scripts/module-common.lds index 0865b3e752be..47a1f9ae0ede 100644 --- a/trunk/scripts/module-common.lds +++ b/trunk/scripts/module-common.lds @@ -5,15 +5,4 @@ */ SECTIONS { /DISCARD/ : { *(.discard) } - - __ksymtab : { *(SORT(___ksymtab+*)) } - __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) } - __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) } - __ksymtab_unused_gpl : { *(SORT(___ksymtab_unused_gpl+*)) } - __ksymtab_gpl_future : { *(SORT(___ksymtab_gpl_future+*)) } - __kcrctab : { *(SORT(___kcrctab+*)) } - __kcrctab_gpl : { *(SORT(___kcrctab_gpl+*)) } - __kcrctab_unused : { *(SORT(___kcrctab_unused+*)) } - __kcrctab_unused_gpl : { *(SORT(___kcrctab_unused_gpl+*)) } - __kcrctab_gpl_future : { *(SORT(___kcrctab_gpl_future+*)) } } diff --git a/trunk/scripts/recordmcount.c b/trunk/scripts/recordmcount.c index ee52cb8e17ad..f9f6f52db772 100644 --- a/trunk/scripts/recordmcount.c +++ b/trunk/scripts/recordmcount.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -40,7 +39,6 @@ static char gpfx; /* prefix for global symbol name (sometimes '_') */ static struct stat sb; /* Remember .st_size, etc. 
*/ static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */ static const char *altmcount; /* alternate mcount symbol name */ -static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */ /* setjmp() return values */ enum { @@ -80,7 +78,7 @@ static off_t ulseek(int const fd, off_t const offset, int const whence) { off_t const w = lseek(fd, offset, whence); - if (w == (off_t)-1) { + if ((off_t)-1 == w) { perror("lseek"); fail_file(); } @@ -113,41 +111,13 @@ static void * umalloc(size_t size) { void *const addr = malloc(size); - if (addr == 0) { + if (0 == addr) { fprintf(stderr, "malloc failed: %zu bytes\n", size); fail_file(); } return addr; } -static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 }; -static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; -static unsigned char *ideal_nop; - -static char rel_type_nop; - -static int (*make_nop)(void *map, size_t const offset); - -static int make_nop_x86(void *map, size_t const offset) -{ - uint32_t *ptr; - unsigned char *op; - - /* Confirm we have 0xe8 0x0 0x0 0x0 0x0 */ - ptr = map + offset; - if (*ptr != 0) - return -1; - - op = map + offset - 1; - if (*op != 0xe8) - return -1; - - /* convert to nop */ - ulseek(fd_map, offset - 1, SEEK_SET); - uwrite(fd_map, ideal_nop, 5); - return 0; -} - /* * Get the whole file as a programming convenience in order to avoid * malloc+lseek+read+free of many pieces. If successful, then mmap @@ -166,7 +136,7 @@ static void *mmap_file(char const *fname) void *addr; fd_map = open(fname, O_RDWR); - if (fd_map < 0 || fstat(fd_map, &sb) < 0) { + if (0 > fd_map || 0 > fstat(fd_map, &sb)) { perror(fname); fail_file(); } @@ -177,7 +147,7 @@ static void *mmap_file(char const *fname) addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd_map, 0); mmap_failed = 0; - if (addr == MAP_FAILED) { + if (MAP_FAILED == addr) { mmap_failed = 1; addr = umalloc(sb.st_size); uread(fd_map, addr, sb.st_size); @@ -236,13 +206,12 @@ static uint32_t (*w2)(uint16_t); static int is_mcounted_section_name(char const *const txtname) { - return strcmp(".text", txtname) == 0 || - strcmp(".ref.text", txtname) == 0 || - strcmp(".sched.text", txtname) == 0 || - strcmp(".spinlock.text", txtname) == 0 || - strcmp(".irqentry.text", txtname) == 0 || - strcmp(".kprobes.text", txtname) == 0 || - strcmp(".text.unlikely", txtname) == 0; + return 0 == strcmp(".text", txtname) || + 0 == strcmp(".ref.text", txtname) || + 0 == strcmp(".sched.text", txtname) || + 0 == strcmp(".spinlock.text", txtname) || + 0 == strcmp(".irqentry.text", txtname) || + 0 == strcmp(".text.unlikely", txtname); } /* 32 bit and 64 bit are very similar */ @@ -295,48 +264,43 @@ do_file(char const *const fname) w8 = w8nat; switch (ehdr->e_ident[EI_DATA]) { static unsigned int const endian = 1; - default: + default: { fprintf(stderr, "unrecognized ELF data encoding %d: %s\n", ehdr->e_ident[EI_DATA], fname); fail_file(); - break; - case ELFDATA2LSB: - if (*(unsigned char const *)&endian != 1) { + } break; + case ELFDATA2LSB: { + if (1 != *(unsigned char const *)&endian) { /* main() is big endian, file.o is little endian. */ w = w4rev; w2 = w2rev; w8 = w8rev; } - break; - case ELFDATA2MSB: - if (*(unsigned char const *)&endian != 0) { + } break; + case ELFDATA2MSB: { + if (0 != *(unsigned char const *)&endian) { /* main() is little endian, file.o is big endian. 
*/ w = w4rev; w2 = w2rev; w8 = w8rev; } - break; + } break; } /* end switch */ - if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 - || w2(ehdr->e_type) != ET_REL - || ehdr->e_ident[EI_VERSION] != EV_CURRENT) { + if (0 != memcmp(ELFMAG, ehdr->e_ident, SELFMAG) + || ET_REL != w2(ehdr->e_type) + || EV_CURRENT != ehdr->e_ident[EI_VERSION]) { fprintf(stderr, "unrecognized ET_REL file %s\n", fname); fail_file(); } gpfx = 0; switch (w2(ehdr->e_machine)) { - default: + default: { fprintf(stderr, "unrecognized e_machine %d %s\n", w2(ehdr->e_machine), fname); fail_file(); - break; - case EM_386: - reltype = R_386_32; - make_nop = make_nop_x86; - ideal_nop = ideal_nop5_x86_32; - mcount_adjust_32 = -1; - break; + } break; + case EM_386: reltype = R_386_32; break; case EM_ARM: reltype = R_ARM_ABS32; altmcount = "__gnu_mcount_nc"; break; @@ -347,91 +311,67 @@ do_file(char const *const fname) case EM_S390: /* reltype: e_class */ gpfx = '_'; break; case EM_SH: reltype = R_SH_DIR32; break; case EM_SPARCV9: reltype = R_SPARC_64; gpfx = '_'; break; - case EM_X86_64: - make_nop = make_nop_x86; - ideal_nop = ideal_nop5_x86_64; - reltype = R_X86_64_64; - mcount_adjust_64 = -1; - break; + case EM_X86_64: reltype = R_X86_64_64; break; } /* end switch */ switch (ehdr->e_ident[EI_CLASS]) { - default: + default: { fprintf(stderr, "unrecognized ELF class %d %s\n", ehdr->e_ident[EI_CLASS], fname); fail_file(); - break; - case ELFCLASS32: - if (w2(ehdr->e_ehsize) != sizeof(Elf32_Ehdr) - || w2(ehdr->e_shentsize) != sizeof(Elf32_Shdr)) { + } break; + case ELFCLASS32: { + if (sizeof(Elf32_Ehdr) != w2(ehdr->e_ehsize) + || sizeof(Elf32_Shdr) != w2(ehdr->e_shentsize)) { fprintf(stderr, "unrecognized ET_REL file: %s\n", fname); fail_file(); } - if (w2(ehdr->e_machine) == EM_S390) { + if (EM_S390 == w2(ehdr->e_machine)) reltype = R_390_32; - mcount_adjust_32 = -4; - } - if (w2(ehdr->e_machine) == EM_MIPS) { + if (EM_MIPS == w2(ehdr->e_machine)) { reltype = R_MIPS_32; is_fake_mcount32 = MIPS32_is_fake_mcount; } do32(ehdr, fname, reltype); - break; + } break; case ELFCLASS64: { Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr; - if (w2(ghdr->e_ehsize) != sizeof(Elf64_Ehdr) - || w2(ghdr->e_shentsize) != sizeof(Elf64_Shdr)) { + if (sizeof(Elf64_Ehdr) != w2(ghdr->e_ehsize) + || sizeof(Elf64_Shdr) != w2(ghdr->e_shentsize)) { fprintf(stderr, "unrecognized ET_REL file: %s\n", fname); fail_file(); } - if (w2(ghdr->e_machine) == EM_S390) { + if (EM_S390 == w2(ghdr->e_machine)) reltype = R_390_64; - mcount_adjust_64 = -8; - } - if (w2(ghdr->e_machine) == EM_MIPS) { + if (EM_MIPS == w2(ghdr->e_machine)) { reltype = R_MIPS_64; Elf64_r_sym = MIPS64_r_sym; Elf64_r_info = MIPS64_r_info; is_fake_mcount64 = MIPS64_is_fake_mcount; } do64(ghdr, fname, reltype); - break; - } + } break; } /* end switch */ cleanup(); } int -main(int argc, char *argv[]) +main(int argc, char const *argv[]) { const char ftrace[] = "/ftrace.o"; int ftrace_size = sizeof(ftrace) - 1; int n_error = 0; /* gcc-4.3.0 false positive complaint */ - int c; - int i; - - while ((c = getopt(argc, argv, "w")) >= 0) { - switch (c) { - case 'w': - warn_on_notrace_sect = 1; - break; - default: - fprintf(stderr, "usage: recordmcount [-w] file.o...\n"); - return 0; - } - } - if ((argc - optind) < 1) { - fprintf(stderr, "usage: recordmcount [-w] file.o...\n"); + if (argc <= 1) { + fprintf(stderr, "usage: recordmcount file.o...\n"); return 0; } /* Process each file in turn, allowing deep failure. 
*/ - for (i = optind; i < argc; i++) { - char *file = argv[i]; + for (--argc, ++argv; 0 < argc; --argc, ++argv) { int const sjval = setjmp(jmpenv); int len; @@ -440,29 +380,29 @@ main(int argc, char *argv[]) * function but does not call it. Since ftrace.o should * not be traced anyway, we just skip it. */ - len = strlen(file); + len = strlen(argv[0]); if (len >= ftrace_size && - strcmp(file + (len - ftrace_size), ftrace) == 0) + strcmp(argv[0] + (len - ftrace_size), ftrace) == 0) continue; switch (sjval) { - default: - fprintf(stderr, "internal error: %s\n", file); + default: { + fprintf(stderr, "internal error: %s\n", argv[0]); exit(1); - break; - case SJ_SETJMP: /* normal sequence */ + } break; + case SJ_SETJMP: { /* normal sequence */ /* Avoid problems if early cleanup() */ fd_map = -1; ehdr_curr = NULL; mmap_failed = 1; - do_file(file); - break; - case SJ_FAIL: /* error in do_file or below */ + do_file(argv[0]); + } break; + case SJ_FAIL: { /* error in do_file or below */ ++n_error; - break; - case SJ_SUCCEED: /* premature success */ + } break; + case SJ_SUCCEED: { /* premature success */ /* do nothing */ - break; + } break; } /* end switch */ } return !!n_error; diff --git a/trunk/scripts/recordmcount.h b/trunk/scripts/recordmcount.h index 4be60364a405..baf187bee983 100644 --- a/trunk/scripts/recordmcount.h +++ b/trunk/scripts/recordmcount.h @@ -22,15 +22,11 @@ #undef is_fake_mcount #undef fn_is_fake_mcount #undef MIPS_is_fake_mcount -#undef mcount_adjust #undef sift_rel_mcount -#undef nop_mcount #undef find_secsym_ndx #undef __has_rel_mcount #undef has_rel_mcount #undef tot_relsize -#undef get_mcountsym -#undef get_sym_str_and_relp #undef do_func #undef Elf_Addr #undef Elf_Ehdr @@ -53,18 +49,14 @@ #ifdef RECORD_MCOUNT_64 # define append_func append64 # define sift_rel_mcount sift64_rel_mcount -# define nop_mcount nop_mcount_64 # define find_secsym_ndx find64_secsym_ndx # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize -# define get_sym_str_and_relp get_sym_str_and_relp_64 # define do_func do64 -# define get_mcountsym get_mcountsym_64 # define is_fake_mcount is_fake_mcount64 # define fn_is_fake_mcount fn_is_fake_mcount64 # define MIPS_is_fake_mcount MIPS64_is_fake_mcount -# define mcount_adjust mcount_adjust_64 # define Elf_Addr Elf64_Addr # define Elf_Ehdr Elf64_Ehdr # define Elf_Shdr Elf64_Shdr @@ -85,18 +77,14 @@ #else # define append_func append32 # define sift_rel_mcount sift32_rel_mcount -# define nop_mcount nop_mcount_32 # define find_secsym_ndx find32_secsym_ndx # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize -# define get_sym_str_and_relp get_sym_str_and_relp_32 # define do_func do32 -# define get_mcountsym get_mcountsym_32 # define is_fake_mcount is_fake_mcount32 # define fn_is_fake_mcount fn_is_fake_mcount32 # define MIPS_is_fake_mcount MIPS32_is_fake_mcount -# define mcount_adjust mcount_adjust_32 # define Elf_Addr Elf32_Addr # define Elf_Ehdr Elf32_Ehdr # define Elf_Shdr Elf32_Shdr @@ -135,8 +123,6 @@ static void fn_ELF_R_INFO(Elf_Rel *const rp, unsigned sym, unsigned type) } static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO; -static int mcount_adjust = 0; - /* * MIPS mcount long call has 2 _mcount symbols, only the position of the 1st * _mcount symbol is needed for dynamic function tracer, with it, to disable @@ -248,49 +234,6 @@ static void append_func(Elf_Ehdr *const ehdr, uwrite(fd_map, 
ehdr, sizeof(*ehdr)); } -static unsigned get_mcountsym(Elf_Sym const *const sym0, - Elf_Rel const *relp, - char const *const str0) -{ - unsigned mcountsym = 0; - - Elf_Sym const *const symp = - &sym0[Elf_r_sym(relp)]; - char const *symname = &str0[w(symp->st_name)]; - char const *mcount = gpfx == '_' ? "_mcount" : "mcount"; - - if (symname[0] == '.') - ++symname; /* ppc64 hack */ - if (strcmp(mcount, symname) == 0 || - (altmcount && strcmp(altmcount, symname) == 0)) - mcountsym = Elf_r_sym(relp); - - return mcountsym; -} - -static void get_sym_str_and_relp(Elf_Shdr const *const relhdr, - Elf_Ehdr const *const ehdr, - Elf_Sym const **sym0, - char const **str0, - Elf_Rel const **relp) -{ - Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) - + (void *)ehdr); - unsigned const symsec_sh_link = w(relhdr->sh_link); - Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; - Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; - Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) - + (void *)ehdr); - - *sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) - + (void *)ehdr); - - *str0 = (char const *)(_w(strsec->sh_offset) - + (void *)ehdr); - - *relp = rel0; -} - /* * Look at the relocations in order to find the calls to mcount. * Accumulate the section offsets that are found, and their relocation info, @@ -307,27 +250,47 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, { uint_t *const mloc0 = mlocp; Elf_Rel *mrelp = *mrelpp; - Elf_Sym const *sym0; - char const *str0; - Elf_Rel const *relp; + Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + + (void *)ehdr); + unsigned const symsec_sh_link = w(relhdr->sh_link); + Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; + Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) + + (void *)ehdr); + + Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; + char const *const str0 = (char const *)(_w(strsec->sh_offset) + + (void *)ehdr); + + Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) + + (void *)ehdr); unsigned rel_entsize = _w(relhdr->sh_entsize); unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; + Elf_Rel const *relp = rel0; + unsigned mcountsym = 0; unsigned t; - get_sym_str_and_relp(relhdr, ehdr, &sym0, &str0, &relp); - for (t = nrel; t; --t) { - if (!mcountsym) - mcountsym = get_mcountsym(sym0, relp, str0); + if (!mcountsym) { + Elf_Sym const *const symp = + &sym0[Elf_r_sym(relp)]; + char const *symname = &str0[w(symp->st_name)]; + char const *mcount = '_' == gpfx ? "_mcount" : "mcount"; + + if ('.' == symname[0]) + ++symname; /* ppc64 hack */ + if (0 == strcmp(mcount, symname) || + (altmcount && 0 == strcmp(altmcount, symname))) + mcountsym = Elf_r_sym(relp); + } if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { - uint_t const addend = - _w(_w(relp->r_offset) - recval + mcount_adjust); + uint_t const addend = _w(_w(relp->r_offset) - recval); + mrelp->r_offset = _w(offbase + ((void *)mlocp - (void *)mloc0)); Elf_r_info(mrelp, recsym, reltype); - if (rel_entsize == sizeof(Elf_Rela)) { + if (sizeof(Elf_Rela) == rel_entsize) { ((Elf_Rela *)mrelp)->r_addend = addend; *mlocp++ = 0; } else @@ -341,63 +304,6 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, return mlocp; } -/* - * Read the relocation table again, but this time its called on sections - * that are not going to be traced. The mcount calls here will be converted - * into nops. 
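/*
 * The make_nop_x86() helper removed earlier in this file and the nop_mcount()
 * pass removed just below rewrite x86 "call mcount" sites in place.  The
 * mcount relocation points at the 4-byte displacement of the call, so the
 * 0xe8 opcode sits one byte earlier; if the displacement is still zero (an
 * unresolved mcount call), the whole 5-byte instruction can be replaced with
 * the recommended 5-byte NOP 0f 1f 44 00 00.  A self-contained sketch of that
 * idea, patching an in-memory copy of the object instead of going through
 * ulseek()/uwrite():
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

static int nop_mcount_call(unsigned char *map, size_t reloc_offset)
{
	unsigned char *insn = map + reloc_offset - 1;	/* the 0xe8 opcode byte */
	uint32_t disp;

	memcpy(&disp, map + reloc_offset, sizeof(disp));	/* unaligned-safe read */
	if (insn[0] != 0xe8 || disp != 0)
		return -1;		/* not an unresolved mcount call; leave it alone */

	memcpy(insn, nop5, sizeof(nop5));
	return 0;
}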
- */ -static void nop_mcount(Elf_Shdr const *const relhdr, - Elf_Ehdr const *const ehdr, - const char *const txtname) -{ - Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) - + (void *)ehdr); - Elf_Sym const *sym0; - char const *str0; - Elf_Rel const *relp; - Elf_Shdr const *const shdr = &shdr0[w(relhdr->sh_info)]; - unsigned rel_entsize = _w(relhdr->sh_entsize); - unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; - unsigned mcountsym = 0; - unsigned t; - int once = 0; - - get_sym_str_and_relp(relhdr, ehdr, &sym0, &str0, &relp); - - for (t = nrel; t; --t) { - int ret = -1; - - if (!mcountsym) - mcountsym = get_mcountsym(sym0, relp, str0); - - if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { - if (make_nop) - ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset); - if (warn_on_notrace_sect && !once) { - printf("Section %s has mcount callers being ignored\n", - txtname); - once = 1; - /* just warn? */ - if (!make_nop) - return; - } - } - - /* - * If we successfully removed the mcount, mark the relocation - * as a nop (don't do anything with it). - */ - if (!ret) { - Elf_Rel rel; - rel = *(Elf_Rel *)relp; - Elf_r_info(&rel, Elf_r_sym(relp), rel_type_nop); - ulseek(fd_map, (void *)relp - (void *)ehdr, SEEK_SET); - uwrite(fd_map, &rel, sizeof(rel)); - } - relp = (Elf_Rel const *)(rel_entsize + (void *)relp); - } -} - /* * Find a symbol in the given section, to be used as the base for relocating @@ -448,13 +354,13 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */ Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)]; char const *const txtname = &shstrtab[w(txthdr->sh_name)]; - if (strcmp("__mcount_loc", txtname) == 0) { + if (0 == strcmp("__mcount_loc", txtname)) { fprintf(stderr, "warning: __mcount_loc already exists: %s\n", fname); succeed_file(); } - if (w(txthdr->sh_type) != SHT_PROGBITS || - !(w(txthdr->sh_flags) & SHF_EXECINSTR)) + if (SHT_PROGBITS != w(txthdr->sh_type) || + !is_mcounted_section_name(txtname)) return NULL; return txtname; } @@ -464,7 +370,7 @@ static char const *has_rel_mcount(Elf_Shdr const *const relhdr, char const *const shstrtab, char const *const fname) { - if (w(relhdr->sh_type) != SHT_REL && w(relhdr->sh_type) != SHT_RELA) + if (SHT_REL != w(relhdr->sh_type) && SHT_RELA != w(relhdr->sh_type)) return NULL; return __has_rel_mcount(relhdr, shdr0, shstrtab, fname); } @@ -477,11 +383,9 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, { unsigned totrelsz = 0; Elf_Shdr const *shdrp = shdr0; - char const *txtname; for (; nhdr; --nhdr, ++shdrp) { - txtname = has_rel_mcount(shdrp, shdr0, shstrtab, fname); - if (txtname && is_mcounted_section_name(txtname)) + if (has_rel_mcount(shdrp, shdr0, shstrtab, fname)) totrelsz += _w(shdrp->sh_size); } return totrelsz; @@ -517,7 +421,7 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); - if (txtname && is_mcounted_section_name(txtname)) { + if (txtname) { uint_t recval = 0; unsigned const recsym = find_secsym_ndx( w(relhdr->sh_info), txtname, &recval, @@ -528,12 +432,6 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) mlocp = sift_rel_mcount(mlocp, (void *)mlocp - (void *)mloc0, &mrelp, relhdr, ehdr, recsym, recval, reltype); - } else if (txtname && (warn_on_notrace_sect || make_nop)) { - /* - * This section is ignored by ftrace, but still - * has mcount calls. 
Convert them to nops now. - */ - nop_mcount(relhdr, ehdr, txtname); } } if (mloc0 != mlocp) { diff --git a/trunk/scripts/recordmcount.pl b/trunk/scripts/recordmcount.pl index 858966ab019c..4be0deea71ca 100755 --- a/trunk/scripts/recordmcount.pl +++ b/trunk/scripts/recordmcount.pl @@ -134,7 +134,6 @@ ".sched.text" => 1, ".spinlock.text" => 1, ".irqentry.text" => 1, - ".kprobes.text" => 1, ".text.unlikely" => 1, ); @@ -223,7 +222,6 @@ sub check_objcopy $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; $type = ".quad"; $alignment = 8; - $mcount_adjust = -1; # force flags for this arch $ld .= " -m elf_x86_64"; @@ -233,7 +231,6 @@ sub check_objcopy } elsif ($arch eq "i386") { $alignment = 4; - $mcount_adjust = -1; # force flags for this arch $ld .= " -m elf_i386"; @@ -243,14 +240,12 @@ sub check_objcopy } elsif ($arch eq "s390" && $bits == 32) { $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$"; - $mcount_adjust = -4; $alignment = 4; $ld .= " -m elf_s390"; $cc .= " -m31"; } elsif ($arch eq "s390" && $bits == 64) { $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; - $mcount_adjust = -8; $alignment = 8; $type = ".quad"; $ld .= " -m elf64_s390"; diff --git a/trunk/security/selinux/hooks.c b/trunk/security/selinux/hooks.c index 8fb248843009..f7cf0ea6faea 100644 --- a/trunk/security/selinux/hooks.c +++ b/trunk/security/selinux/hooks.c @@ -1578,8 +1578,7 @@ static int may_create(struct inode *dir, return rc; if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { - rc = security_transition_sid(sid, dsec->sid, tclass, - &dentry->d_name, &newsid); + rc = security_transition_sid(sid, dsec->sid, tclass, NULL, &newsid); if (rc) return rc; } diff --git a/trunk/security/selinux/ss/policydb.c b/trunk/security/selinux/ss/policydb.c index 7102457661d6..e7b850ad57ee 100644 --- a/trunk/security/selinux/ss/policydb.c +++ b/trunk/security/selinux/ss/policydb.c @@ -502,7 +502,7 @@ static int policydb_index(struct policydb *p) goto out; rc = flex_array_prealloc(p->type_val_to_struct_array, 0, - p->p_types.nprim, GFP_KERNEL | __GFP_ZERO); + p->p_types.nprim - 1, GFP_KERNEL | __GFP_ZERO); if (rc) goto out; @@ -519,7 +519,7 @@ static int policydb_index(struct policydb *p) goto out; rc = flex_array_prealloc(p->sym_val_to_name[i], - 0, p->symtab[i].nprim, + 0, p->symtab[i].nprim - 1, GFP_KERNEL | __GFP_ZERO); if (rc) goto out; @@ -1819,6 +1819,8 @@ static int filename_trans_read(struct policydb *p, void *fp) goto out; nel = le32_to_cpu(buf[0]); + printk(KERN_ERR "%s: nel=%d\n", __func__, nel); + last = p->filename_trans; while (last && last->next) last = last->next; @@ -1855,6 +1857,8 @@ static int filename_trans_read(struct policydb *p, void *fp) goto out; name[len] = 0; + printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name); + rc = next_entry(buf, fp, sizeof(u32) * 4); if (rc) goto out; @@ -2371,7 +2375,7 @@ int policydb_read(struct policydb *p, void *fp) goto bad; /* preallocate so we don't have to worry about the put ever failing */ - rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim, + rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim - 1, GFP_KERNEL | __GFP_ZERO); if (rc) goto bad; diff --git a/trunk/sound/aoa/codecs/tas.c b/trunk/sound/aoa/codecs/tas.c index fd2188c3df2b..58804c7acfcf 100644 --- a/trunk/sound/aoa/codecs/tas.c +++ b/trunk/sound/aoa/codecs/tas.c @@ -170,7 +170,7 @@ static void tas_set_volume(struct tas *tas) /* analysing the volume and mixer tables shows * that 
they are similar enough when we shift * the mixer table down by 4 bits. The error - * is miniscule, in just one item the error + * is minuscule, in just one item the error * is 1, at a value of 0x07f17b (mixer table * value is 0x07f17a) */ tmp = tas_gaintable[left]; diff --git a/trunk/sound/pci/au88x0/au88x0_pcm.c b/trunk/sound/pci/au88x0/au88x0_pcm.c index 62e959120c44..33f0ba5559a7 100644 --- a/trunk/sound/pci/au88x0/au88x0_pcm.c +++ b/trunk/sound/pci/au88x0/au88x0_pcm.c @@ -44,10 +44,10 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_adb = { .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 0x10000, - .period_bytes_min = 0x20, + .period_bytes_min = 0x1, .period_bytes_max = 0x1000, .periods_min = 2, - .periods_max = 1024, + .periods_max = 32, }; #ifndef CHIP_AU8820 @@ -140,9 +140,6 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream) SNDRV_PCM_HW_PARAM_PERIOD_BYTES)) < 0) return err; - snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 64); - if (VORTEX_PCM_TYPE(substream->pcm) != VORTEX_PCM_WT) { #ifndef CHIP_AU8820 if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_A3D) { diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c index c82979a8cd09..d3bd2c10180f 100644 --- a/trunk/sound/pci/hda/patch_realtek.c +++ b/trunk/sound/pci/hda/patch_realtek.c @@ -1704,11 +1704,11 @@ static void alc_apply_fixup(struct hda_codec *codec, int action) codec->chip_name, fix->type); break; } - if (!fix->chained) + if (!fix[id].chained) break; if (++depth > 10) break; - id = fix->chain_id; + id = fix[id].chain_id; } } @@ -5645,7 +5645,6 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids, static struct snd_pci_quirk beep_white_list[] = { SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), - SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1), SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), {} }; @@ -9864,7 +9863,6 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = { SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD), SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL), SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch), - SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG), SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG), SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG), @@ -10701,6 +10699,7 @@ enum { PINFIX_LENOVO_Y530, PINFIX_PB_M5210, PINFIX_ACER_ASPIRE_7736, + PINFIX_GIGABYTE_880GM, }; static const struct alc_fixup alc882_fixups[] = { @@ -10732,6 +10731,13 @@ static const struct alc_fixup alc882_fixups[] = { .type = ALC_FIXUP_SKU, .v.sku = ALC_FIXUP_SKU_IGNORE, }, + [PINFIX_GIGABYTE_880GM] = { + .type = ALC_FIXUP_PINS, + .v.pins = (const struct alc_pincfg[]) { + { 0x14, 0x1114410 }, /* set as speaker */ + { } + } + }, }; static struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -10739,6 +10745,7 @@ static struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736), + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", PINFIX_GIGABYTE_880GM), {} }; @@ -18798,8 +18805,6 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = { ALC662_3ST_6ch_DIG), SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba NB20x", ALC662_AUTO), SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10), - SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L", - 
ALC662_3ST_6ch_DIG), SND_PCI_QUIRK(0x152d, 0x2304, "Quanta WH1", ALC663_ASUS_H13), SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG), SND_PCI_QUIRK(0x1631, 0xc10c, "PB RS65", ALC663_ASUS_M51VA), @@ -19473,7 +19478,7 @@ enum { ALC662_FIXUP_IDEAPAD, ALC272_FIXUP_MARIO, ALC662_FIXUP_CZC_P10T, - ALC662_FIXUP_SKU_IGNORE, + ALC662_FIXUP_GIGABYTE, }; static const struct alc_fixup alc662_fixups[] = { @@ -19502,17 +19507,20 @@ static const struct alc_fixup alc662_fixups[] = { {} } }, - [ALC662_FIXUP_SKU_IGNORE] = { - .type = ALC_FIXUP_SKU, - .v.sku = ALC_FIXUP_SKU_IGNORE, + [ALC662_FIXUP_GIGABYTE] = { + .type = ALC_FIXUP_PINS, + .v.pins = (const struct alc_pincfg[]) { + { 0x14, 0x1114410 }, /* set as speaker */ + { } + } }, }; static struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), - SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", ALC662_FIXUP_GIGABYTE), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), diff --git a/trunk/sound/pci/hda/patch_via.c b/trunk/sound/pci/hda/patch_via.c index 0997031c48d2..1371b57c11e8 100644 --- a/trunk/sound/pci/hda/patch_via.c +++ b/trunk/sound/pci/hda/patch_via.c @@ -1292,18 +1292,14 @@ static void notify_aa_path_ctls(struct hda_codec *codec) { int i; struct snd_ctl_elem_id id; - const char *labels[] = {"Mic", "Front Mic", "Line", "Rear Mic"}; - struct snd_kcontrol *ctl; + const char *labels[] = {"Mic", "Front Mic", "Line"}; memset(&id, 0, sizeof(id)); id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; for (i = 0; i < ARRAY_SIZE(labels); i++) { sprintf(id.name, "%s Playback Volume", labels[i]); - ctl = snd_hda_find_mixer_ctl(codec, id.name); - if (ctl) - snd_ctl_notify(codec->bus->card, - SNDRV_CTL_EVENT_MASK_VALUE, - &ctl->id); + snd_ctl_notify(codec->bus->card, SNDRV_CTL_EVENT_MASK_VALUE, + &id); } } diff --git a/trunk/sound/soc/codecs/ssm2602.c b/trunk/sound/soc/codecs/ssm2602.c index b04d28039c16..2727befd158e 100644 --- a/trunk/sound/soc/codecs/ssm2602.c +++ b/trunk/sound/soc/codecs/ssm2602.c @@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0), SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1), SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0), -SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0), +SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0), SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1), SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1), @@ -602,7 +602,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = { .read = ssm2602_read_reg_cache, .write = ssm2602_write, .set_bias_level = ssm2602_set_bias_level, - .reg_cache_size = ARRAY_SIZE(ssm2602_reg), + .reg_cache_size = sizeof(ssm2602_reg), .reg_word_size = sizeof(u16), .reg_cache_default = ssm2602_reg, }; @@ -614,7 +614,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = { * low = 0x1a * high = 0x1b */ -static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c, +static int ssm2602_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct ssm2602_priv *ssm2602; @@ -635,7 +635,7 @@ static int __devinit 
ssm2602_i2c_probe(struct i2c_client *i2c, return ret; } -static int __devexit ssm2602_i2c_remove(struct i2c_client *client) +static int ssm2602_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); kfree(i2c_get_clientdata(client)); @@ -655,7 +655,7 @@ static struct i2c_driver ssm2602_i2c_driver = { .owner = THIS_MODULE, }, .probe = ssm2602_i2c_probe, - .remove = __devexit_p(ssm2602_i2c_remove), + .remove = ssm2602_i2c_remove, .id_table = ssm2602_i2c_id, }; #endif diff --git a/trunk/sound/soc/codecs/uda134x.c b/trunk/sound/soc/codecs/uda134x.c index a7b8f301bad3..48ffd406a71d 100644 --- a/trunk/sound/soc/codecs/uda134x.c +++ b/trunk/sound/soc/codecs/uda134x.c @@ -601,7 +601,9 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = { .reg_cache_step = 1, .read = uda134x_read_reg_cache, .write = uda134x_write, +#ifdef POWER_OFF_ON_STANDBY .set_bias_level = uda134x_set_bias_level, +#endif }; static int __devinit uda134x_codec_probe(struct platform_device *pdev) diff --git a/trunk/sound/soc/codecs/wm8903.c b/trunk/sound/soc/codecs/wm8903.c index 824d1c8c8a35..f52b623bb692 100644 --- a/trunk/sound/soc/codecs/wm8903.c +++ b/trunk/sound/soc/codecs/wm8903.c @@ -692,7 +692,7 @@ SOC_ENUM("DRC Smoothing Threshold", drc_smoothing), SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, - WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv), + WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv), SOC_ENUM("ADC Companding Mode", adc_companding), SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0), diff --git a/trunk/sound/soc/davinci/davinci-mcasp.c b/trunk/sound/soc/davinci/davinci-mcasp.c index 4ddc6d3b6678..a5af834c8ef5 100644 --- a/trunk/sound/soc/davinci/davinci-mcasp.c +++ b/trunk/sound/soc/davinci/davinci-mcasp.c @@ -434,21 +434,17 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai, mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); - mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, - ACLKX | AHCLKX | AFSX); + mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x7 << 26)); break; case SND_SOC_DAIFMT_CBM_CFS: /* codec is clock master and frame slave */ - mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE); + mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE); mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE); - mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); + mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); - mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, - ACLKX | ACLKR); - mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, - AFSX | AFSR); + mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x2d << 26)); break; case SND_SOC_DAIFMT_CBM_CFM: /* codec is clock and frame master */ @@ -458,8 +454,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai, mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); - mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, - ACLKX | AHCLKX | AFSX | ACLKR | AHCLKR | AFSR); + mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, (0x3f << 26)); break; default: @@ -649,7 +644,7 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream) mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask); mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD); - if ((dev->tdm_slots >= 2) 
&& (dev->tdm_slots <= 32)) + if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32)) mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXMOD(dev->tdm_slots), FSXMOD(0x1FF)); else @@ -665,7 +660,7 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream) AHCLKRE); mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask); - if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32)) + if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32)) mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRMOD(dev->tdm_slots), FSRMOD(0x1FF)); else diff --git a/trunk/sound/soc/jz4740/jz4740-i2s.c b/trunk/sound/soc/jz4740/jz4740-i2s.c index cd22a54b2f14..419bf4f5534a 100644 --- a/trunk/sound/soc/jz4740/jz4740-i2s.c +++ b/trunk/sound/soc/jz4740/jz4740-i2s.c @@ -133,7 +133,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream, struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; - if (dai->active) + if (!dai->active) return; conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); diff --git a/trunk/sound/soc/mid-x86/sst_platform.c b/trunk/sound/soc/mid-x86/sst_platform.c index 6b1f9d3bf34e..d567c322a2fb 100644 --- a/trunk/sound/soc/mid-x86/sst_platform.c +++ b/trunk/sound/soc/mid-x86/sst_platform.c @@ -376,11 +376,6 @@ static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream, return 0; } -static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream) -{ - return snd_pcm_lib_free_pages(substream); -} - static struct snd_pcm_ops sst_platform_ops = { .open = sst_platform_open, .close = sst_platform_close, @@ -389,7 +384,6 @@ static struct snd_pcm_ops sst_platform_ops = { .trigger = sst_platform_pcm_trigger, .pointer = sst_platform_pcm_pointer, .hw_params = sst_platform_pcm_hw_params, - .hw_free = sst_platform_pcm_hw_free, }; static void sst_pcm_free(struct snd_pcm *pcm) diff --git a/trunk/sound/soc/samsung/goni_wm8994.c b/trunk/sound/soc/samsung/goni_wm8994.c index 0e80daee8b6f..f6b3a3ce5919 100644 --- a/trunk/sound/soc/samsung/goni_wm8994.c +++ b/trunk/sound/soc/samsung/goni_wm8994.c @@ -236,18 +236,18 @@ static struct snd_soc_dai_link goni_dai[] = { .name = "WM8994", .stream_name = "WM8994 HiFi", .cpu_dai_name = "samsung-i2s.0", - .codec_dai_name = "wm8994-aif1", + .codec_dai_name = "wm8994-hifi", .platform_name = "samsung-audio", - .codec_name = "wm8994-codec.0-001a", + .codec_name = "wm8994-codec.0-0x1a", .init = goni_wm8994_init, .ops = &goni_hifi_ops, }, { .name = "WM8994 Voice", .stream_name = "Voice", .cpu_dai_name = "goni-voice-dai", - .codec_dai_name = "wm8994-aif2", + .codec_dai_name = "wm8994-voice", .platform_name = "samsung-audio", - .codec_name = "wm8994-codec.0-001a", + .codec_name = "wm8994-codec.0-0x1a", .ops = &goni_voice_ops, }, }; diff --git a/trunk/sound/soc/soc-core.c b/trunk/sound/soc/soc-core.c index dd55d1069468..d8562ce4de7a 100644 --- a/trunk/sound/soc/soc-core.c +++ b/trunk/sound/soc/soc-core.c @@ -3291,8 +3291,6 @@ int snd_soc_register_card(struct snd_soc_card *card) if (!card->name || !card->dev) return -EINVAL; - dev_set_drvdata(card->dev, card); - snd_soc_initialize_card_lists(card); soc_init_card_debugfs(card); diff --git a/trunk/sound/usb/format.c b/trunk/sound/usb/format.c index f079b5e2ab28..5b792d2c8061 100644 --- a/trunk/sound/usb/format.c +++ b/trunk/sound/usb/format.c @@ -176,11 +176,9 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof if (!rate) continue; /* C-Media CM6501 mislabels its 96 kHz altsetting */ - /* Terratec Aureon 7.1 USB C-Media 6206, too */ if (rate == 48000 
&& nr_rates == 1 && (chip->usb_id == USB_ID(0x0d8c, 0x0201) || - chip->usb_id == USB_ID(0x0d8c, 0x0102) || - chip->usb_id == USB_ID(0x0ccd, 0x00b1)) && + chip->usb_id == USB_ID(0x0d8c, 0x0102)) && fp->altsetting == 5 && fp->maxpacksize == 392) rate = 96000; /* Creative VF0470 Live Cam reports 16 kHz instead of 8kHz */ diff --git a/trunk/sound/usb/quirks.c b/trunk/sound/usb/quirks.c index 1b94ec3a3368..ec07e62e53f3 100644 --- a/trunk/sound/usb/quirks.c +++ b/trunk/sound/usb/quirks.c @@ -533,7 +533,6 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev, case USB_ID(0x0d8c, 0x0102): /* C-Media CM6206 / CM106-Like Sound Device */ - case USB_ID(0x0ccd, 0x00b1): /* Terratec Aureon 7.1 USB */ return snd_usb_cm6206_boot_quirk(dev); case USB_ID(0x133e, 0x0815): diff --git a/trunk/tools/perf/Documentation/perf-script-perl.txt b/trunk/tools/perf/Documentation/perf-script-perl.txt index 3152cca15501..5bb41e55a3ac 100644 --- a/trunk/tools/perf/Documentation/perf-script-perl.txt +++ b/trunk/tools/perf/Documentation/perf-script-perl.txt @@ -63,6 +63,7 @@ The format file for the sched_wakep event defines the following fields field:unsigned char common_flags; field:unsigned char common_preempt_count; field:int common_pid; + field:int common_lock_depth; field:char comm[TASK_COMM_LEN]; field:pid_t pid; diff --git a/trunk/tools/perf/Documentation/perf-script-python.txt b/trunk/tools/perf/Documentation/perf-script-python.txt index 471022069119..36b38277422c 100644 --- a/trunk/tools/perf/Documentation/perf-script-python.txt +++ b/trunk/tools/perf/Documentation/perf-script-python.txt @@ -463,6 +463,7 @@ The format file for the sched_wakep event defines the following fields field:unsigned char common_flags; field:unsigned char common_preempt_count; field:int common_pid; + field:int common_lock_depth; field:char comm[TASK_COMM_LEN]; field:pid_t pid; diff --git a/trunk/tools/perf/Documentation/perf-script.txt b/trunk/tools/perf/Documentation/perf-script.txt index 86c87e214b11..66f040b30729 100644 --- a/trunk/tools/perf/Documentation/perf-script.txt +++ b/trunk/tools/perf/Documentation/perf-script.txt @@ -113,61 +113,13 @@ OPTIONS Do various checks like samples ordering and lost events. -f:: ---fields:: +--fields Comma separated list of fields to print. Options are: comm, tid, pid, time, cpu, event, trace, sym. Field - list can be prepended with the type, trace, sw or hw, + list must be prepended with the type, trace, sw or hw, to indicate to which event type the field list applies. e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace - perf script -f - - is equivalent to: - - perf script -f trace: -f sw: -f hw: - - i.e., the specified fields apply to all event types if the type string - is not given. - - The arguments are processed in the order received. A later usage can - reset a prior request. e.g.: - - -f trace: -f comm,tid,time,sym - - The first -f suppresses trace events (field list is ""), but then the - second invocation sets the fields to comm,tid,time,sym. In this case a - warning is given to the user: - - "Overriding previous field request for all events." - - Alternativey, consider the order: - - -f comm,tid,time,sym -f trace: - - The first -f sets the fields for all events and the second -f - suppresses trace events. The user is given a warning message about - the override, and the result of the above is that only S/W and H/W - events are displayed with the given fields. 
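The paragraphs above describe the "type:fields" form of -f and its override
ordering that this hunk removes from the documentation. As a rough, standalone
illustration of that parsing behaviour (a simplified sketch, not the
builtin-script.c implementation that appears further down in this patch), the
option handling boils down to something like:

	#include <stdio.h>
	#include <string.h>

	enum { TYPE_HW, TYPE_SW, TYPE_TRACE, TYPE_RAW, TYPE_MAX };

	static const char *type_names[TYPE_MAX] = { "hw", "sw", "trace", "raw" };
	static const char *field_names[] = {
		"comm", "tid", "pid", "time", "cpu", "event", "trace", "sym"
	};
	#define NR_FIELDS (sizeof(field_names) / sizeof(field_names[0]))

	static unsigned int fields[TYPE_MAX];	/* selected-field mask per event type */

	static int parse_fields(char *arg)
	{
		char *list = strchr(arg, ':');
		int type = -1;
		unsigned int i;
		char *tok;

		if (list) {			/* "hw:", "sw:", "trace:" or "raw:" prefix */
			*list++ = '\0';
			for (i = 0; i < TYPE_MAX; i++)
				if (strcmp(arg, type_names[i]) == 0)
					type = i;
			if (type < 0)
				return -1;	/* unknown event type */
			fields[type] = 0;	/* a later -f resets an earlier request */
		} else {			/* no prefix: the list applies to all types */
			list = arg;
			for (i = 0; i < TYPE_MAX; i++)
				fields[i] = 0;
		}

		/* "-f trace:" with an empty list simply leaves the mask at 0 */
		for (tok = strtok(list, ","); tok; tok = strtok(NULL, ",")) {
			unsigned int bit = 0;

			for (i = 0; i < NR_FIELDS; i++)
				if (strcmp(tok, field_names[i]) == 0)
					bit = 1u << i;
			if (!bit)
				return -1;	/* invalid field requested */
			if (type >= 0)
				fields[type] |= bit;
			else
				for (i = 0; i < TYPE_MAX; i++)
					fields[i] |= bit;
		}
		return 0;
	}

	int main(void)
	{
		char a[] = "sw:comm,tid,time,sym";
		char b[] = "trace:time,cpu,trace";

		printf("%d %d -> sw mask 0x%x, trace mask 0x%x\n",
		       parse_fields(a), parse_fields(b),
		       fields[TYPE_SW], fields[TYPE_TRACE]);
		return 0;
	}

Feeding it the documented examples, "sw:comm,tid,time,sym" and
"trace:time,cpu,trace", sets the two masks independently, while an argument
without a type prefix would reset and apply to every type at once.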
- - For the 'wildcard' option if a user selected field is invalid for an - event type, a message is displayed to the user that the option is - ignored for that type. For example: - - $ perf script -f comm,tid,trace - 'trace' not valid for hardware events. Ignoring. - 'trace' not valid for software events. Ignoring. - - Alternatively, if the type is given an invalid field is specified it - is an error. For example: - - perf script -v -f sw:comm,tid,trace - 'trace' not valid for software events. - - At this point usage is displayed, and perf-script exits. - - Finally, a user may not set fields to none for all event types. - i.e., -f "" is not allowed. - -k:: --vmlinux=:: vmlinux pathname diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index 1455413ec7a7..207dee5c5b16 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -5,8 +5,6 @@ endif # The default target of this Makefile is... all: -include config/utilities.mak - ifneq ($(OUTPUT),) # check that the output directory actually exists OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) @@ -15,12 +13,6 @@ endif # Define V to have a more verbose compile. # -# Define PYTHON to point to the python binary if the default -# `python' is not correct; for example: PYTHON=python2 -# -# Define PYTHON_CONFIG to point to the python-config binary if -# the default `$(PYTHON)-config' is not correct. -# # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 # # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. @@ -43,21 +35,15 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ -e s/sh[234].*/sh/ ) -CC = $(CROSS_COMPILE)gcc -AR = $(CROSS_COMPILE)ar - # Additional ARCH settings for x86 ifeq ($(ARCH),i386) ARCH := x86 endif ifeq ($(ARCH),x86_64) - ARCH := x86 - IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1) - ifeq (${IS_X86_64}, 1) - RAW_ARCH := x86_64 - ARCH_CFLAGS := -DARCH_X86_64 - ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S - endif + RAW_ARCH := x86_64 + ARCH := x86 + ARCH_CFLAGS := -DARCH_X86_64 + ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S endif # @@ -133,6 +119,8 @@ lib = lib export prefix bindir sharedir sysconfdir +CC = $(CROSS_COMPILE)gcc +AR = $(CROSS_COMPILE)ar RM = rm -f MKDIR = mkdir FIND = find @@ -142,7 +130,7 @@ INSTALL = install # explicitly what architecture to check for. Fix this up for yours.. 
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ --include config/feature-tests.mak +-include feature-tests.mak ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) CFLAGS := $(CFLAGS) -fstack-protector-all @@ -177,10 +165,12 @@ grep-libs = $(filter -l%,$(1)) strip-libs = $(filter-out -l%,$(1)) $(OUTPUT)python/perf.so: $(PYRF_OBJS) - $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ - --quiet build_ext \ - --build-lib='$(OUTPUT)python' \ - --build-temp='$(OUTPUT)python/temp' + $(QUIET_GEN)( \ + export CFLAGS="$(BASIC_CFLAGS)"; \ + python util/setup.py --quiet build_ext --build-lib='$(OUTPUT)python' \ + --build-temp='$(OUTPUT)python/temp' \ + ) + # # No Perl scripts right now: # @@ -485,74 +475,24 @@ else endif endif -disable-python = $(eval $(disable-python_code)) -define disable-python_code - BASIC_CFLAGS += -DNO_LIBPYTHON - $(if $(1),$(warning No $(1) was found)) - $(warning Python support won't be built) -endef - -override PYTHON := \ - $(call get-executable-or-default,PYTHON,python) - -ifndef PYTHON - $(call disable-python,python interpreter) - python-clean := +ifdef NO_LIBPYTHON + BASIC_CFLAGS += -DNO_LIBPYTHON else - - PYTHON_WORD := $(call shell-wordify,$(PYTHON)) - - python-clean := $(PYTHON_WORD) util/setup.py clean \ - --build-lib='$(OUTPUT)python' \ - --build-temp='$(OUTPUT)python/temp' - - ifdef NO_LIBPYTHON - $(call disable-python) - else - - override PYTHON_CONFIG := \ - $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config) - - ifndef PYTHON_CONFIG - $(call disable-python,python-config tool) - else - - PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) - - PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) - PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) - PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) - PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) - FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) - - ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) - $(call disable-python,Python.h (for Python 2.x)) - else - - ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED)),y) - $(warning Python 3 is not yet supported; please set) - $(warning PYTHON and/or PYTHON_CONFIG appropriately.) 
- $(warning If you also have Python 2 installed, then) - $(warning try something like:) - $(warning $(and ,)) - $(warning $(and ,) make PYTHON=python2) - $(warning $(and ,)) - $(warning Otherwise, disable Python support entirely:) - $(warning $(and ,)) - $(warning $(and ,) make NO_LIBPYTHON=1) - $(warning $(and ,)) - $(error $(and ,)) - else - ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) - EXTLIBS += $(PYTHON_EMBED_LIBADD) - LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o - LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o - LANG_BINDINGS += $(OUTPUT)python/perf.so - endif - - endif - endif - endif + PYTHON_EMBED_LDOPTS = $(shell python-config --ldflags 2>/dev/null) + PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null` + FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) + ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) + msg := $(warning No Python.h found, install python-dev[el] to have python support in 'perf script' and to build the python bindings) + BASIC_CFLAGS += -DNO_LIBPYTHON + else + ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) + EXTLIBS += $(PYTHON_EMBED_LIBADD) + LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o + LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o + LANG_BINDINGS += $(OUTPUT)python/perf.so + endif endif ifdef NO_DEMANGLE @@ -893,7 +833,8 @@ clean: $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(MAKE) -C Documentation/ clean $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS - $(python-clean) + @python util/setup.py clean --build-lib='$(OUTPUT)python' \ + --build-temp='$(OUTPUT)python/temp' .PHONY: all install clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell diff --git a/trunk/tools/perf/builtin-record.c b/trunk/tools/perf/builtin-record.c index 0974f957b8fa..416538248a4b 100644 --- a/trunk/tools/perf/builtin-record.c +++ b/trunk/tools/perf/builtin-record.c @@ -427,7 +427,7 @@ static void mmap_read_all(void) { int i; - for (i = 0; i < evsel_list->nr_mmaps; i++) { + for (i = 0; i < evsel_list->cpus->nr; i++) { if (evsel_list->mmap[i].base) mmap_read(&evsel_list->mmap[i]); } diff --git a/trunk/tools/perf/builtin-script.c b/trunk/tools/perf/builtin-script.c index 974f6d3f4e53..ac574ea23917 100644 --- a/trunk/tools/perf/builtin-script.c +++ b/trunk/tools/perf/builtin-script.c @@ -49,169 +49,57 @@ struct output_option { }; /* default set to maintain compatibility with current format */ -static struct { - bool user_set; - bool wildcard_set; - u64 fields; - u64 invalid_fields; -} output[PERF_TYPE_MAX] = { - - [PERF_TYPE_HARDWARE] = { - .user_set = false, - - .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, - - .invalid_fields = PERF_OUTPUT_TRACE, - }, - - [PERF_TYPE_SOFTWARE] = { - .user_set = false, - - .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, - - .invalid_fields = PERF_OUTPUT_TRACE, - }, - - [PERF_TYPE_TRACEPOINT] = { - .user_set = false, - - .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, - }, - - [PERF_TYPE_RAW] = { - .user_set = false, - - .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | - 
PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, - - .invalid_fields = PERF_OUTPUT_TRACE, - }, +static u64 output_fields[PERF_TYPE_MAX] = { + [PERF_TYPE_HARDWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ + PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, + + [PERF_TYPE_SOFTWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ + PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, + + [PERF_TYPE_TRACEPOINT] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ + PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, }; -static bool output_set_by_user(void) -{ - int j; - for (j = 0; j < PERF_TYPE_MAX; ++j) { - if (output[j].user_set) - return true; - } - return false; -} - -static const char *output_field2str(enum perf_output_field field) -{ - int i, imax = ARRAY_SIZE(all_output_options); - const char *str = ""; - - for (i = 0; i < imax; ++i) { - if (all_output_options[i].field == field) { - str = all_output_options[i].str; - break; - } - } - return str; -} +static bool output_set_by_user; -#define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x) +#define PRINT_FIELD(x) (output_fields[attr->type] & PERF_OUTPUT_##x) -static int perf_event_attr__check_stype(struct perf_event_attr *attr, - u64 sample_type, const char *sample_msg, - enum perf_output_field field) +static int perf_session__check_attr(struct perf_session *session, + struct perf_event_attr *attr) { - int type = attr->type; - const char *evname; - - if (attr->sample_type & sample_type) - return 0; - - if (output[type].user_set) { - evname = __event_name(attr->type, attr->config); - pr_err("Samples for '%s' event do not have %s attribute set. " - "Cannot print '%s' field.\n", - evname, sample_msg, output_field2str(field)); - return -1; - } - - /* user did not ask for it explicitly so remove from the default list */ - output[type].fields &= ~field; - evname = __event_name(attr->type, attr->config); - pr_debug("Samples for '%s' event do not have %s attribute set. 
" - "Skipping '%s' field.\n", - evname, sample_msg, output_field2str(field)); - - return 0; -} - -static int perf_evsel__check_attr(struct perf_evsel *evsel, - struct perf_session *session) -{ - struct perf_event_attr *attr = &evsel->attr; - if (PRINT_FIELD(TRACE) && !perf_session__has_traces(session, "record -R")) return -EINVAL; if (PRINT_FIELD(SYM)) { - if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP", - PERF_OUTPUT_SYM)) + if (!(session->sample_type & PERF_SAMPLE_IP)) { + pr_err("Samples do not contain IP data.\n"); return -EINVAL; - + } if (!no_callchain && - !(attr->sample_type & PERF_SAMPLE_CALLCHAIN)) + !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) symbol_conf.use_callchain = false; } if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && - perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID", - PERF_OUTPUT_TID|PERF_OUTPUT_PID)) + !(session->sample_type & PERF_SAMPLE_TID)) { + pr_err("Samples do not contain TID/PID data.\n"); return -EINVAL; + } if (PRINT_FIELD(TIME) && - perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME", - PERF_OUTPUT_TIME)) + !(session->sample_type & PERF_SAMPLE_TIME)) { + pr_err("Samples do not contain timestamps.\n"); return -EINVAL; + } if (PRINT_FIELD(CPU) && - perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU", - PERF_OUTPUT_CPU)) + !(session->sample_type & PERF_SAMPLE_CPU)) { + pr_err("Samples do not contain cpu.\n"); return -EINVAL; - - return 0; -} - -/* - * verify all user requested events exist and the samples - * have the expected data - */ -static int perf_session__check_output_opt(struct perf_session *session) -{ - int j; - struct perf_evsel *evsel; - - for (j = 0; j < PERF_TYPE_MAX; ++j) { - evsel = perf_session__find_first_evtype(session, j); - - /* - * even if fields is set to 0 (ie., show nothing) event must - * exist if user explicitly includes it on the command line - */ - if (!evsel && output[j].user_set && !output[j].wildcard_set) { - pr_err("%s events do not exist. " - "Remove corresponding -f option to proceed.\n", - event_type(j)); - return -1; - } - - if (evsel && output[j].fields && - perf_evsel__check_attr(evsel, session)) - return -1; } return 0; @@ -280,7 +168,10 @@ static void process_event(union perf_event *event __unused, { struct perf_event_attr *attr = &evsel->attr; - if (output[attr->type].fields == 0) + if (output_fields[attr->type] == 0) + return; + + if (perf_session__check_attr(session, attr) < 0) return; print_sample_start(sample, thread, attr); @@ -560,7 +451,6 @@ static int parse_output_fields(const struct option *opt __used, { char *tok; int i, imax = sizeof(all_output_options) / sizeof(struct output_option); - int j; int rc = 0; char *str = strdup(arg); int type = -1; @@ -568,99 +458,52 @@ static int parse_output_fields(const struct option *opt __used, if (!str) return -ENOMEM; - /* first word can state for which event type the user is specifying - * the fields. If no type exists, the specified fields apply to all - * event types found in the file minus the invalid fields for a type. 
- */ - tok = strchr(str, ':'); - if (tok) { - *tok = '\0'; - tok++; - if (!strcmp(str, "hw")) - type = PERF_TYPE_HARDWARE; - else if (!strcmp(str, "sw")) - type = PERF_TYPE_SOFTWARE; - else if (!strcmp(str, "trace")) - type = PERF_TYPE_TRACEPOINT; - else if (!strcmp(str, "raw")) - type = PERF_TYPE_RAW; - else { - fprintf(stderr, "Invalid event type in field string.\n"); - return -EINVAL; - } - - if (output[type].user_set) - pr_warning("Overriding previous field request for %s events.\n", - event_type(type)); - - output[type].fields = 0; - output[type].user_set = true; - output[type].wildcard_set = false; - - } else { - tok = str; - if (strlen(str) == 0) { - fprintf(stderr, - "Cannot set fields to 'none' for all event types.\n"); - rc = -EINVAL; - goto out; - } - - if (output_set_by_user()) - pr_warning("Overriding previous field request for all events.\n"); + tok = strtok(str, ":"); + if (!tok) { + fprintf(stderr, + "Invalid field string - not prepended with type."); + return -EINVAL; + } - for (j = 0; j < PERF_TYPE_MAX; ++j) { - output[j].fields = 0; - output[j].user_set = true; - output[j].wildcard_set = true; - } + /* first word should state which event type user + * is specifying the fields + */ + if (!strcmp(tok, "hw")) + type = PERF_TYPE_HARDWARE; + else if (!strcmp(tok, "sw")) + type = PERF_TYPE_SOFTWARE; + else if (!strcmp(tok, "trace")) + type = PERF_TYPE_TRACEPOINT; + else { + fprintf(stderr, "Invalid event type in field string."); + return -EINVAL; } - tok = strtok(tok, ","); - while (tok) { + output_fields[type] = 0; + while (1) { + tok = strtok(NULL, ","); + if (!tok) + break; for (i = 0; i < imax; ++i) { - if (strcmp(tok, all_output_options[i].str) == 0) + if (strcmp(tok, all_output_options[i].str) == 0) { + output_fields[type] |= all_output_options[i].field; break; + } } if (i == imax) { - fprintf(stderr, "Invalid field requested.\n"); + fprintf(stderr, "Invalid field requested."); rc = -EINVAL; - goto out; - } - - if (type == -1) { - /* add user option to all events types for - * which it is valid - */ - for (j = 0; j < PERF_TYPE_MAX; ++j) { - if (output[j].invalid_fields & all_output_options[i].field) { - pr_warning("\'%s\' not valid for %s events. Ignoring.\n", - all_output_options[i].str, event_type(j)); - } else - output[j].fields |= all_output_options[i].field; - } - } else { - if (output[type].invalid_fields & all_output_options[i].field) { - fprintf(stderr, "\'%s\' not valid for %s events.\n", - all_output_options[i].str, event_type(type)); - - rc = -EINVAL; - goto out; - } - output[type].fields |= all_output_options[i].field; + break; } - - tok = strtok(NULL, ","); } - if (type >= 0) { - if (output[type].fields == 0) { - pr_debug("No fields requested for %s type. " - "Events will not be displayed.\n", event_type(type)); - } + if (output_fields[type] == 0) { + pr_debug("No fields requested for %s type. " + "Events will not be displayed\n", event_type(type)); } -out: + output_set_by_user = true; + free(str); return rc; } @@ -986,7 +829,7 @@ static const struct option options[] = { OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_CALLBACK('f', "fields", NULL, "str", - "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,sym", + "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace. 
Fields: comm,tid,pid,time,cpu,event,trace,sym", parse_output_fields), OPT_END() @@ -1177,7 +1020,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) struct stat perf_stat; int input; - if (output_set_by_user()) { + if (output_set_by_user) { fprintf(stderr, "custom fields not supported for generated scripts"); return -1; @@ -1217,11 +1060,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) pr_debug("perf script started with script %s\n\n", script_name); } - - err = perf_session__check_output_opt(session); - if (err < 0) - goto out; - err = __cmd_script(session); perf_session__delete(session); diff --git a/trunk/tools/perf/builtin-stat.c b/trunk/tools/perf/builtin-stat.c index a9f06715e44d..03f0e45f1479 100644 --- a/trunk/tools/perf/builtin-stat.c +++ b/trunk/tools/perf/builtin-stat.c @@ -6,28 +6,24 @@ * * Sample output: - $ perf stat ./hackbench 10 + $ perf stat ~/hackbench 10 + Time: 0.104 - Time: 0.118 + Performance counter stats for '/home/mingo/hackbench': - Performance counter stats for './hackbench 10': + 1255.538611 task clock ticks # 10.143 CPU utilization factor + 54011 context switches # 0.043 M/sec + 385 CPU migrations # 0.000 M/sec + 17755 pagefaults # 0.014 M/sec + 3808323185 CPU cycles # 3033.219 M/sec + 1575111190 instructions # 1254.530 M/sec + 17367895 cache references # 13.833 M/sec + 7674421 cache misses # 6.112 M/sec - 1708.761321 task-clock # 11.037 CPUs utilized - 41,190 context-switches # 0.024 M/sec - 6,735 CPU-migrations # 0.004 M/sec - 17,318 page-faults # 0.010 M/sec - 5,205,202,243 cycles # 3.046 GHz - 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle - 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle - 2,603,501,247 instructions # 0.50 insns per cycle - # 1.48 stalled cycles per insn - 484,357,498 branches # 283.455 M/sec - 6,388,934 branch-misses # 1.32% of all branches - - 0.154822978 seconds time elapsed + Wall-clock time elapsed: 123.786620 msecs * - * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar + * Copyright (C) 2008, Red Hat Inc, Ingo Molnar * * Improvements and fixes by: * @@ -50,7 +46,6 @@ #include "util/evlist.h" #include "util/evsel.h" #include "util/debug.h" -#include "util/color.h" #include "util/header.h" #include "util/cpumap.h" #include "util/thread.h" @@ -70,107 +65,14 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, }; -/* - * Detailed stats (-d), covering the L1 and last level data caches: - */ -static struct perf_event_attr detailed_attrs[] = { - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_L1D << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_L1D << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, - - { .type = 
PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_LL << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_LL << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, -}; - -/* - * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: - */ -static struct perf_event_attr very_detailed_attrs[] = { - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_L1I << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_L1I << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_DTLB << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_DTLB << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_ITLB << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_ITLB << 0 | - (PERF_COUNT_HW_CACHE_OP_READ << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, - -}; - -/* - * Very, very detailed stats (-d -d -d), adding prefetch events: - */ -static struct perf_event_attr very_very_detailed_attrs[] = { - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_L1D << 0 | - (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, - - { .type = PERF_TYPE_HW_CACHE, - .config = - PERF_COUNT_HW_CACHE_L1D << 0 | - (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, -}; - - - struct perf_evlist *evsel_list; static bool system_wide = false; @@ -184,8 +86,6 @@ static pid_t target_pid = -1; static pid_t target_tid = -1; static pid_t child_pid = -1; static bool null_run = false; -static int detailed_run = 0; -static bool sync_run = false; static bool big_num = true; static int big_num_opt = -1; static const char *cpu_list; @@ -256,15 +156,7 @@ static double stddev_stats(struct stats *stats) struct stats runtime_nsecs_stats[MAX_NR_CPUS]; struct stats runtime_cycles_stats[MAX_NR_CPUS]; -struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS]; -struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; struct stats runtime_branches_stats[MAX_NR_CPUS]; -struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; -struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; -struct stats runtime_l1_icache_stats[MAX_NR_CPUS]; -struct stats runtime_ll_cache_stats[MAX_NR_CPUS]; -struct stats runtime_itlb_cache_stats[MAX_NR_CPUS]; -struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; struct stats walltime_nsecs_stats; static int create_perf_stat_counter(struct perf_evsel *evsel) @@ -300,37 +192,6 @@ static inline int nsec_counter(struct perf_evsel *evsel) return 0; } -/* - * Update various tracking values we maintain to print - * more semantic information such as miss/hit ratios, - * instruction rates, etc: - */ -static void update_shadow_stats(struct perf_evsel *counter, u64 *count) -{ - if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) - update_stats(&runtime_nsecs_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) - 
update_stats(&runtime_cycles_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) - update_stats(&runtime_stalled_cycles_front_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) - update_stats(&runtime_stalled_cycles_back_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) - update_stats(&runtime_branches_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) - update_stats(&runtime_cacherefs_stats[0], count[0]); - else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) - update_stats(&runtime_l1_dcache_stats[0], count[0]); - else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) - update_stats(&runtime_l1_icache_stats[0], count[0]); - else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL)) - update_stats(&runtime_ll_cache_stats[0], count[0]); - else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) - update_stats(&runtime_dtlb_cache_stats[0], count[0]); - else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) - update_stats(&runtime_itlb_cache_stats[0], count[0]); -} - /* * Read out the results of a single counter: * aggregate counts across CPUs in system-wide mode @@ -356,7 +217,12 @@ static int read_counter_aggr(struct perf_evsel *counter) /* * Save the full runtime - to allow normalization during printout: */ - update_shadow_stats(counter, count); + if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) + update_stats(&runtime_nsecs_stats[0], count[0]); + if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) + update_stats(&runtime_cycles_stats[0], count[0]); + if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) + update_stats(&runtime_branches_stats[0], count[0]); return 0; } @@ -376,7 +242,12 @@ static int read_counter(struct perf_evsel *counter) count = counter->counts->cpu[cpu].values; - update_shadow_stats(counter, count); + if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) + update_stats(&runtime_nsecs_stats[cpu], count[0]); + if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) + update_stats(&runtime_cycles_stats[cpu], count[0]); + if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) + update_stats(&runtime_branches_stats[cpu], count[0]); } return 0; @@ -444,18 +315,13 @@ static int run_perf_stat(int argc __used, const char **argv) list_for_each_entry(counter, &evsel_list->entries, node) { if (create_perf_stat_counter(counter) < 0) { - if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) { - if (verbose) - ui__warning("%s event is not supported by the kernel.\n", - event_name(counter)); - continue; - } - - if (errno == EPERM || errno == EACCES) { + if (errno == -EPERM || errno == -EACCES) { error("You may not have permission to collect %sstats.\n" "\t Consider tweaking" " /proc/sys/kernel/perf_event_paranoid or running as root.", system_wide ? "system-wide " : ""); + } else if (errno == ENOENT) { + error("%s event is not supported. ", event_name(counter)); } else { error("open_counter returned with %d (%s). 
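[Editorial note: run_perf_stat() maps perf_event_open() failures onto user-facing hints — EPERM/EACCES point at perf_event_paranoid, while ENOENT/ENOSYS/EINVAL mean the event is simply not supported. Note that errno is positive, so the restored `errno == -EPERM` comparison can never match. A stripped-down sketch of the same decision, calling the syscall directly since glibc has no wrapper; the choice of the cycles event is arbitrary.]

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.size   = sizeof(attr),
	};
	int fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);

	if (fd < 0) {
		if (errno == EPERM || errno == EACCES)
			fprintf(stderr, "no permission; check /proc/sys/kernel/perf_event_paranoid\n");
		else if (errno == ENOENT || errno == ENOSYS || errno == EINVAL)
			fprintf(stderr, "cycles event not supported here\n");
		else
			fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
		return 1;
	}
	close(fd);
	return 0;
}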
" "/bin/dmesg may provide additional information.\n", @@ -506,16 +372,6 @@ static int run_perf_stat(int argc __used, const char **argv) return WEXITSTATUS(status); } -static void print_noise_pct(double total, double avg) -{ - double pct = 0.0; - - if (avg) - pct = 100.0*total/avg; - - fprintf(stderr, " ( +-%6.2f%% )", pct); -} - static void print_noise(struct perf_evsel *evsel, double avg) { struct perf_stat *ps; @@ -524,14 +380,15 @@ static void print_noise(struct perf_evsel *evsel, double avg) return; ps = evsel->priv; - print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); + fprintf(stderr, " ( +- %7.3f%% )", + 100 * stddev_stats(&ps->res_stats[0]) / avg); } static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) { double msecs = avg / 1e6; char cpustr[16] = { '\0', }; - const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s"; + const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-24s"; if (no_aggr) sprintf(cpustr, "CPU%*d%s", @@ -547,191 +404,8 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) return; if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) - fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats)); -} - -static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_cycles_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 50.0) - color = PERF_COLOR_RED; - else if (ratio > 30.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 10.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " frontend cycles idle "); -} - -static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_cycles_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 75.0) - color = PERF_COLOR_RED; - else if (ratio > 50.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 20.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " backend cycles idle "); -} - -static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_branches_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " of all branches "); -} - -static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_l1_dcache_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " of all L1-dcache hits "); -} - -static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double 
total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_l1_icache_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " of all L1-icache hits "); -} - -static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_dtlb_cache_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " of all dTLB cache hits "); -} - -static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_itlb_cache_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " of all iTLB cache hits "); -} - -static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) -{ - double total, ratio = 0.0; - const char *color; - - total = avg_stats(&runtime_ll_cache_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - color = PERF_COLOR_NORMAL; - if (ratio > 20.0) - color = PERF_COLOR_RED; - else if (ratio > 10.0) - color = PERF_COLOR_MAGENTA; - else if (ratio > 5.0) - color = PERF_COLOR_YELLOW; - - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%6.2f%%", ratio); - fprintf(stderr, " of all LL-cache hits "); + fprintf(stderr, " # %10.3f CPUs ", + avg / avg_stats(&walltime_nsecs_stats)); } static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) @@ -743,9 +417,9 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (csv_output) fmt = "%s%.0f%s%s"; else if (big_num) - fmt = "%s%'18.0f%s%-25s"; + fmt = "%s%'18.0f%s%-24s"; else - fmt = "%s%18.0f%s%-25s"; + fmt = "%s%18.0f%s%-24s"; if (no_aggr) sprintf(cpustr, "CPU%*d%s", @@ -768,83 +442,23 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg / total; - fprintf(stderr, " # %5.2f insns per cycle ", ratio); - - total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]); - total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu])); - - if (total && avg) { - ratio = total / avg; - fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio); - } - + fprintf(stderr, " # %10.3f IPC ", ratio); } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && runtime_branches_stats[cpu].n != 0) { - print_branch_misses(cpu, evsel, avg); - } else if ( - evsel->attr.type == PERF_TYPE_HW_CACHE && - evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | - ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | - ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && - runtime_l1_dcache_stats[cpu].n != 0) { - print_l1_dcache_misses(cpu, evsel, avg); - } else if ( - evsel->attr.type == PERF_TYPE_HW_CACHE && - 
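[Editorial note: abs_printout() derives "insns per cycle" and "stalled cycles per insn" from the shadow stats; the arithmetic is simple and is shown below with the counts from the sample run quoted in the file header (a sketch, not perf code).]

#include <stdio.h>

int main(void)
{
	/* counts from the sample run in the builtin-stat.c header comment */
	double instructions = 2603501247.0;
	double cycles       = 5205202243.0;
	double stalled_fe   = 3856436920.0;
	double stalled_be   = 1600790871.0;

	double ipc = cycles ? instructions / cycles : 0.0;
	/* perf takes the larger of the two stall counts as the pessimistic bound */
	double stalls = stalled_fe > stalled_be ? stalled_fe : stalled_be;
	double stalled_per_insn = instructions ? stalls / instructions : 0.0;

	printf(" # %5.2f insns per cycle\n", ipc);		/* -> 0.50 */
	printf(" # %5.2f stalled cycles per insn\n", stalled_per_insn);	/* -> 1.48 */
	return 0;
}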
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I | - ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | - ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && - runtime_l1_icache_stats[cpu].n != 0) { - print_l1_icache_misses(cpu, evsel, avg); - } else if ( - evsel->attr.type == PERF_TYPE_HW_CACHE && - evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB | - ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | - ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && - runtime_dtlb_cache_stats[cpu].n != 0) { - print_dtlb_cache_misses(cpu, evsel, avg); - } else if ( - evsel->attr.type == PERF_TYPE_HW_CACHE && - evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB | - ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | - ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && - runtime_itlb_cache_stats[cpu].n != 0) { - print_itlb_cache_misses(cpu, evsel, avg); - } else if ( - evsel->attr.type == PERF_TYPE_HW_CACHE && - evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL | - ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | - ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && - runtime_ll_cache_stats[cpu].n != 0) { - print_ll_cache_misses(cpu, evsel, avg); - } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && - runtime_cacherefs_stats[cpu].n != 0) { - total = avg_stats(&runtime_cacherefs_stats[cpu]); + total = avg_stats(&runtime_branches_stats[cpu]); if (total) ratio = avg * 100 / total; - fprintf(stderr, " # %8.3f %% of all cache refs ", ratio); - - } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { - print_stalled_cycles_frontend(cpu, evsel, avg); - } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { - print_stalled_cycles_backend(cpu, evsel, avg); - } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { - total = avg_stats(&runtime_nsecs_stats[cpu]); + fprintf(stderr, " # %10.3f %% ", ratio); - if (total) - ratio = 1.0 * avg / total; - - fprintf(stderr, " # %8.3f GHz ", ratio); } else if (runtime_nsecs_stats[cpu].n != 0) { total = avg_stats(&runtime_nsecs_stats[cpu]); if (total) ratio = 1000.0 * avg / total; - fprintf(stderr, " # %8.3f M/sec ", ratio); - } else { - fprintf(stderr, " "); + fprintf(stderr, " # %10.3f M/sec", ratio); } } @@ -891,7 +505,8 @@ static void print_counter_aggr(struct perf_evsel *counter) avg_enabled = avg_stats(&ps->res_stats[1]); avg_running = avg_stats(&ps->res_stats[2]); - fprintf(stderr, " [%5.2f%%]", 100 * avg_running / avg_enabled); + fprintf(stderr, " (scaled from %.2f%%)", + 100 * avg_running / avg_enabled); } fprintf(stderr, "\n"); } @@ -933,8 +548,10 @@ static void print_counter(struct perf_evsel *counter) if (!csv_output) { print_noise(counter, 1.0); - if (run != ena) - fprintf(stderr, " (%.2f%%)", 100.0 * run / ena); + if (run != ena) { + fprintf(stderr, " (scaled from %.2f%%)", + 100.0 * run / ena); + } } fputc('\n', stderr); } @@ -974,14 +591,13 @@ static void print_stat(int argc, const char **argv) } if (!csv_output) { - if (!null_run) - fprintf(stderr, "\n"); - fprintf(stderr, " %17.9f seconds time elapsed", + fprintf(stderr, "\n"); + fprintf(stderr, " %18.9f seconds time elapsed", avg_stats(&walltime_nsecs_stats)/1e9); if (run_count > 1) { - fprintf(stderr, " "); - print_noise_pct(stddev_stats(&walltime_nsecs_stats), - avg_stats(&walltime_nsecs_stats)); + fprintf(stderr, " ( +- %7.3f%% )", + 100*stddev_stats(&walltime_nsecs_stats) / + avg_stats(&walltime_nsecs_stats)); } fprintf(stderr, "\n\n"); } @@ -1043,10 +659,6 @@ static const struct option options[] = { "repeat command and print average + stddev (max: 100)"), OPT_BOOLEAN('n', "null", &null_run, "null run - dont 
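[Editorial note: the remaining branches of abs_printout() normalise raw counts by the task-clock time accumulated in runtime_nsecs_stats — cycles per nanosecond is GHz directly, and other counts become M/sec after multiplying by 1000. A sketch with the figures from the sample output.]

#include <stdio.h>

int main(void)
{
	/* task-clock is accumulated in nanoseconds in runtime_nsecs_stats */
	double task_clock_ns = 1708.761321 * 1e6;	/* 1708.761321 msec */
	double cycles        = 5205202243.0;
	double branches      = 484357498.0;

	/* cycles per nanosecond == billions of cycles per second == GHz */
	printf(" # %8.3f GHz\n", cycles / task_clock_ns);	/* -> 3.046 */
	/* count/ns * 1000 == count per microsecond == millions per second */
	printf(" # %8.3f M/sec\n", 1000.0 * branches / task_clock_ns);	/* -> 283.455 */
	return 0;
}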
start any counters"), - OPT_INCR('d', "detailed", &detailed_run, - "detailed run - start a lot of events"), - OPT_BOOLEAN('S', "sync", &sync_run, - "call sync() before starting a run"), OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, "print large numbers with thousands\' separators", stat__set_big_num), @@ -1062,70 +674,6 @@ static const struct option options[] = { OPT_END() }; -/* - * Add default attributes, if there were no attributes specified or - * if -d/--detailed, -d -d or -d -d -d is used: - */ -static int add_default_attributes(void) -{ - struct perf_evsel *pos; - size_t attr_nr = 0; - size_t c; - - /* Set attrs if no event is selected and !null_run: */ - if (null_run) - return 0; - - if (!evsel_list->nr_entries) { - for (c = 0; c < ARRAY_SIZE(default_attrs); c++) { - pos = perf_evsel__new(default_attrs + c, c + attr_nr); - if (pos == NULL) - return -1; - perf_evlist__add(evsel_list, pos); - } - attr_nr += c; - } - - /* Detailed events get appended to the event list: */ - - if (detailed_run < 1) - return 0; - - /* Append detailed run extra attributes: */ - for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) { - pos = perf_evsel__new(detailed_attrs + c, c + attr_nr); - if (pos == NULL) - return -1; - perf_evlist__add(evsel_list, pos); - } - attr_nr += c; - - if (detailed_run < 2) - return 0; - - /* Append very detailed run extra attributes: */ - for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) { - pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr); - if (pos == NULL) - return -1; - perf_evlist__add(evsel_list, pos); - } - - if (detailed_run < 3) - return 0; - - /* Append very, very detailed run extra attributes: */ - for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) { - pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr); - if (pos == NULL) - return -1; - perf_evlist__add(evsel_list, pos); - } - - - return 0; -} - int cmd_stat(int argc, const char **argv, const char *prefix __used) { struct perf_evsel *pos; @@ -1171,8 +719,17 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) usage_with_options(stat_usage, options); } - if (add_default_attributes()) - goto out; + /* Set attrs and nr_counters if no event is selected and !null_run */ + if (!null_run && !evsel_list->nr_entries) { + size_t c; + + for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) { + pos = perf_evsel__new(&default_attrs[c], c); + if (pos == NULL) + goto out; + perf_evlist__add(evsel_list, pos); + } + } if (target_pid != -1) target_tid = target_pid; @@ -1216,10 +773,6 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) for (run_idx = 0; run_idx < run_count; run_idx++) { if (run_count != 1 && verbose) fprintf(stderr, "[ perf stat: executing run #%d ... 
]\n", run_idx + 1); - - if (sync_run) - sync(); - status = run_perf_stat(argc, argv); } diff --git a/trunk/tools/perf/builtin-test.c b/trunk/tools/perf/builtin-test.c index 2f9a337b182f..11e3c8458362 100644 --- a/trunk/tools/perf/builtin-test.c +++ b/trunk/tools/perf/builtin-test.c @@ -549,7 +549,7 @@ static int test__basic_mmap(void) ++foo; } - while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { + while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) { struct perf_sample sample; if (event->header.type != PERF_RECORD_SAMPLE) { diff --git a/trunk/tools/perf/builtin-top.c b/trunk/tools/perf/builtin-top.c index ebfc7cf5f63b..7e3d6e310bf8 100644 --- a/trunk/tools/perf/builtin-top.c +++ b/trunk/tools/perf/builtin-top.c @@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event, } } -static void perf_session__mmap_read_idx(struct perf_session *self, int idx) +static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu) { struct perf_sample sample; union perf_event *event; - while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) { + while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) { perf_session__parse_sample(self, event, &sample); if (event->header.type == PERF_RECORD_SAMPLE) @@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self) { int i; - for (i = 0; i < top.evlist->nr_mmaps; i++) - perf_session__mmap_read_idx(self, i); + for (i = 0; i < top.evlist->cpus->nr; i++) + perf_session__mmap_read_cpu(self, i); } static void start_counters(struct perf_evlist *evlist) diff --git a/trunk/tools/perf/config/utilities.mak b/trunk/tools/perf/config/utilities.mak deleted file mode 100644 index 8046182a19eb..000000000000 --- a/trunk/tools/perf/config/utilities.mak +++ /dev/null @@ -1,188 +0,0 @@ -# This allows us to work with the newline character: -define newline - - -endef -newline := $(newline) - -# nl-escape -# -# Usage: escape = $(call nl-escape[,escape]) -# -# This is used as the common way to specify -# what should replace a newline when escaping -# newlines; the default is a bizarre string. -# -nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n) - -# escape-nl -# -# Usage: escaped-text = $(call escape-nl,text[,escape]) -# -# GNU make's $(shell ...) function converts to a -# single space each newline character in the output -# produced during the expansion; this may not be -# desirable. -# -# The only solution is to change each newline into -# something that won't be converted, so that the -# information can be recovered later with -# $(call unescape-nl...) -# -escape-nl = $(subst $(newline),$(call nl-escape,$(2)),$(1)) - -# unescape-nl -# -# Usage: text = $(call unescape-nl,escaped-text[,escape]) -# -# See escape-nl. -# -unescape-nl = $(subst $(call nl-escape,$(2)),$(newline),$(1)) - -# shell-escape-nl -# -# Usage: $(shell some-command | $(call shell-escape-nl[,escape])) -# -# Use this to escape newlines from within a shell call; -# the default escape is a bizarre string. -# -# NOTE: The escape is used directly as a string constant -# in an `awk' program that is delimited by shell -# single-quotes, so be wary of the characters -# that are chosen. -# -define shell-escape-nl -awk 'NR==1 {t=$$0} NR>1 {t=t "$(nl-escape)" $$0} END {printf t}' -endef - -# shell-unescape-nl -# -# Usage: $(shell some-command | $(call shell-unescape-nl[,escape])) -# -# Use this to unescape newlines from within a shell call; -# the default escape is a bizarre string. 
-# -# NOTE: The escape is used directly as an extended regular -# expression constant in an `awk' program that is -# delimited by shell single-quotes, so be wary -# of the characters that are chosen. -# -# (The bash shell has a bug where `{gsub(...),...}' is -# misinterpreted as a brace expansion; this can be -# overcome by putting a space between `{' and `gsub'). -# -define shell-unescape-nl -awk 'NR==1 {t=$$0} NR>1 {t=t "\n" $$0} END { gsub(/$(nl-escape)/,"\n",t); printf t }' -endef - -# escape-for-shell-sq -# -# Usage: embeddable-text = $(call escape-for-shell-sq,text) -# -# This function produces text that is suitable for -# embedding in a shell string that is delimited by -# single-quotes. -# -escape-for-shell-sq = $(subst ','\'',$(1)) - -# shell-sq -# -# Usage: single-quoted-and-escaped-text = $(call shell-sq,text) -# -shell-sq = '$(escape-for-shell-sq)' - -# shell-wordify -# -# Usage: wordified-text = $(call shell-wordify,text) -# -# For instance: -# -# |define text -# |hello -# |world -# |endef -# | -# |target: -# | echo $(call shell-wordify,$(text)) -# -# At least GNU make gets confused by expanding a newline -# within the context of a command line of a makefile rule -# (this is in constrast to a `$(shell ...)' function call, -# which can handle it just fine). -# -# This function avoids the problem by producing a string -# that works as a shell word, regardless of whether or -# not it contains a newline. -# -# If the text to be wordified contains a newline, then -# an intrictate shell command substitution is constructed -# to render the text as a single line; when the shell -# processes the resulting escaped text, it transforms -# it into the original unescaped text. -# -# If the text does not contain a newline, then this function -# produces the same results as the `$(shell-sq)' function. -# -shell-wordify = $(if $(findstring $(newline),$(1)),$(_sw-esc-nl),$(shell-sq)) -define _sw-esc-nl -"$$(echo $(call escape-nl,$(shell-sq),$(2)) | $(call shell-unescape-nl,$(2)))" -endef - -# is-absolute -# -# Usage: bool-value = $(call is-absolute,path) -# -is-absolute = $(shell echo $(shell-sq) | grep ^/ -q && echo y) - -# lookup -# -# Usage: absolute-executable-path-or-empty = $(call lookup,path) -# -# (It's necessary to use `sh -c' because GNU make messes up by -# trying too hard and getting things wrong). -# -lookup = $(call unescape-nl,$(shell sh -c $(_l-sh))) -_l-sh = $(call shell-sq,command -v $(shell-sq) | $(call shell-escape-nl,)) - -# is-executable -# -# Usage: bool-value = $(call is-executable,path) -# -# (It's necessary to use `sh -c' because GNU make messes up by -# trying too hard and getting things wrong). -# -is-executable = $(call _is-executable-helper,$(shell-sq)) -_is-executable-helper = $(shell sh -c $(_is-executable-sh)) -_is-executable-sh = $(call shell-sq,test -f $(1) -a -x $(1) && echo y) - -# get-executable -# -# Usage: absolute-executable-path-or-empty = $(call get-executable,path) -# -# The goal is to get an absolute path for an executable; -# the `command -v' is defined by POSIX, but it's not -# necessarily very portable, so it's only used if -# relative path resolution is requested, as determined -# by the presence of a leading `/'. 
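[Editorial note: the utilities.mak helpers above (is-absolute, lookup, is-executable, get-executable) boil down to: if the path is absolute, test it directly, otherwise resolve it against PATH and test the result. An equivalent sketch in C using access(2); the function name is illustrative and not part of perf.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Return a malloc'd path if `name` resolves to an executable, else NULL. */
static char *find_executable(const char *name)
{
	if (name[0] == '/')	/* is-absolute: test the path itself */
		return access(name, X_OK) == 0 ? strdup(name) : NULL;

	const char *path = getenv("PATH");
	if (!path)
		return NULL;

	char *copy = strdup(path), *save = NULL;
	for (char *dir = strtok_r(copy, ":", &save); dir;
	     dir = strtok_r(NULL, ":", &save)) {
		char buf[4096];

		snprintf(buf, sizeof(buf), "%s/%s", dir, name);
		if (access(buf, X_OK) == 0) {	/* lookup + is-executable */
			free(copy);
			return strdup(buf);
		}
	}
	free(copy);
	return NULL;
}

int main(void)
{
	char *cc = find_executable("cc");

	printf("cc -> %s\n", cc ? cc : "(not found)");
	free(cc);
	return 0;
}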
-# -get-executable = $(if $(1),$(if $(is-absolute),$(_ge-abspath),$(lookup))) -_ge-abspath = $(if $(is-executable),$(1)) - -# get-supplied-or-default-executable -# -# Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default) -# -define get-executable-or-default -$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2))) -endef -_ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) -_gea_warn = $(warning The path '$(1)' is not executable.) -_gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) - -# try-cc -# Usage: option = $(call try-cc, source-to-build, cc-options) -try-cc = $(shell sh -c \ - 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ - echo "$(1)" | \ - $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ - rm -f "$$TMP"') diff --git a/trunk/tools/perf/config/feature-tests.mak b/trunk/tools/perf/feature-tests.mak similarity index 86% rename from trunk/tools/perf/config/feature-tests.mak rename to trunk/tools/perf/feature-tests.mak index 6170fd2531b5..b041ca67a2cb 100644 --- a/trunk/tools/perf/config/feature-tests.mak +++ b/trunk/tools/perf/feature-tests.mak @@ -79,15 +79,9 @@ endef endif ifndef NO_LIBPYTHON -define SOURCE_PYTHON_VERSION -#include -#if PY_VERSION_HEX >= 0x03000000 - #error -#endif -int main(void){} -endef define SOURCE_PYTHON_EMBED #include + int main(void) { Py_Initialize(); @@ -126,3 +120,11 @@ int main(void) return 0; } endef + +# try-cc +# Usage: option = $(call try-cc, source-to-build, cc-options) +try-cc = $(shell sh -c \ + 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ + echo "$(1)" | \ + $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ + rm -f "$$TMP"') diff --git a/trunk/tools/perf/util/evlist.c b/trunk/tools/perf/util/evlist.c index 23eb22b05d27..45da8d186b49 100644 --- a/trunk/tools/perf/util/evlist.c +++ b/trunk/tools/perf/util/evlist.c @@ -166,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) return NULL; } -union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) +union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) { /* XXX Move this to perf.c, making it generally available */ unsigned int page_size = sysconf(_SC_PAGE_SIZE); - struct perf_mmap *md = &evlist->mmap[idx]; + struct perf_mmap *md = &evlist->mmap[cpu]; unsigned int head = perf_mmap__read_head(md); unsigned int old = md->prev; unsigned char *data = md->base + page_size; @@ -235,37 +235,31 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) void perf_evlist__munmap(struct perf_evlist *evlist) { - int i; + int cpu; - for (i = 0; i < evlist->nr_mmaps; i++) { - if (evlist->mmap[i].base != NULL) { - munmap(evlist->mmap[i].base, evlist->mmap_len); - evlist->mmap[i].base = NULL; + for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { + if (evlist->mmap[cpu].base != NULL) { + munmap(evlist->mmap[cpu].base, evlist->mmap_len); + evlist->mmap[cpu].base = NULL; } } - - free(evlist->mmap); - evlist->mmap = NULL; } int perf_evlist__alloc_mmap(struct perf_evlist *evlist) { - evlist->nr_mmaps = evlist->cpus->nr; - if (evlist->cpus->map[0] == -1) - evlist->nr_mmaps = evlist->threads->nr; - evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); + evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap)); return evlist->mmap != NULL ? 
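[Editorial note: perf_evlist__mmap_read() walks the kernel's sample ring buffer — the first page of each mmap is a struct perf_event_mmap_page whose data_head says how far the kernel has written, md->prev remembers how far userspace has read, and event data starts one page in, addressed modulo the mask. The sketch below shows only that head/tail bookkeeping; the fd/mmap setup and the wrap-around copy that perf does via event_copy are omitted, and drain() is never called on real data here.]

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/perf_event.h>

/* One mmap'd ring: metadata page followed by 2^n data pages. */
struct ring {
	void    *base;	/* start of the mmap                   */
	uint64_t prev;	/* how far userspace has consumed      */
	uint64_t mask;	/* data size - 1, for wrapping offsets */
};

static void drain(struct ring *md)
{
	struct perf_event_mmap_page *pc = md->base;
	uint64_t head = pc->data_head;	/* kernel's write position */
	unsigned char *data = (unsigned char *)md->base + sysconf(_SC_PAGE_SIZE);

	__sync_synchronize();		/* pair with the kernel's write barrier */

	while (md->prev != head) {
		struct perf_event_header *hdr =
			(void *)&data[md->prev & md->mask];

		printf("event type %u, size %u\n", hdr->type, hdr->size);
		md->prev += hdr->size;	/* advance our tail */
	}
	pc->data_tail = md->prev;	/* non-overwrite mode: release the space */
}

int main(void)
{
	/* Setup (perf_event_open + mmap) omitted; see perf_evlist__mmap(). */
	(void)drain;
	return 0;
}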
0 : -ENOMEM; } static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, - int idx, int prot, int mask, int fd) + int cpu, int prot, int mask, int fd) { - evlist->mmap[idx].prev = 0; - evlist->mmap[idx].mask = mask; - evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, + evlist->mmap[cpu].prev = 0; + evlist->mmap[cpu].mask = mask; + evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot, MAP_SHARED, fd, 0); - if (evlist->mmap[idx].base == MAP_FAILED) { - if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit) + if (evlist->mmap[cpu].base == MAP_FAILED) { + if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit) ui__warning("Inherit is not allowed on per-task " "events using mmap.\n"); return -1; @@ -275,86 +269,6 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev return 0; } -static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask) -{ - struct perf_evsel *evsel; - int cpu, thread; - - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { - int output = -1; - - for (thread = 0; thread < evlist->threads->nr; thread++) { - list_for_each_entry(evsel, &evlist->entries, node) { - int fd = FD(evsel, cpu, thread); - - if (output == -1) { - output = fd; - if (__perf_evlist__mmap(evlist, evsel, cpu, - prot, mask, output) < 0) - goto out_unmap; - } else { - if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) - goto out_unmap; - } - - if ((evsel->attr.read_format & PERF_FORMAT_ID) && - perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) - goto out_unmap; - } - } - } - - return 0; - -out_unmap: - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { - if (evlist->mmap[cpu].base != NULL) { - munmap(evlist->mmap[cpu].base, evlist->mmap_len); - evlist->mmap[cpu].base = NULL; - } - } - return -1; -} - -static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask) -{ - struct perf_evsel *evsel; - int thread; - - for (thread = 0; thread < evlist->threads->nr; thread++) { - int output = -1; - - list_for_each_entry(evsel, &evlist->entries, node) { - int fd = FD(evsel, 0, thread); - - if (output == -1) { - output = fd; - if (__perf_evlist__mmap(evlist, evsel, thread, - prot, mask, output) < 0) - goto out_unmap; - } else { - if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) - goto out_unmap; - } - - if ((evsel->attr.read_format & PERF_FORMAT_ID) && - perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0) - goto out_unmap; - } - } - - return 0; - -out_unmap: - for (thread = 0; thread < evlist->threads->nr; thread++) { - if (evlist->mmap[thread].base != NULL) { - munmap(evlist->mmap[thread].base, evlist->mmap_len); - evlist->mmap[thread].base = NULL; - } - } - return -1; -} - /** perf_evlist__mmap - Create per cpu maps to receive events * * @evlist - list of events @@ -373,11 +287,11 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) { unsigned int page_size = sysconf(_SC_PAGE_SIZE); - int mask = pages * page_size - 1; - struct perf_evsel *evsel; + int mask = pages * page_size - 1, cpu; + struct perf_evsel *first_evsel, *evsel; const struct cpu_map *cpus = evlist->cpus; const struct thread_map *threads = evlist->threads; - int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); + int thread, prot = PROT_READ | (overwrite ? 
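[Editorial note: both the removed per-cpu/per-thread helpers and the restored inline loop rely on the same trick — only the first fd per cpu gets its own mmap, and every other fd is redirected into it with PERF_EVENT_IOC_SET_OUTPUT so one ring buffer collects all events for that cpu. A minimal sketch of that redirection for two per-task events; error paths are trimmed and the (8 + 1) page sizing mirrors perf's (pages + 1) convention.]

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr cycles = { .type   = PERF_TYPE_HARDWARE,
					  .config = PERF_COUNT_HW_CPU_CYCLES,
					  .size   = sizeof(cycles) };
	struct perf_event_attr insns = cycles;
	size_t page = sysconf(_SC_PAGE_SIZE);
	int fd0, fd1;
	void *ring;

	insns.config = PERF_COUNT_HW_INSTRUCTIONS;

	fd0 = perf_event_open(&cycles, 0, -1, -1, 0);	/* gets the mmap */
	fd1 = perf_event_open(&insns,  0, -1, -1, 0);	/* redirected    */
	if (fd0 < 0 || fd1 < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* 1 metadata page + 8 data pages */
	ring = mmap(NULL, (8 + 1) * page, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd0, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Route fd1's events into fd0's ring buffer. */
	if (ioctl(fd1, PERF_EVENT_IOC_SET_OUTPUT, fd0) != 0) {
		perror("PERF_EVENT_IOC_SET_OUTPUT");
		return 1;
	}

	puts("both events share one ring buffer");
	return 0;
}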
0 : PROT_WRITE); if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) return -ENOMEM; @@ -387,18 +301,43 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) evlist->overwrite = overwrite; evlist->mmap_len = (pages + 1) * page_size; + first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node); list_for_each_entry(evsel, &evlist->entries, node) { if ((evsel->attr.read_format & PERF_FORMAT_ID) && evsel->sample_id == NULL && perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) return -ENOMEM; + + for (cpu = 0; cpu < cpus->nr; cpu++) { + for (thread = 0; thread < threads->nr; thread++) { + int fd = FD(evsel, cpu, thread); + + if (evsel->idx || thread) { + if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, + FD(first_evsel, cpu, 0)) != 0) + goto out_unmap; + } else if (__perf_evlist__mmap(evlist, evsel, cpu, + prot, mask, fd) < 0) + goto out_unmap; + + if ((evsel->attr.read_format & PERF_FORMAT_ID) && + perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) + goto out_unmap; + } + } } - if (evlist->cpus->map[0] == -1) - return perf_evlist__mmap_per_thread(evlist, prot, mask); + return 0; - return perf_evlist__mmap_per_cpu(evlist, prot, mask); +out_unmap: + for (cpu = 0; cpu < cpus->nr; cpu++) { + if (evlist->mmap[cpu].base != NULL) { + munmap(evlist->mmap[cpu].base, evlist->mmap_len); + evlist->mmap[cpu].base = NULL; + } + } + return -1; } int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, @@ -409,7 +348,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, if (evlist->threads == NULL) return -1; - if (cpu_list == NULL && target_tid != -1) + if (target_tid != -1) evlist->cpus = cpu_map__dummy_new(); else evlist->cpus = cpu_map__new(cpu_list); diff --git a/trunk/tools/perf/util/evlist.h b/trunk/tools/perf/util/evlist.h index 7109d7add14e..8b1cb7a4c5f1 100644 --- a/trunk/tools/perf/util/evlist.h +++ b/trunk/tools/perf/util/evlist.h @@ -17,7 +17,6 @@ struct perf_evlist { struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; int nr_entries; int nr_fds; - int nr_mmaps; int mmap_len; bool overwrite; union perf_event event_copy; @@ -47,7 +46,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); -union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); +union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); int perf_evlist__alloc_mmap(struct perf_evlist *evlist); int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); diff --git a/trunk/tools/perf/util/include/asm/alternative-asm.h b/trunk/tools/perf/util/include/asm/alternative-asm.h deleted file mode 100644 index 6789d788d494..000000000000 --- a/trunk/tools/perf/util/include/asm/alternative-asm.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _PERF_ASM_ALTERNATIVE_ASM_H -#define _PERF_ASM_ALTERNATIVE_ASM_H - -/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */ - -#define altinstruction_entry # - -#endif diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c index 41982c373faf..952b4ae3d954 100644 --- a/trunk/tools/perf/util/parse-events.c +++ b/trunk/tools/perf/util/parse-events.c @@ -31,36 +31,34 @@ char debugfs_path[MAXPATHLEN]; #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x static struct event_symbol event_symbols[] = { - { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, - { CHW(STALLED_CYCLES_FRONTEND), 
"stalled-cycles-frontend", "idle-cycles-frontend" }, - { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" }, - { CHW(INSTRUCTIONS), "instructions", "" }, - { CHW(CACHE_REFERENCES), "cache-references", "" }, - { CHW(CACHE_MISSES), "cache-misses", "" }, - { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, - { CHW(BRANCH_MISSES), "branch-misses", "" }, - { CHW(BUS_CYCLES), "bus-cycles", "" }, - - { CSW(CPU_CLOCK), "cpu-clock", "" }, - { CSW(TASK_CLOCK), "task-clock", "" }, - { CSW(PAGE_FAULTS), "page-faults", "faults" }, - { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, - { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, - { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, - { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, - { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, - { CSW(EMULATION_FAULTS), "emulation-faults", "" }, + { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, + { CHW(INSTRUCTIONS), "instructions", "" }, + { CHW(CACHE_REFERENCES), "cache-references", "" }, + { CHW(CACHE_MISSES), "cache-misses", "" }, + { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, + { CHW(BRANCH_MISSES), "branch-misses", "" }, + { CHW(BUS_CYCLES), "bus-cycles", "" }, + + { CSW(CPU_CLOCK), "cpu-clock", "" }, + { CSW(TASK_CLOCK), "task-clock", "" }, + { CSW(PAGE_FAULTS), "page-faults", "faults" }, + { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, + { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, + { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, + { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, + { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, + { CSW(EMULATION_FAULTS), "emulation-faults", "" }, }; #define __PERF_EVENT_FIELD(config, name) \ ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) -#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) +#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) -#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) +#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) -static const char *hw_event_names[PERF_COUNT_HW_MAX] = { +static const char *hw_event_names[] = { "cycles", "instructions", "cache-references", @@ -68,13 +66,11 @@ static const char *hw_event_names[PERF_COUNT_HW_MAX] = { "branches", "branch-misses", "bus-cycles", - "stalled-cycles-frontend", - "stalled-cycles-backend", }; -static const char *sw_event_names[PERF_COUNT_SW_MAX] = { - "cpu-clock", - "task-clock", +static const char *sw_event_names[] = { + "cpu-clock-msecs", + "task-clock-msecs", "page-faults", "context-switches", "CPU-migrations", @@ -311,7 +307,7 @@ const char *__event_name(int type, u64 config) switch (type) { case PERF_TYPE_HARDWARE: - if (config < PERF_COUNT_HW_MAX && hw_event_names[config]) + if (config < PERF_COUNT_HW_MAX) return hw_event_names[config]; return "unknown-hardware"; @@ -337,7 +333,7 @@ const char *__event_name(int type, u64 config) } case PERF_TYPE_SOFTWARE: - if (config < PERF_COUNT_SW_MAX && sw_event_names[config]) + if (config < PERF_COUNT_SW_MAX) return sw_event_names[config]; return "unknown-software"; @@ -652,15 +648,13 @@ static int check_events(const char *str, unsigned int i) int n; n = strlen(event_symbols[i].symbol); - if (!strncasecmp(str, event_symbols[i].symbol, n)) + if (!strncmp(str, event_symbols[i].symbol, n)) return n; n = strlen(event_symbols[i].alias); - if (n) { - if (!strncasecmp(str, 
event_symbols[i].alias, n)) + if (n) + if (!strncmp(str, event_symbols[i].alias, n)) return n; - } - return 0; } @@ -724,22 +718,15 @@ parse_numeric_event(const char **strp, struct perf_event_attr *attr) return EVT_FAILED; } -static int +static enum event_result parse_event_modifier(const char **strp, struct perf_event_attr *attr) { const char *str = *strp; int exclude = 0; int eu = 0, ek = 0, eh = 0, precise = 0; - if (!*str) - return 0; - - if (*str == ',') - return 0; - if (*str++ != ':') - return -1; - + return 0; while (*str) { if (*str == 'u') { if (!exclude) @@ -760,16 +747,14 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr) ++str; } - if (str < *strp + 2) - return -1; - - *strp = str; - - attr->exclude_user = eu; - attr->exclude_kernel = ek; - attr->exclude_hv = eh; - attr->precise_ip = precise; - + if (str >= *strp + 2) { + *strp = str; + attr->exclude_user = eu; + attr->exclude_kernel = ek; + attr->exclude_hv = eh; + attr->precise_ip = precise; + return 1; + } return 0; } @@ -812,12 +797,7 @@ parse_event_symbols(const struct option *opt, const char **str, return EVT_FAILED; modifier: - if (parse_event_modifier(str, attr) < 0) { - fprintf(stderr, "invalid event modifier: '%s'\n", *str); - fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n"); - - return EVT_FAILED; - } + parse_event_modifier(str, attr); return ret; } @@ -932,7 +912,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent.d_name, evt_dirent.d_name); - printf(" %-50s [%s]\n", evt_path, + printf(" %-42s [%s]\n", evt_path, event_type_descriptors[PERF_TYPE_TRACEPOINT]); } closedir(evt_dir); @@ -997,7 +977,7 @@ void print_events_type(u8 type) else snprintf(name, sizeof(name), "%s", syms->symbol); - printf(" %-50s [%s]\n", name, + printf(" %-42s [%s]\n", name, event_type_descriptors[type]); } } @@ -1015,10 +995,11 @@ int print_hwcache_events(const char *event_glob) for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { char *name = event_cache_name(type, op, i); - if (event_glob != NULL && !strglobmatch(name, event_glob)) + if (event_glob != NULL && + !strglobmatch(name, event_glob)) continue; - printf(" %-50s [%s]\n", name, + printf(" %-42s [%s]\n", name, event_type_descriptors[PERF_TYPE_HW_CACHE]); ++printed; } @@ -1028,16 +1009,14 @@ int print_hwcache_events(const char *event_glob) return printed; } -#define MAX_NAME_LEN 100 - /* * Print the help text for the event symbols: */ void print_events(const char *event_glob) { - unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; struct event_symbol *syms = event_symbols; - char name[MAX_NAME_LEN]; + unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; + char name[40]; printf("\n"); printf("List of pre-defined events (to be used in -e):\n"); @@ -1057,10 +1036,10 @@ void print_events(const char *event_glob) continue; if (strlen(syms->alias)) - snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); + sprintf(name, "%s OR %s", syms->symbol, syms->alias); else - strncpy(name, syms->symbol, MAX_NAME_LEN); - printf(" %-50s [%s]\n", name, + strcpy(name, syms->symbol); + printf(" %-42s [%s]\n", name, event_type_descriptors[type]); prev_type = type; @@ -1077,12 +1056,12 @@ void print_events(const char *event_glob) return; printf("\n"); - printf(" %-50s [%s]\n", + printf(" %-42s [%s]\n", "rNNN (see 'perf list --help' on how to encode it)", event_type_descriptors[PERF_TYPE_RAW]); printf("\n"); - printf(" %-50s 
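[Editorial note: parse_event_modifier() walks the ':ukhp' suffix after an event name and turns each letter into exclusion or precision flags on the perf_event_attr. The hunk above shows only parts of the loop body; the exclusion bookkeeping below is reconstructed as an illustration of how perf of this era handled it (first letter switches all excludes on, each named domain is then re-enabled) and should be read as a sketch, not the exact source.]

#include <stdio.h>
#include <linux/perf_event.h>

/* Parse a ":ukhp"-style suffix into attr flags; returns 0 on success. */
static int parse_event_modifier(const char **strp, struct perf_event_attr *attr)
{
	const char *str = *strp;
	int eu = 0, ek = 0, eh = 0, precise = 0, exclude = 0;

	if (!*str || *str == ',')
		return 0;	/* no modifier present */
	if (*str++ != ':')
		return -1;

	while (*str) {
		if (*str == 'u') {
			if (!exclude)
				exclude = eu = ek = eh = 1;	/* exclude everything, then allow user */
			eu = 0;
		} else if (*str == 'k') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		} else if (*str == 'h') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		} else if (*str == 'p') {
			precise++;
		} else {
			break;
		}
		++str;
	}
	if (str < *strp + 2)
		return -1;	/* bare ':' with nothing valid after it */

	*strp = str;
	attr->exclude_user   = eu;
	attr->exclude_kernel = ek;
	attr->exclude_hv     = eh;
	attr->precise_ip     = precise;
	return 0;
}

int main(void)
{
	struct perf_event_attr attr = { 0 };
	const char *s = ":kp";

	if (parse_event_modifier(&s, &attr) == 0)
		printf("exclude_user=%llu exclude_kernel=%llu precise_ip=%llu\n",
		       (unsigned long long)attr.exclude_user,
		       (unsigned long long)attr.exclude_kernel,
		       (unsigned long long)attr.precise_ip);
	return 0;
}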
[%s]\n", + printf(" %-42s [%s]\n", "mem:[:access]", event_type_descriptors[PERF_TYPE_BREAKPOINT]); printf("\n"); diff --git a/trunk/tools/perf/util/probe-finder.c b/trunk/tools/perf/util/probe-finder.c index 3b9d0b800d5c..b7c85ce466a1 100644 --- a/trunk/tools/perf/util/probe-finder.c +++ b/trunk/tools/perf/util/probe-finder.c @@ -1471,38 +1471,6 @@ static int find_probe_point_by_func(struct probe_finder *pf) return _param.retval; } -struct pubname_callback_param { - char *function; - char *file; - Dwarf_Die *cu_die; - Dwarf_Die *sp_die; - int found; -}; - -static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) -{ - struct pubname_callback_param *param = data; - - if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) { - if (dwarf_tag(param->sp_die) != DW_TAG_subprogram) - return DWARF_CB_OK; - - if (die_compare_name(param->sp_die, param->function)) { - if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) - return DWARF_CB_OK; - - if (param->file && - strtailcmp(param->file, dwarf_decl_file(param->sp_die))) - return DWARF_CB_OK; - - param->found = 1; - return DWARF_CB_ABORT; - } - } - - return DWARF_CB_OK; -} - /* Find probe points from debuginfo */ static int find_probes(int fd, struct probe_finder *pf) { @@ -1530,28 +1498,6 @@ static int find_probes(int fd, struct probe_finder *pf) off = 0; line_list__init(&pf->lcache); - - /* Fastpath: lookup by function name from .debug_pubnames section */ - if (pp->function) { - struct pubname_callback_param pubname_param = { - .function = pp->function, - .file = pp->file, - .cu_die = &pf->cu_die, - .sp_die = &pf->sp_die, - .found = 0, - }; - struct dwarf_callback_param probe_param = { - .data = pf, - }; - - dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); - if (pubname_param.found) { - ret = probe_point_search_cb(&pf->sp_die, &probe_param); - if (ret) - goto found; - } - } - /* Loop on CUs (Compilation Unit) */ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { /* Get the DIE(Debugging Information Entry) of this CU */ @@ -1579,8 +1525,6 @@ static int find_probes(int fd, struct probe_finder *pf) } off = noff; } - -found: line_list__free(&pf->lcache); if (dwfl) dwfl_end(dwfl); @@ -2002,22 +1946,6 @@ int find_line_range(int fd, struct line_range *lr) return -EBADF; } - /* Fastpath: lookup by function name from .debug_pubnames section */ - if (lr->function) { - struct pubname_callback_param pubname_param = { - .function = lr->function, .file = lr->file, - .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0}; - struct dwarf_callback_param line_range_param = { - .data = (void *)&lf, .retval = 0}; - - dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); - if (pubname_param.found) { - line_range_search_cb(&lf.sp_die, &line_range_param); - if (lf.found) - goto found; - } - } - /* Loop on CUs (Compilation Unit) */ while (!lf.found && ret >= 0) { if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) @@ -2046,7 +1974,6 @@ int find_line_range(int fd, struct line_range *lr) off = noff; } -found: /* Store comp_dir */ if (lf.found) { comp_dir = cu_get_comp_dir(&lf.cu_die); diff --git a/trunk/tools/perf/util/probe-finder.h b/trunk/tools/perf/util/probe-finder.h index 605730a366db..beaefc3c1223 100644 --- a/trunk/tools/perf/util/probe-finder.h +++ b/trunk/tools/perf/util/probe-finder.h @@ -49,7 +49,6 @@ struct probe_finder { Dwarf_Addr addr; /* Address */ const char *fname; /* Real file name */ Dwarf_Die cu_die; /* Current CU */ - Dwarf_Die sp_die; struct list_head lcache; /* Line cache for lazy match 
*/ /* For variable searching */ @@ -84,7 +83,6 @@ struct line_finder { int lno_s; /* Start line number */ int lno_e; /* End line number */ Dwarf_Die cu_die; /* Current CU */ - Dwarf_Die sp_die; int found; }; diff --git a/trunk/tools/perf/util/python.c b/trunk/tools/perf/util/python.c index b5c7d818001c..f5e38451fdc5 100644 --- a/trunk/tools/perf/util/python.c +++ b/trunk/tools/perf/util/python.c @@ -680,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, &cpu, &sample_id_all)) return NULL; - event = perf_evlist__mmap_read(evlist, cpu); + event = perf_evlist__read_on_cpu(evlist, cpu); if (event != NULL) { struct perf_evsel *first; PyObject *pyevent = pyrf_event__new(event); @@ -810,9 +810,6 @@ static struct { { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, - { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, - { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, - { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c index fff66741f18d..caa224522fea 100644 --- a/trunk/tools/perf/util/session.c +++ b/trunk/tools/perf/util/session.c @@ -1156,18 +1156,6 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) return ret; } -struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, - unsigned int type) -{ - struct perf_evsel *pos; - - list_for_each_entry(pos, &session->evlist->entries, node) { - if (pos->attr.type == type) - return pos; - } - return NULL; -} - void perf_session__print_symbols(union perf_event *event, struct perf_sample *sample, struct perf_session *session) diff --git a/trunk/tools/perf/util/session.h b/trunk/tools/perf/util/session.h index 8daaa2d15396..1ac481fc1100 100644 --- a/trunk/tools/perf/util/session.h +++ b/trunk/tools/perf/util/session.h @@ -162,9 +162,6 @@ static inline int perf_session__parse_sample(struct perf_session *session, session->sample_id_all, sample); } -struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, - unsigned int type); - void perf_session__print_symbols(union perf_event *event, struct perf_sample *sample, struct perf_session *session); diff --git a/trunk/tools/perf/util/symbol.c b/trunk/tools/perf/util/symbol.c index 516876dfbe52..f06c10f092ba 100644 --- a/trunk/tools/perf/util/symbol.c +++ b/trunk/tools/perf/util/symbol.c @@ -31,13 +31,13 @@ #define NT_GNU_BUILD_ID 3 #endif -static bool dso__build_id_equal(const struct dso *dso, u8 *build_id); +static bool dso__build_id_equal(const struct dso *self, u8 *build_id); static int elf_read_build_id(Elf *elf, void *bf, size_t size); static void dsos__add(struct list_head *head, struct dso *dso); static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); -static int dso__load_kernel_sym(struct dso *dso, struct map *map, +static int dso__load_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter); -static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, +static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter); static int vmlinux_path__nr_entries; static char **vmlinux_path; @@ -49,27 +49,27 @@ struct symbol_conf symbol_conf = { .symfs = "", }; -int dso__name_len(const struct dso *dso) +int 
dso__name_len(const struct dso *self) { if (verbose) - return dso->long_name_len; + return self->long_name_len; - return dso->short_name_len; + return self->short_name_len; } -bool dso__loaded(const struct dso *dso, enum map_type type) +bool dso__loaded(const struct dso *self, enum map_type type) { - return dso->loaded & (1 << type); + return self->loaded & (1 << type); } -bool dso__sorted_by_name(const struct dso *dso, enum map_type type) +bool dso__sorted_by_name(const struct dso *self, enum map_type type) { - return dso->sorted_by_name & (1 << type); + return self->sorted_by_name & (1 << type); } -static void dso__set_sorted_by_name(struct dso *dso, enum map_type type) +static void dso__set_sorted_by_name(struct dso *self, enum map_type type) { - dso->sorted_by_name |= (1 << type); + self->sorted_by_name |= (1 << type); } bool symbol_type__is_a(char symbol_type, enum map_type map_type) @@ -84,9 +84,9 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type) } } -static void symbols__fixup_end(struct rb_root *symbols) +static void symbols__fixup_end(struct rb_root *self) { - struct rb_node *nd, *prevnd = rb_first(symbols); + struct rb_node *nd, *prevnd = rb_first(self); struct symbol *curr, *prev; if (prevnd == NULL) @@ -107,10 +107,10 @@ static void symbols__fixup_end(struct rb_root *symbols) curr->end = roundup(curr->start, 4096); } -static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) +static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) { struct map *prev, *curr; - struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); + struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); if (prevnd == NULL) return; @@ -130,128 +130,128 @@ static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) curr->end = ~0ULL; } -static void map_groups__fixup_end(struct map_groups *mg) +static void map_groups__fixup_end(struct map_groups *self) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) - __map_groups__fixup_end(mg, i); + __map_groups__fixup_end(self, i); } static struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) { size_t namelen = strlen(name) + 1; - struct symbol *sym = calloc(1, (symbol_conf.priv_size + - sizeof(*sym) + namelen)); - if (sym == NULL) + struct symbol *self = calloc(1, (symbol_conf.priv_size + + sizeof(*self) + namelen)); + if (self == NULL) return NULL; if (symbol_conf.priv_size) - sym = ((void *)sym) + symbol_conf.priv_size; + self = ((void *)self) + symbol_conf.priv_size; + + self->start = start; + self->end = len ? start + len - 1 : start; + self->binding = binding; + self->namelen = namelen - 1; - sym->start = start; - sym->end = len ? start + len - 1 : start; - sym->binding = binding; - sym->namelen = namelen - 1; + pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end); - pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", - __func__, name, start, sym->end); - memcpy(sym->name, name, namelen); + memcpy(self->name, name, namelen); - return sym; + return self; } -void symbol__delete(struct symbol *sym) +void symbol__delete(struct symbol *self) { - free(((void *)sym) - symbol_conf.priv_size); + free(((void *)self) - symbol_conf.priv_size); } -static size_t symbol__fprintf(struct symbol *sym, FILE *fp) +static size_t symbol__fprintf(struct symbol *self, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", - sym->start, sym->end, - sym->binding == STB_GLOBAL ? 'g' : - sym->binding == STB_LOCAL ? 
'l' : 'w', - sym->name); + self->start, self->end, + self->binding == STB_GLOBAL ? 'g' : + self->binding == STB_LOCAL ? 'l' : 'w', + self->name); } -void dso__set_long_name(struct dso *dso, char *name) +void dso__set_long_name(struct dso *self, char *name) { if (name == NULL) return; - dso->long_name = name; - dso->long_name_len = strlen(name); + self->long_name = name; + self->long_name_len = strlen(name); } -static void dso__set_short_name(struct dso *dso, const char *name) +static void dso__set_short_name(struct dso *self, const char *name) { if (name == NULL) return; - dso->short_name = name; - dso->short_name_len = strlen(name); + self->short_name = name; + self->short_name_len = strlen(name); } -static void dso__set_basename(struct dso *dso) +static void dso__set_basename(struct dso *self) { - dso__set_short_name(dso, basename(dso->long_name)); + dso__set_short_name(self, basename(self->long_name)); } struct dso *dso__new(const char *name) { - struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); + struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1); - if (dso != NULL) { + if (self != NULL) { int i; - strcpy(dso->name, name); - dso__set_long_name(dso, dso->name); - dso__set_short_name(dso, dso->name); + strcpy(self->name, name); + dso__set_long_name(self, self->name); + dso__set_short_name(self, self->name); for (i = 0; i < MAP__NR_TYPES; ++i) - dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; - dso->symtab_type = SYMTAB__NOT_FOUND; - dso->loaded = 0; - dso->sorted_by_name = 0; - dso->has_build_id = 0; - dso->kernel = DSO_TYPE_USER; - INIT_LIST_HEAD(&dso->node); + self->symbols[i] = self->symbol_names[i] = RB_ROOT; + self->symtab_type = SYMTAB__NOT_FOUND; + self->loaded = 0; + self->sorted_by_name = 0; + self->has_build_id = 0; + self->kernel = DSO_TYPE_USER; + INIT_LIST_HEAD(&self->node); } - return dso; + return self; } -static void symbols__delete(struct rb_root *symbols) +static void symbols__delete(struct rb_root *self) { struct symbol *pos; - struct rb_node *next = rb_first(symbols); + struct rb_node *next = rb_first(self); while (next) { pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, symbols); + rb_erase(&pos->rb_node, self); symbol__delete(pos); } } -void dso__delete(struct dso *dso) +void dso__delete(struct dso *self) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) - symbols__delete(&dso->symbols[i]); - if (dso->sname_alloc) - free((char *)dso->short_name); - if (dso->lname_alloc) - free(dso->long_name); - free(dso); + symbols__delete(&self->symbols[i]); + if (self->sname_alloc) + free((char *)self->short_name); + if (self->lname_alloc) + free(self->long_name); + free(self); } -void dso__set_build_id(struct dso *dso, void *build_id) +void dso__set_build_id(struct dso *self, void *build_id) { - memcpy(dso->build_id, build_id, sizeof(dso->build_id)); - dso->has_build_id = 1; + memcpy(self->build_id, build_id, sizeof(self->build_id)); + self->has_build_id = 1; } -static void symbols__insert(struct rb_root *symbols, struct symbol *sym) +static void symbols__insert(struct rb_root *self, struct symbol *sym) { - struct rb_node **p = &symbols->rb_node; + struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; const u64 ip = sym->start; struct symbol *s; @@ -265,17 +265,17 @@ static void symbols__insert(struct rb_root *symbols, struct symbol *sym) p = &(*p)->rb_right; } rb_link_node(&sym->rb_node, parent, p); - rb_insert_color(&sym->rb_node, symbols); + rb_insert_color(&sym->rb_node, self); } -static 
struct symbol *symbols__find(struct rb_root *symbols, u64 ip) +static struct symbol *symbols__find(struct rb_root *self, u64 ip) { struct rb_node *n; - if (symbols == NULL) + if (self == NULL) return NULL; - n = symbols->rb_node; + n = self->rb_node; while (n) { struct symbol *s = rb_entry(n, struct symbol, rb_node); @@ -296,9 +296,9 @@ struct symbol_name_rb_node { struct symbol sym; }; -static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) +static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) { - struct rb_node **p = &symbols->rb_node; + struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; struct symbol_name_rb_node *symn, *s; @@ -313,29 +313,27 @@ static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) p = &(*p)->rb_right; } rb_link_node(&symn->rb_node, parent, p); - rb_insert_color(&symn->rb_node, symbols); + rb_insert_color(&symn->rb_node, self); } -static void symbols__sort_by_name(struct rb_root *symbols, - struct rb_root *source) +static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source) { struct rb_node *nd; for (nd = rb_first(source); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - symbols__insert_by_name(symbols, pos); + symbols__insert_by_name(self, pos); } } -static struct symbol *symbols__find_by_name(struct rb_root *symbols, - const char *name) +static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name) { struct rb_node *n; - if (symbols == NULL) + if (self == NULL) return NULL; - n = symbols->rb_node; + n = self->rb_node; while (n) { struct symbol_name_rb_node *s; @@ -355,29 +353,29 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, return NULL; } -struct symbol *dso__find_symbol(struct dso *dso, +struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr) { - return symbols__find(&dso->symbols[type], addr); + return symbols__find(&self->symbols[type], addr); } -struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, +struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, const char *name) { - return symbols__find_by_name(&dso->symbol_names[type], name); + return symbols__find_by_name(&self->symbol_names[type], name); } -void dso__sort_by_name(struct dso *dso, enum map_type type) +void dso__sort_by_name(struct dso *self, enum map_type type) { - dso__set_sorted_by_name(dso, type); - return symbols__sort_by_name(&dso->symbol_names[type], - &dso->symbols[type]); + dso__set_sorted_by_name(self, type); + return symbols__sort_by_name(&self->symbol_names[type], + &self->symbols[type]); } -int build_id__sprintf(const u8 *build_id, int len, char *bf) +int build_id__sprintf(const u8 *self, int len, char *bf) { char *bid = bf; - const u8 *raw = build_id; + const u8 *raw = self; int i; for (i = 0; i < len; ++i) { @@ -386,25 +384,24 @@ int build_id__sprintf(const u8 *build_id, int len, char *bf) bid += 2; } - return raw - build_id; + return raw - self; } -size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) +size_t dso__fprintf_buildid(struct dso *self, FILE *fp) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); + build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); return fprintf(fp, "%s", sbuild_id); } -size_t dso__fprintf_symbols_by_name(struct dso *dso, - enum map_type type, FILE *fp) +size_t dso__fprintf_symbols_by_name(struct dso *self, enum 
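[Editorial note: build_id__sprintf() simply renders the raw build-id bytes as lower-case hex, two characters per byte, and returns how many bytes it consumed. A standalone sketch of the same formatting; BUILD_ID_SIZE is 20 in perf (SHA-1 sized), and the sample bytes below are arbitrary.]

#include <stdio.h>

#define BUILD_ID_SIZE 20	/* SHA-1 sized, as perf assumes */

/* Render `len` raw bytes as hex into bf (needs 2*len+1 chars); returns len. */
static int build_id_sprintf(const unsigned char *build_id, int len, char *bf)
{
	char *bid = bf;
	int i;

	for (i = 0; i < len; ++i) {
		sprintf(bid, "%02x", build_id[i]);
		bid += 2;
	}
	return len;
}

int main(void)
{
	unsigned char id[BUILD_ID_SIZE] = { 0x12, 0x34, 0xab, 0xcd };
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id_sprintf(id, sizeof(id), sbuild_id);
	printf("%s\n", sbuild_id);
	return 0;
}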
map_type type, FILE *fp) { size_t ret = 0; struct rb_node *nd; struct symbol_name_rb_node *pos; - for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) { pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); fprintf(fp, "%s\n", pos->sym.name); } @@ -412,18 +409,18 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso, return ret; } -size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) +size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) { struct rb_node *nd; - size_t ret = fprintf(fp, "dso: %s (", dso->short_name); + size_t ret = fprintf(fp, "dso: %s (", self->short_name); - if (dso->short_name != dso->long_name) - ret += fprintf(fp, "%s, ", dso->long_name); + if (self->short_name != self->long_name) + ret += fprintf(fp, "%s, ", self->long_name); ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], - dso->loaded ? "" : "NOT "); - ret += dso__fprintf_buildid(dso, fp); + self->loaded ? "" : "NOT "); + ret += dso__fprintf_buildid(self, fp); ret += fprintf(fp, ")\n"); - for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); ret += symbol__fprintf(pos, fp); } @@ -546,10 +543,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. */ -static int dso__load_all_kallsyms(struct dso *dso, const char *filename, +static int dso__load_all_kallsyms(struct dso *self, const char *filename, struct map *map) { - struct process_kallsyms_args args = { .map = map, .dso = dso, }; + struct process_kallsyms_args args = { .map = map, .dso = self, }; return kallsyms__parse(filename, &args, map__process_kallsym_symbol); } @@ -558,7 +555,7 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename, * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. 
*/ -static int dso__split_kallsyms(struct dso *dso, struct map *map, +static int dso__split_kallsyms(struct dso *self, struct map *map, symbol_filter_t filter) { struct map_groups *kmaps = map__kmap(map)->kmaps; @@ -566,7 +563,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, struct map *curr_map = map; struct symbol *pos; int count = 0, moved = 0; - struct rb_root *root = &dso->symbols[map->type]; + struct rb_root *root = &self->symbols[map->type]; struct rb_node *next = rb_first(root); int kernel_range = 0; @@ -585,7 +582,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, if (strcmp(curr_map->dso->short_name, module)) { if (curr_map != map && - dso->kernel == DSO_TYPE_GUEST_KERNEL && + self->kernel == DSO_TYPE_GUEST_KERNEL && machine__is_default_guest(machine)) { /* * We assume all symbols of a module are @@ -621,14 +618,14 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, pos->end = curr_map->map_ip(curr_map, pos->end); } else if (curr_map != map) { char dso_name[PATH_MAX]; - struct dso *ndso; + struct dso *dso; if (count == 0) { curr_map = map; goto filter_symbol; } - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + if (self->kernel == DSO_TYPE_GUEST_KERNEL) snprintf(dso_name, sizeof(dso_name), "[guest.kernel].%d", kernel_range++); @@ -637,15 +634,15 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, "[kernel].%d", kernel_range++); - ndso = dso__new(dso_name); - if (ndso == NULL) + dso = dso__new(dso_name); + if (dso == NULL) return -1; - ndso->kernel = dso->kernel; + dso->kernel = self->kernel; - curr_map = map__new2(pos->start, ndso, map->type); + curr_map = map__new2(pos->start, dso, map->type); if (curr_map == NULL) { - dso__delete(ndso); + dso__delete(dso); return -1; } @@ -668,7 +665,7 @@ discard_symbol: rb_erase(&pos->rb_node, root); } if (curr_map != map && - dso->kernel == DSO_TYPE_GUEST_KERNEL && + self->kernel == DSO_TYPE_GUEST_KERNEL && machine__is_default_guest(kmaps->machine)) { dso__set_loaded(curr_map->dso, curr_map->type); } @@ -676,21 +673,21 @@ discard_symbol: rb_erase(&pos->rb_node, root); return count + moved; } -int dso__load_kallsyms(struct dso *dso, const char *filename, +int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, symbol_filter_t filter) { - if (dso__load_all_kallsyms(dso, filename, map) < 0) + if (dso__load_all_kallsyms(self, filename, map) < 0) return -1; - if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - dso->symtab_type = SYMTAB__GUEST_KALLSYMS; + if (self->kernel == DSO_TYPE_GUEST_KERNEL) + self->symtab_type = SYMTAB__GUEST_KALLSYMS; else - dso->symtab_type = SYMTAB__KALLSYMS; + self->symtab_type = SYMTAB__KALLSYMS; - return dso__split_kallsyms(dso, map, filter); + return dso__split_kallsyms(self, map, filter); } -static int dso__load_perf_map(struct dso *dso, struct map *map, +static int dso__load_perf_map(struct dso *self, struct map *map, symbol_filter_t filter) { char *line = NULL; @@ -698,7 +695,7 @@ static int dso__load_perf_map(struct dso *dso, struct map *map, FILE *file; int nr_syms = 0; - file = fopen(dso->long_name, "r"); + file = fopen(self->long_name, "r"); if (file == NULL) goto out_failure; @@ -736,7 +733,7 @@ static int dso__load_perf_map(struct dso *dso, struct map *map, if (filter && filter(map, sym)) symbol__delete(sym); else { - symbols__insert(&dso->symbols[map->type], sym); + symbols__insert(&self->symbols[map->type], sym); nr_syms++; } } @@ -755,7 +752,7 @@ static int dso__load_perf_map(struct dso *dso, struct map *map, /** * 
elf_symtab__for_each_symbol - iterate thru all the symbols * - * @syms: struct elf_symtab instance to iterate + * @self: struct elf_symtab instance to iterate * @idx: uint32_t idx * @sym: GElf_Sym iterator */ @@ -855,7 +852,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). */ -static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, +static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, symbol_filter_t filter) { uint32_t nr_rel_entries, idx; @@ -874,7 +871,7 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, char name[PATH_MAX]; snprintf(name, sizeof(name), "%s%s", - symbol_conf.symfs, dso->long_name); + symbol_conf.symfs, self->long_name); fd = open(name, O_RDONLY); if (fd < 0) goto out; @@ -950,7 +947,7 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - symbols__insert(&dso->symbols[map->type], f); + symbols__insert(&self->symbols[map->type], f); ++nr; } } @@ -972,7 +969,7 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - symbols__insert(&dso->symbols[map->type], f); + symbols__insert(&self->symbols[map->type], f); ++nr; } } @@ -988,30 +985,29 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, return nr; out: pr_debug("%s: problems reading %s PLT info.\n", - __func__, dso->long_name); + __func__, self->long_name); return 0; } -static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) +static bool elf_sym__is_a(GElf_Sym *self, enum map_type type) { switch (type) { case MAP__FUNCTION: - return elf_sym__is_function(sym); + return elf_sym__is_function(self); case MAP__VARIABLE: - return elf_sym__is_object(sym); + return elf_sym__is_object(self); default: return false; } } -static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, - enum map_type type) +static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type) { switch (type) { case MAP__FUNCTION: - return elf_sec__is_text(shdr, secstrs); + return elf_sec__is_text(self, secstrs); case MAP__VARIABLE: - return elf_sec__is_data(shdr, secstrs); + return elf_sec__is_data(self, secstrs); default: return false; } @@ -1036,13 +1032,13 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) return -1; } -static int dso__load_sym(struct dso *dso, struct map *map, const char *name, +static int dso__load_sym(struct dso *self, struct map *map, const char *name, int fd, symbol_filter_t filter, int kmodule, int want_symtab) { - struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; + struct kmap *kmap = self->kernel ? 
map__kmap(map) : NULL; struct map *curr_map = map; - struct dso *curr_dso = dso; + struct dso *curr_dso = self; Elf_Data *symstrs, *secstrs; uint32_t nr_syms; int err = -1; @@ -1068,14 +1064,14 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, } /* Always reject images with a mismatched build-id: */ - if (dso->has_build_id) { + if (self->has_build_id) { u8 build_id[BUILD_ID_SIZE]; if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) != BUILD_ID_SIZE) goto out_elf_end; - if (!dso__build_id_equal(dso, build_id)) + if (!dso__build_id_equal(self, build_id)) goto out_elf_end; } @@ -1116,14 +1112,13 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, nr_syms = shdr.sh_size / shdr.sh_entsize; memset(&sym, 0, sizeof(sym)); - if (dso->kernel == DSO_TYPE_USER) { - dso->adjust_symbols = (ehdr.e_type == ET_EXEC || + if (self->kernel == DSO_TYPE_USER) { + self->adjust_symbols = (ehdr.e_type == ET_EXEC || elf_section_by_name(elf, &ehdr, &shdr, ".gnu.prelink_undo", NULL) != NULL); - } else { - dso->adjust_symbols = 0; - } + } else self->adjust_symbols = 0; + elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name = elf_sym__name(&sym, symstrs); @@ -1173,22 +1168,22 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, (sym.st_value & 1)) --sym.st_value; - if (dso->kernel != DSO_TYPE_USER || kmodule) { + if (self->kernel != DSO_TYPE_USER || kmodule) { char dso_name[PATH_MAX]; if (strcmp(section_name, (curr_dso->short_name + - dso->short_name_len)) == 0) + self->short_name_len)) == 0) goto new_symbol; if (strcmp(section_name, ".text") == 0) { curr_map = map; - curr_dso = dso; + curr_dso = self; goto new_symbol; } snprintf(dso_name, sizeof(dso_name), - "%s%s", dso->short_name, section_name); + "%s%s", self->short_name, section_name); curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); if (curr_map == NULL) { @@ -1200,9 +1195,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, curr_dso = dso__new(dso_name); if (curr_dso == NULL) goto out_elf_end; - curr_dso->kernel = dso->kernel; - curr_dso->long_name = dso->long_name; - curr_dso->long_name_len = dso->long_name_len; + curr_dso->kernel = self->kernel; + curr_dso->long_name = self->long_name; + curr_dso->long_name_len = self->long_name_len; curr_map = map__new2(start, curr_dso, map->type); if (curr_map == NULL) { @@ -1211,9 +1206,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, } curr_map->map_ip = identity__map_ip; curr_map->unmap_ip = identity__map_ip; - curr_dso->symtab_type = dso->symtab_type; + curr_dso->symtab_type = self->symtab_type; map_groups__insert(kmap->kmaps, curr_map); - dsos__add(&dso->node, curr_dso); + dsos__add(&self->node, curr_dso); dso__set_loaded(curr_dso, map->type); } else curr_dso = curr_map->dso; @@ -1255,7 +1250,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, * For misannotated, zeroed, ASM function sizes. 
*/ if (nr > 0) { - symbols__fixup_end(&dso->symbols[map->type]); + symbols__fixup_end(&self->symbols[map->type]); if (kmap) { /* * We need to fixup this here too because we create new @@ -1271,9 +1266,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, return err; } -static bool dso__build_id_equal(const struct dso *dso, u8 *build_id) +static bool dso__build_id_equal(const struct dso *self, u8 *build_id) { - return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; + return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; } bool __dsos__read_build_ids(struct list_head *head, bool with_hits) @@ -1434,7 +1429,7 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size) return err; } -char dso__symtab_origin(const struct dso *dso) +char dso__symtab_origin(const struct dso *self) { static const char origin[] = { [SYMTAB__KALLSYMS] = 'k', @@ -1449,12 +1444,12 @@ char dso__symtab_origin(const struct dso *dso) [SYMTAB__GUEST_KMODULE] = 'G', }; - if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND) + if (self == NULL || self->symtab_type == SYMTAB__NOT_FOUND) return '!'; - return origin[dso->symtab_type]; + return origin[self->symtab_type]; } -int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) { int size = PATH_MAX; char *name; @@ -1464,12 +1459,12 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) const char *root_dir; int want_symtab; - dso__set_loaded(dso, map->type); + dso__set_loaded(self, map->type); - if (dso->kernel == DSO_TYPE_KERNEL) - return dso__load_kernel_sym(dso, map, filter); - else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) - return dso__load_guest_kernel_sym(dso, map, filter); + if (self->kernel == DSO_TYPE_KERNEL) + return dso__load_kernel_sym(self, map, filter); + else if (self->kernel == DSO_TYPE_GUEST_KERNEL) + return dso__load_guest_kernel_sym(self, map, filter); if (map->groups && map->groups->machine) machine = map->groups->machine; @@ -1480,11 +1475,11 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (!name) return -1; - dso->adjust_symbols = 0; + self->adjust_symbols = 0; - if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { - ret = dso__load_perf_map(dso, map, filter); - dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : + if (strncmp(self->name, "/tmp/perf-", 10) == 0) { + ret = dso__load_perf_map(self, map, filter); + self->symtab_type = ret > 0 ? 
SYMTAB__JAVA_JIT : SYMTAB__NOT_FOUND; return ret; } @@ -1495,33 +1490,33 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) */ want_symtab = 1; restart: - for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE; - dso->symtab_type != SYMTAB__NOT_FOUND; - dso->symtab_type++) { - switch (dso->symtab_type) { + for (self->symtab_type = SYMTAB__BUILD_ID_CACHE; + self->symtab_type != SYMTAB__NOT_FOUND; + self->symtab_type++) { + switch (self->symtab_type) { case SYMTAB__BUILD_ID_CACHE: /* skip the locally configured cache if a symfs is given */ if (symbol_conf.symfs[0] || - (dso__build_id_filename(dso, name, size) == NULL)) { + (dso__build_id_filename(self, name, size) == NULL)) { continue; } break; case SYMTAB__FEDORA_DEBUGINFO: snprintf(name, size, "%s/usr/lib/debug%s.debug", - symbol_conf.symfs, dso->long_name); + symbol_conf.symfs, self->long_name); break; case SYMTAB__UBUNTU_DEBUGINFO: snprintf(name, size, "%s/usr/lib/debug%s", - symbol_conf.symfs, dso->long_name); + symbol_conf.symfs, self->long_name); break; case SYMTAB__BUILDID_DEBUGINFO: { char build_id_hex[BUILD_ID_SIZE * 2 + 1]; - if (!dso->has_build_id) + if (!self->has_build_id) continue; - build_id__sprintf(dso->build_id, - sizeof(dso->build_id), + build_id__sprintf(self->build_id, + sizeof(self->build_id), build_id_hex); snprintf(name, size, "%s/usr/lib/debug/.build-id/%.2s/%s.debug", @@ -1530,7 +1525,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) break; case SYMTAB__SYSTEM_PATH_DSO: snprintf(name, size, "%s%s", - symbol_conf.symfs, dso->long_name); + symbol_conf.symfs, self->long_name); break; case SYMTAB__GUEST_KMODULE: if (map->groups && machine) @@ -1538,12 +1533,12 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) else root_dir = ""; snprintf(name, size, "%s%s%s", symbol_conf.symfs, - root_dir, dso->long_name); + root_dir, self->long_name); break; case SYMTAB__SYSTEM_PATH_KMODULE: snprintf(name, size, "%s%s", symbol_conf.symfs, - dso->long_name); + self->long_name); break; default:; } @@ -1553,7 +1548,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (fd < 0) continue; - ret = dso__load_sym(dso, map, name, fd, filter, 0, + ret = dso__load_sym(self, map, name, fd, filter, 0, want_symtab); close(fd); @@ -1565,8 +1560,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) continue; if (ret > 0) { - int nr_plt = dso__synthesize_plt_symbols(dso, map, - filter); + int nr_plt = dso__synthesize_plt_symbols(self, map, filter); if (nr_plt > 0) ret += nr_plt; break; @@ -1583,17 +1577,17 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) } free(name); - if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) + if (ret < 0 && strstr(self->name, " (deleted)") != NULL) return 0; return ret; } -struct map *map_groups__find_by_name(struct map_groups *mg, +struct map *map_groups__find_by_name(struct map_groups *self, enum map_type type, const char *name) { struct rb_node *nd; - for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); if (map->dso && strcmp(map->dso->short_name, name) == 0) @@ -1603,28 +1597,28 @@ struct map *map_groups__find_by_name(struct map_groups *mg, return NULL; } -static int dso__kernel_module_get_build_id(struct dso *dso, - const char *root_dir) +static int dso__kernel_module_get_build_id(struct dso *self, + const char *root_dir) { char filename[PATH_MAX]; 
/* * kernel module short names are of the form "[module]" and * we need just "module" here. */ - const char *name = dso->short_name + 1; + const char *name = self->short_name + 1; snprintf(filename, sizeof(filename), "%s/sys/module/%.*s/notes/.note.gnu.build-id", root_dir, (int)strlen(name) - 1, name); - if (sysfs__read_build_id(filename, dso->build_id, - sizeof(dso->build_id)) == 0) - dso->has_build_id = true; + if (sysfs__read_build_id(filename, self->build_id, + sizeof(self->build_id)) == 0) + self->has_build_id = true; return 0; } -static int map_groups__set_modules_path_dir(struct map_groups *mg, +static int map_groups__set_modules_path_dir(struct map_groups *self, const char *dir_name) { struct dirent *dent; @@ -1652,7 +1646,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); - ret = map_groups__set_modules_path_dir(mg, path); + ret = map_groups__set_modules_path_dir(self, path); if (ret < 0) goto out; } else { @@ -1667,8 +1661,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg, (int)(dot - dent->d_name), dent->d_name); strxfrchar(dso_name, '-', '_'); - map = map_groups__find_by_name(mg, MAP__FUNCTION, - dso_name); + map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name); if (map == NULL) continue; @@ -1718,20 +1711,20 @@ static char *get_kernel_version(const char *root_dir) return strdup(name); } -static int machine__set_modules_path(struct machine *machine) +static int machine__set_modules_path(struct machine *self) { char *version; char modules_path[PATH_MAX]; - version = get_kernel_version(machine->root_dir); + version = get_kernel_version(self->root_dir); if (!version) return -1; snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", - machine->root_dir, version); + self->root_dir, version); free(version); - return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); + return map_groups__set_modules_path_dir(&self->kmaps, modules_path); } /* @@ -1741,23 +1734,23 @@ static int machine__set_modules_path(struct machine *machine) */ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) { - struct map *map = calloc(1, (sizeof(*map) + - (dso->kernel ? sizeof(struct kmap) : 0))); - if (map != NULL) { + struct map *self = calloc(1, (sizeof(*self) + + (dso->kernel ? 
sizeof(struct kmap) : 0))); + if (self != NULL) { /* * ->end will be filled after we load all the symbols */ - map__init(map, type, start, 0, 0, dso); + map__init(self, type, start, 0, 0, dso); } - return map; + return self; } -struct map *machine__new_module(struct machine *machine, u64 start, +struct map *machine__new_module(struct machine *self, u64 start, const char *filename) { struct map *map; - struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); + struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename); if (dso == NULL) return NULL; @@ -1766,15 +1759,15 @@ struct map *machine__new_module(struct machine *machine, u64 start, if (map == NULL) return NULL; - if (machine__is_host(machine)) + if (machine__is_host(self)) dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE; else dso->symtab_type = SYMTAB__GUEST_KMODULE; - map_groups__insert(&machine->kmaps, map); + map_groups__insert(&self->kmaps, map); return map; } -static int machine__create_modules(struct machine *machine) +static int machine__create_modules(struct machine *self) { char *line = NULL; size_t n; @@ -1783,10 +1776,10 @@ static int machine__create_modules(struct machine *machine) const char *modules; char path[PATH_MAX]; - if (machine__is_default_guest(machine)) + if (machine__is_default_guest(self)) modules = symbol_conf.default_guest_modules; else { - sprintf(path, "%s/proc/modules", machine->root_dir); + sprintf(path, "%s/proc/modules", self->root_dir); modules = path; } @@ -1822,16 +1815,16 @@ static int machine__create_modules(struct machine *machine) *sep = '\0'; snprintf(name, sizeof(name), "[%s]", line); - map = machine__new_module(machine, start, name); + map = machine__new_module(self, start, name); if (map == NULL) goto out_delete_line; - dso__kernel_module_get_build_id(map->dso, machine->root_dir); + dso__kernel_module_get_build_id(map->dso, self->root_dir); } free(line); fclose(file); - return machine__set_modules_path(machine); + return machine__set_modules_path(self); out_delete_line: free(line); @@ -1839,7 +1832,7 @@ static int machine__create_modules(struct machine *machine) return -1; } -int dso__load_vmlinux(struct dso *dso, struct map *map, +int dso__load_vmlinux(struct dso *self, struct map *map, const char *vmlinux, symbol_filter_t filter) { int err = -1, fd; @@ -1851,9 +1844,9 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, if (fd < 0) return -1; - dso__set_long_name(dso, (char *)vmlinux); - dso__set_loaded(dso, map->type); - err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0); + dso__set_long_name(self, (char *)vmlinux); + dso__set_loaded(self, map->type); + err = dso__load_sym(self, map, symfs_vmlinux, fd, filter, 0, 0); close(fd); if (err > 0) @@ -1862,7 +1855,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, return err; } -int dso__load_vmlinux_path(struct dso *dso, struct map *map, +int dso__load_vmlinux_path(struct dso *self, struct map *map, symbol_filter_t filter) { int i, err = 0; @@ -1871,20 +1864,20 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, pr_debug("Looking at the vmlinux_path (%d entries long)\n", vmlinux_path__nr_entries + 1); - filename = dso__build_id_filename(dso, NULL, 0); + filename = dso__build_id_filename(self, NULL, 0); if (filename != NULL) { - err = dso__load_vmlinux(dso, map, filename, filter); + err = dso__load_vmlinux(self, map, filename, filter); if (err > 0) { - dso__set_long_name(dso, filename); + dso__set_long_name(self, filename); goto out; } free(filename); } for (i = 0; i < 
vmlinux_path__nr_entries; ++i) { - err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); + err = dso__load_vmlinux(self, map, vmlinux_path[i], filter); if (err > 0) { - dso__set_long_name(dso, strdup(vmlinux_path[i])); + dso__set_long_name(self, strdup(vmlinux_path[i])); break; } } @@ -1892,7 +1885,7 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, return err; } -static int dso__load_kernel_sym(struct dso *dso, struct map *map, +static int dso__load_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter) { int err; @@ -1919,10 +1912,10 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, } if (symbol_conf.vmlinux_name != NULL) { - err = dso__load_vmlinux(dso, map, + err = dso__load_vmlinux(self, map, symbol_conf.vmlinux_name, filter); if (err > 0) { - dso__set_long_name(dso, + dso__set_long_name(self, strdup(symbol_conf.vmlinux_name)); goto out_fixup; } @@ -1930,7 +1923,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, } if (vmlinux_path != NULL) { - err = dso__load_vmlinux_path(dso, map, filter); + err = dso__load_vmlinux_path(self, map, filter); if (err > 0) goto out_fixup; } @@ -1944,13 +1937,13 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, * we have a build-id, so check if it is the same as the running kernel, * using it if it is. */ - if (dso->has_build_id) { + if (self->has_build_id) { u8 kallsyms_build_id[BUILD_ID_SIZE]; char sbuild_id[BUILD_ID_SIZE * 2 + 1]; if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, sizeof(kallsyms_build_id)) == 0) { - if (dso__build_id_equal(dso, kallsyms_build_id)) { + if (dso__build_id_equal(self, kallsyms_build_id)) { kallsyms_filename = "/proc/kallsyms"; goto do_kallsyms; } @@ -1959,7 +1952,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, * Now look if we have it on the build-id cache in * $HOME/.debug/[kernel.kallsyms]. 
*/ - build_id__sprintf(dso->build_id, sizeof(dso->build_id), + build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); if (asprintf(&kallsyms_allocated_filename, @@ -1986,7 +1979,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, } do_kallsyms: - err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); + err = dso__load_kallsyms(self, kallsyms_filename, map, filter); if (err > 0) pr_debug("Using %s for symbols\n", kallsyms_filename); free(kallsyms_allocated_filename); @@ -1994,7 +1987,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, if (err > 0) { out_fixup: if (kallsyms_filename != NULL) - dso__set_long_name(dso, strdup("[kernel.kallsyms]")); + dso__set_long_name(self, strdup("[kernel.kallsyms]")); map__fixup_start(map); map__fixup_end(map); } @@ -2002,8 +1995,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, return err; } -static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, - symbol_filter_t filter) +static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, + symbol_filter_t filter) { int err; const char *kallsyms_filename = NULL; @@ -2023,7 +2016,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, * Or use file guest_kallsyms inputted by user on commandline */ if (symbol_conf.default_guest_vmlinux_name != NULL) { - err = dso__load_vmlinux(dso, map, + err = dso__load_vmlinux(self, map, symbol_conf.default_guest_vmlinux_name, filter); goto out_try_fixup; } @@ -2036,7 +2029,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, kallsyms_filename = path; } - err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); + err = dso__load_kallsyms(self, kallsyms_filename, map, filter); if (err > 0) pr_debug("Using %s for symbols\n", kallsyms_filename); @@ -2044,7 +2037,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, if (err > 0) { if (kallsyms_filename != NULL) { machine__mmap_name(machine, path, sizeof(path)); - dso__set_long_name(dso, strdup(path)); + dso__set_long_name(self, strdup(path)); } map__fixup_start(map); map__fixup_end(map); @@ -2097,12 +2090,12 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp) return ret; } -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) +size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp) { struct rb_node *nd; size_t ret = 0; - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(self); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret += __dsos__fprintf(&pos->kernel_dsos, fp); ret += __dsos__fprintf(&pos->user_dsos, fp); @@ -2126,20 +2119,18 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, return ret; } -size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, - bool with_hits) +size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits) { - return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) + - __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits); + return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) + + __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits); } -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, - FILE *fp, bool with_hits) +size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits) { struct rb_node *nd; size_t ret = 0; - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(self); nd; nd 
= rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); } @@ -2148,59 +2139,59 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *machines, struct dso *dso__new_kernel(const char *name) { - struct dso *dso = dso__new(name ?: "[kernel.kallsyms]"); + struct dso *self = dso__new(name ?: "[kernel.kallsyms]"); - if (dso != NULL) { - dso__set_short_name(dso, "[kernel]"); - dso->kernel = DSO_TYPE_KERNEL; + if (self != NULL) { + dso__set_short_name(self, "[kernel]"); + self->kernel = DSO_TYPE_KERNEL; } - return dso; + return self; } static struct dso *dso__new_guest_kernel(struct machine *machine, const char *name) { char bf[PATH_MAX]; - struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf, - sizeof(bf))); - if (dso != NULL) { - dso__set_short_name(dso, "[guest.kernel]"); - dso->kernel = DSO_TYPE_GUEST_KERNEL; + struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf))); + + if (self != NULL) { + dso__set_short_name(self, "[guest.kernel]"); + self->kernel = DSO_TYPE_GUEST_KERNEL; } - return dso; + return self; } -void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) +void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine) { char path[PATH_MAX]; if (machine__is_default_guest(machine)) return; sprintf(path, "%s/sys/kernel/notes", machine->root_dir); - if (sysfs__read_build_id(path, dso->build_id, - sizeof(dso->build_id)) == 0) - dso->has_build_id = true; + if (sysfs__read_build_id(path, self->build_id, + sizeof(self->build_id)) == 0) + self->has_build_id = true; } -static struct dso *machine__create_kernel(struct machine *machine) +static struct dso *machine__create_kernel(struct machine *self) { const char *vmlinux_name = NULL; struct dso *kernel; - if (machine__is_host(machine)) { + if (machine__is_host(self)) { vmlinux_name = symbol_conf.vmlinux_name; kernel = dso__new_kernel(vmlinux_name); } else { - if (machine__is_default_guest(machine)) + if (machine__is_default_guest(self)) vmlinux_name = symbol_conf.default_guest_vmlinux_name; - kernel = dso__new_guest_kernel(machine, vmlinux_name); + kernel = dso__new_guest_kernel(self, vmlinux_name); } if (kernel != NULL) { - dso__read_running_kernel_build_id(kernel, machine); - dsos__add(&machine->kernel_dsos, kernel); + dso__read_running_kernel_build_id(kernel, self); + dsos__add(&self->kernel_dsos, kernel); } return kernel; } @@ -2245,43 +2236,41 @@ static u64 machine__get_kernel_start_addr(struct machine *machine) return args.start; } -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) +int __machine__create_kernel_maps(struct machine *self, struct dso *kernel) { enum map_type type; - u64 start = machine__get_kernel_start_addr(machine); + u64 start = machine__get_kernel_start_addr(self); for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; - machine->vmlinux_maps[type] = map__new2(start, kernel, type); - if (machine->vmlinux_maps[type] == NULL) + self->vmlinux_maps[type] = map__new2(start, kernel, type); + if (self->vmlinux_maps[type] == NULL) return -1; - machine->vmlinux_maps[type]->map_ip = - machine->vmlinux_maps[type]->unmap_ip = - identity__map_ip; - kmap = map__kmap(machine->vmlinux_maps[type]); - kmap->kmaps = &machine->kmaps; - map_groups__insert(&machine->kmaps, - machine->vmlinux_maps[type]); + self->vmlinux_maps[type]->map_ip = + self->vmlinux_maps[type]->unmap_ip = identity__map_ip; + + kmap = 
map__kmap(self->vmlinux_maps[type]); + kmap->kmaps = &self->kmaps; + map_groups__insert(&self->kmaps, self->vmlinux_maps[type]); } return 0; } -void machine__destroy_kernel_maps(struct machine *machine) +void machine__destroy_kernel_maps(struct machine *self) { enum map_type type; for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; - if (machine->vmlinux_maps[type] == NULL) + if (self->vmlinux_maps[type] == NULL) continue; - kmap = map__kmap(machine->vmlinux_maps[type]); - map_groups__remove(&machine->kmaps, - machine->vmlinux_maps[type]); + kmap = map__kmap(self->vmlinux_maps[type]); + map_groups__remove(&self->kmaps, self->vmlinux_maps[type]); if (kmap->ref_reloc_sym) { /* * ref_reloc_sym is shared among all maps, so free just @@ -2295,25 +2284,25 @@ void machine__destroy_kernel_maps(struct machine *machine) kmap->ref_reloc_sym = NULL; } - map__delete(machine->vmlinux_maps[type]); - machine->vmlinux_maps[type] = NULL; + map__delete(self->vmlinux_maps[type]); + self->vmlinux_maps[type] = NULL; } } -int machine__create_kernel_maps(struct machine *machine) +int machine__create_kernel_maps(struct machine *self) { - struct dso *kernel = machine__create_kernel(machine); + struct dso *kernel = machine__create_kernel(self); if (kernel == NULL || - __machine__create_kernel_maps(machine, kernel) < 0) + __machine__create_kernel_maps(self, kernel) < 0) return -1; - if (symbol_conf.use_modules && machine__create_modules(machine) < 0) + if (symbol_conf.use_modules && machine__create_modules(self) < 0) pr_debug("Problems creating module maps, continuing anyway...\n"); /* * Now that we have all the maps created, just set the ->end of them: */ - map_groups__fixup_end(&machine->kmaps); + map_groups__fixup_end(&self->kmaps); return 0; } @@ -2377,11 +2366,11 @@ static int vmlinux_path__init(void) return -1; } -size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) +size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp) { int i; size_t printed = 0; - struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; + struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso; if (kdso->has_build_id) { char filename[PATH_MAX]; @@ -2478,9 +2467,9 @@ void symbol__exit(void) symbol_conf.initialized = false; } -int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) +int machines__create_kernel_maps(struct rb_root *self, pid_t pid) { - struct machine *machine = machines__findnew(machines, pid); + struct machine *machine = machines__findnew(self, pid); if (machine == NULL) return -1; @@ -2531,7 +2520,7 @@ char *strxfrchar(char *s, char from, char to) return s; } -int machines__create_guest_kernel_maps(struct rb_root *machines) +int machines__create_guest_kernel_maps(struct rb_root *self) { int ret = 0; struct dirent **namelist = NULL; @@ -2542,7 +2531,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) if (symbol_conf.default_guest_vmlinux_name || symbol_conf.default_guest_modules || symbol_conf.default_guest_kallsyms) { - machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); + machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID); } if (symbol_conf.guestmount) { @@ -2563,7 +2552,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) pr_debug("Can't access file %s\n", path); goto failure; } - machines__create_kernel_maps(machines, pid); + machines__create_kernel_maps(self, pid); } failure: free(namelist); @@ -2572,23 +2561,23 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) return ret; } -void 
machines__destroy_guest_kernel_maps(struct rb_root *machines) +void machines__destroy_guest_kernel_maps(struct rb_root *self) { - struct rb_node *next = rb_first(machines); + struct rb_node *next = rb_first(self); while (next) { struct machine *pos = rb_entry(next, struct machine, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, machines); + rb_erase(&pos->rb_node, self); machine__delete(pos); } } -int machine__load_kallsyms(struct machine *machine, const char *filename, +int machine__load_kallsyms(struct machine *self, const char *filename, enum map_type type, symbol_filter_t filter) { - struct map *map = machine->vmlinux_maps[type]; + struct map *map = self->vmlinux_maps[type]; int ret = dso__load_kallsyms(map->dso, filename, map, filter); if (ret > 0) { @@ -2598,16 +2587,16 @@ int machine__load_kallsyms(struct machine *machine, const char *filename, * kernel, with modules between them, fixup the end of all * sections. */ - __map_groups__fixup_end(&machine->kmaps, type); + __map_groups__fixup_end(&self->kmaps, type); } return ret; } -int machine__load_vmlinux_path(struct machine *machine, enum map_type type, +int machine__load_vmlinux_path(struct machine *self, enum map_type type, symbol_filter_t filter) { - struct map *map = machine->vmlinux_maps[type]; + struct map *map = self->vmlinux_maps[type]; int ret = dso__load_vmlinux_path(map->dso, map, filter); if (ret > 0) { diff --git a/trunk/tools/perf/util/symbol.h b/trunk/tools/perf/util/symbol.h index 242de0101a86..713b0b40cc4a 100644 --- a/trunk/tools/perf/util/symbol.h +++ b/trunk/tools/perf/util/symbol.h @@ -62,7 +62,7 @@ struct symbol { char name[0]; }; -void symbol__delete(struct symbol *sym); +void symbol__delete(struct symbol *self); struct strlist; @@ -96,9 +96,9 @@ struct symbol_conf { extern struct symbol_conf symbol_conf; -static inline void *symbol__priv(struct symbol *sym) +static inline void *symbol__priv(struct symbol *self) { - return ((void *)sym) - symbol_conf.priv_size; + return ((void *)self) - symbol_conf.priv_size; } struct ref_reloc_sym { @@ -155,45 +155,43 @@ struct dso { struct dso *dso__new(const char *name); struct dso *dso__new_kernel(const char *name); -void dso__delete(struct dso *dso); +void dso__delete(struct dso *self); -int dso__name_len(const struct dso *dso); +int dso__name_len(const struct dso *self); -bool dso__loaded(const struct dso *dso, enum map_type type); -bool dso__sorted_by_name(const struct dso *dso, enum map_type type); +bool dso__loaded(const struct dso *self, enum map_type type); +bool dso__sorted_by_name(const struct dso *self, enum map_type type); -static inline void dso__set_loaded(struct dso *dso, enum map_type type) +static inline void dso__set_loaded(struct dso *self, enum map_type type) { - dso->loaded |= (1 << type); + self->loaded |= (1 << type); } -void dso__sort_by_name(struct dso *dso, enum map_type type); +void dso__sort_by_name(struct dso *self, enum map_type type); struct dso *__dsos__findnew(struct list_head *head, const char *name); -int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); -int dso__load_vmlinux(struct dso *dso, struct map *map, +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); +int dso__load_vmlinux(struct dso *self, struct map *map, const char *vmlinux, symbol_filter_t filter); -int dso__load_vmlinux_path(struct dso *dso, struct map *map, +int dso__load_vmlinux_path(struct dso *self, struct map *map, symbol_filter_t filter); -int dso__load_kallsyms(struct dso *dso, const char *filename, struct 
map *map, +int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, symbol_filter_t filter); -int machine__load_kallsyms(struct machine *machine, const char *filename, +int machine__load_kallsyms(struct machine *self, const char *filename, enum map_type type, symbol_filter_t filter); -int machine__load_vmlinux_path(struct machine *machine, enum map_type type, +int machine__load_vmlinux_path(struct machine *self, enum map_type type, symbol_filter_t filter); size_t __dsos__fprintf(struct list_head *head, FILE *fp); -size_t machine__fprintf_dsos_buildid(struct machine *machine, - FILE *fp, bool with_hits); -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, - FILE *fp, bool with_hits); -size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); -size_t dso__fprintf_symbols_by_name(struct dso *dso, - enum map_type type, FILE *fp); -size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); +size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits); +size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp); +size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits); + +size_t dso__fprintf_buildid(struct dso *self, FILE *fp); +size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp); +size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); enum symtab_type { SYMTAB__KALLSYMS = 0, @@ -209,36 +207,34 @@ enum symtab_type { SYMTAB__NOT_FOUND, }; -char dso__symtab_origin(const struct dso *dso); -void dso__set_long_name(struct dso *dso, char *name); -void dso__set_build_id(struct dso *dso, void *build_id); -void dso__read_running_kernel_build_id(struct dso *dso, - struct machine *machine); -struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, - u64 addr); -struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, +char dso__symtab_origin(const struct dso *self); +void dso__set_long_name(struct dso *self, char *name); +void dso__set_build_id(struct dso *self, void *build_id); +void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine); +struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); +struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, const char *name); int filename__read_build_id(const char *filename, void *bf, size_t size); int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool __dsos__read_build_ids(struct list_head *head, bool with_hits); -int build_id__sprintf(const u8 *build_id, int len, char *bf); +int build_id__sprintf(const u8 *self, int len, char *bf); int kallsyms__parse(const char *filename, void *arg, int (*process_symbol)(void *arg, const char *name, char type, u64 start, u64 end)); -void machine__destroy_kernel_maps(struct machine *machine); -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); -int machine__create_kernel_maps(struct machine *machine); +void machine__destroy_kernel_maps(struct machine *self); +int __machine__create_kernel_maps(struct machine *self, struct dso *kernel); +int machine__create_kernel_maps(struct machine *self); -int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); -int machines__create_guest_kernel_maps(struct rb_root *machines); -void machines__destroy_guest_kernel_maps(struct rb_root *machines); +int machines__create_kernel_maps(struct rb_root *self, pid_t pid); +int 
machines__create_guest_kernel_maps(struct rb_root *self); +void machines__destroy_guest_kernel_maps(struct rb_root *self); int symbol__init(void); void symbol__exit(void); bool symbol_type__is_a(char symbol_type, enum map_type map_type); -size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); +size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp); #endif /* __PERF_SYMBOL */
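
The hunks above keep passing build-ids around as raw fixed-size byte arrays and only hex-encode them when a printable form is needed (dso__fprintf_buildid(), the $HOME/.debug/.build-id debuginfo lookup, and the [kernel.kallsyms] cache path). The standalone sketch below is not part of the patch; it merely illustrates that encoding convention under the same assumptions the perf sources appear to make: a 20-byte SHA-1 build-id and a caller-provided buffer of at least len * 2 + 1 characters. The helper name build_id_to_hex is invented here for illustration; in symbol.c that role is played by build_id__sprintf().

/*
 * Standalone illustration (not from the patch) of the build-id
 * hex-encoding convention used above: every raw byte becomes two
 * lowercase hex digits, so the output buffer must hold at least
 * len * 2 + 1 characters.
 */
#include <stdio.h>

#define BUILD_ID_SIZE 20        /* SHA-1 build-id length, as in the perf sources */

/* Hypothetical helper for illustration only. */
static int build_id_to_hex(const unsigned char *build_id, int len, char *bf)
{
        char *bid = bf;
        int i;

        for (i = 0; i < len; ++i) {
                sprintf(bid, "%02x", build_id[i]);      /* two digits + NUL */
                bid += 2;
        }

        return (int)(bid - bf);                         /* characters written, NUL not counted */
}

int main(void)
{
        unsigned char id[BUILD_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
        char sbuild_id[BUILD_ID_SIZE * 2 + 1];

        build_id_to_hex(id, sizeof(id), sbuild_id);
        printf("%s\n", sbuild_id);                      /* "deadbeef" followed by zero padding */
        return 0;
}

The BUILD_ID_SIZE * 2 + 1 sizing seen in the sbuild_id[] declarations throughout the patch follows directly from this scheme: two characters per byte plus the terminating NUL.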