From ed026a06907eafd35e15ce9153b2aa075383da1b Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 3 Dec 2012 11:52:49 -0500
Subject: [PATCH]

--- yaml ---
r: 345555
b: refs/heads/master
c: fbed600af159b9dce78dd74c4bff56b40bb19d47
h: refs/heads/master
i:
  345553: 00d4979f5bb9c27007955207db7a14d6d4b0abf8
  345551: 78921d971fb8bd50ba12db5be9828d1c4196362d
v: v3
---
 [refs] | 2 +-
 trunk/CREDITS | 5 +
 trunk/Documentation/DMA-attributes.txt | 9 -
 trunk/Documentation/DocBook/drm.tmpl | 39 +-
 trunk/Documentation/arm64/memory.txt | 12 +-
 trunk/Documentation/cgroups/memory.txt | 4 +
 .../bindings/gpu/nvidia,tegra20-host1x.txt | 191 --
 .../devicetree/bindings/net/mdio-gpio.txt | 9 +-
 trunk/Documentation/filesystems/proc.txt | 16 +-
 trunk/Documentation/kref.txt | 88 -
 .../networking/netdev-features.txt | 2 +-
 trunk/Documentation/networking/vxlan.txt | 4 +-
 trunk/MAINTAINERS | 105 +-
 trunk/Makefile | 2 +-
 trunk/arch/alpha/kernel/osf_sys.c | 6 +-
 trunk/arch/arm/boot/Makefile | 10 +-
 trunk/arch/arm/boot/dts/tegra30.dtsi | 4 +-
 trunk/arch/arm/include/asm/io.h | 4 +-
 trunk/arch/arm/include/asm/sched_clock.h | 2 -
 trunk/arch/arm/include/asm/vfpmacros.h | 12 +-
 trunk/arch/arm/include/uapi/asm/hwcap.h | 3 +-
 trunk/arch/arm/kernel/sched_clock.c | 18 +-
 trunk/arch/arm/mach-at91/at91rm9200_devices.c | 2 +-
 .../arch/arm/mach-at91/at91sam9260_devices.c | 2 +-
 .../arch/arm/mach-at91/at91sam9261_devices.c | 2 +-
 .../arch/arm/mach-at91/at91sam9263_devices.c | 2 +-
 .../arch/arm/mach-at91/at91sam9g45_devices.c | 12 +-
 trunk/arch/arm/mach-davinci/dm644x.c | 3 +-
 trunk/arch/arm/mach-exynos/dma.c | 3 +
 trunk/arch/arm/mach-exynos/include/mach/map.h | 1 +
 trunk/arch/arm/mach-highbank/system.c | 3 +-
 trunk/arch/arm/mach-imx/clk-gate2.c | 2 +-
 trunk/arch/arm/mach-imx/ehci-imx25.c | 2 +-
 trunk/arch/arm/mach-imx/ehci-imx35.c | 2 +-
 trunk/arch/arm/mach-omap2/board-igep0020.c | 5 +
 .../arm/mach-omap2/clockdomains44xx_data.c | 2 +-
 .../arm/mach-omap2/common-board-devices.c | 34 +-
 trunk/arch/arm/mach-omap2/devices.c | 79 +
 trunk/arch/arm/mach-omap2/omap_hwmod.c | 63 +-
 .../arm/mach-omap2/omap_hwmod_44xx_data.c | 36 +
 trunk/arch/arm/mach-omap2/twl-common.c | 3 +-
 trunk/arch/arm/mach-omap2/vc.c | 2 +-
 trunk/arch/arm/mach-pxa/hx4700.c | 8 +-
 trunk/arch/arm/mach-pxa/spitz_pm.c | 8 +-
 trunk/arch/arm/mm/alignment.c | 2 +-
 trunk/arch/arm/mm/dma-mapping.c | 41 +-
 trunk/arch/arm/plat-omap/i2c.c | 21 +
 .../arm/plat-omap/include/plat/omap_hwmod.h | 6 +
 trunk/arch/arm/tools/Makefile | 2 +-
 trunk/arch/arm/vfp/vfpmodule.c | 9 +-
 trunk/arch/arm/xen/enlighten.c | 11 +
 trunk/arch/arm64/Kconfig | 1 +
 trunk/arch/arm64/include/asm/elf.h | 5 +-
 trunk/arch/arm64/include/asm/fpsimd.h | 5 +-
 trunk/arch/arm64/include/asm/io.h | 10 +-
 trunk/arch/arm64/include/asm/pgtable-hwdef.h | 6 +-
 trunk/arch/arm64/include/asm/pgtable.h | 40 +-
 trunk/arch/arm64/include/asm/processor.h | 2 +
 trunk/arch/arm64/include/asm/unistd.h | 1 -
 trunk/arch/arm64/kernel/perf_event.c | 10 +-
 trunk/arch/arm64/kernel/process.c | 18 -
 trunk/arch/arm64/kernel/smp.c | 3 +-
 trunk/arch/arm64/mm/init.c | 2 +-
 trunk/arch/h8300/include/asm/cache.h | 3 +-
 trunk/arch/ia64/mm/init.c | 1 -
 trunk/arch/m68k/include/asm/signal.h | 6 +-
 .../mips/cavium-octeon/executive/cvmx-l2c.c | 1 +
 trunk/arch/mips/fw/arc/misc.c | 1 +
 trunk/arch/mips/include/asm/bitops.h | 128 +-
 trunk/arch/mips/include/asm/compat.h | 2 +-
 trunk/arch/mips/include/asm/io.h | 1 +
 trunk/arch/mips/include/asm/irqflags.h | 207 +-
 trunk/arch/mips/include/asm/thread_info.h | 6 -
 trunk/arch/mips/kernel/setup.c | 26 +-
trunk/arch/mips/lib/Makefile | 5 +- trunk/arch/mips/lib/bitops.c | 179 ++ trunk/arch/mips/lib/mips-atomic.c | 176 ++ trunk/arch/mips/mti-malta/malta-platform.c | 3 +- trunk/arch/parisc/kernel/signal32.c | 6 +- trunk/arch/parisc/kernel/sys_parisc.c | 2 + trunk/arch/powerpc/boot/dts/mpc5200b.dtsi | 6 + trunk/arch/powerpc/boot/dts/o2d.dtsi | 6 - trunk/arch/powerpc/boot/dts/pcm030.dts | 7 +- .../arch/powerpc/platforms/52xx/mpc52xx_pic.c | 9 +- trunk/arch/powerpc/platforms/pseries/eeh_pe.c | 2 +- trunk/arch/powerpc/platforms/pseries/msi.c | 3 +- trunk/arch/s390/Kconfig | 1 + trunk/arch/s390/include/asm/cio.h | 2 + trunk/arch/s390/include/asm/compat.h | 2 +- trunk/arch/s390/include/asm/pgtable.h | 35 +- trunk/arch/s390/include/asm/topology.h | 3 + trunk/arch/s390/include/uapi/asm/ptrace.h | 4 +- trunk/arch/s390/kernel/compat_signal.c | 14 +- trunk/arch/s390/kernel/sclp.S | 8 +- trunk/arch/s390/kernel/signal.c | 14 +- trunk/arch/s390/kernel/topology.c | 6 +- trunk/arch/s390/lib/uaccess_pt.c | 2 +- trunk/arch/s390/mm/gup.c | 7 +- trunk/arch/sparc/Kconfig | 1 + trunk/arch/sparc/crypto/Makefile | 16 +- trunk/arch/sparc/crypto/aes_glue.c | 2 + trunk/arch/sparc/crypto/camellia_glue.c | 2 + trunk/arch/sparc/crypto/crc32c_glue.c | 2 + trunk/arch/sparc/crypto/des_glue.c | 2 + trunk/arch/sparc/crypto/md5_glue.c | 2 + trunk/arch/sparc/crypto/sha1_glue.c | 2 + trunk/arch/sparc/crypto/sha256_glue.c | 2 + trunk/arch/sparc/crypto/sha512_glue.c | 2 + trunk/arch/sparc/include/asm/atomic_64.h | 4 +- trunk/arch/sparc/include/asm/backoff.h | 69 +- trunk/arch/sparc/include/asm/compat.h | 5 +- trunk/arch/sparc/include/asm/processor_64.h | 17 +- trunk/arch/sparc/include/asm/prom.h | 8 + trunk/arch/sparc/include/asm/thread_info_64.h | 5 + trunk/arch/sparc/include/asm/ttable.h | 24 +- trunk/arch/sparc/include/uapi/asm/unistd.h | 7 +- trunk/arch/sparc/kernel/entry.h | 7 + trunk/arch/sparc/kernel/leon_kernel.c | 6 +- trunk/arch/sparc/kernel/perf_event.c | 22 +- trunk/arch/sparc/kernel/process_64.c | 42 +- trunk/arch/sparc/kernel/ptrace_64.c | 4 +- trunk/arch/sparc/kernel/setup_64.c | 21 + trunk/arch/sparc/kernel/signal_64.c | 4 +- trunk/arch/sparc/kernel/sys_sparc_64.c | 5 + trunk/arch/sparc/kernel/systbls_32.S | 1 + trunk/arch/sparc/kernel/systbls_64.S | 2 + trunk/arch/sparc/kernel/unaligned_64.c | 36 +- trunk/arch/sparc/kernel/visemul.c | 23 +- trunk/arch/sparc/kernel/vmlinux.lds.S | 5 + trunk/arch/sparc/kernel/winfixup.S | 2 + trunk/arch/sparc/lib/atomic_64.S | 16 +- trunk/arch/sparc/lib/ksyms.c | 1 + trunk/arch/sparc/math-emu/math_64.c | 2 +- trunk/arch/unicore32/Kconfig | 7 +- trunk/arch/unicore32/include/asm/Kbuild | 1 - trunk/arch/unicore32/include/asm/bug.h | 5 - trunk/arch/unicore32/include/asm/cmpxchg.h | 2 +- trunk/arch/unicore32/include/asm/kvm_para.h | 1 - trunk/arch/unicore32/include/asm/processor.h | 5 - trunk/arch/unicore32/include/asm/ptrace.h | 76 +- trunk/arch/unicore32/include/uapi/asm/Kbuild | 7 + .../include/{ => uapi}/asm/byteorder.h | 0 .../arch/unicore32/include/uapi/asm/ptrace.h | 90 + .../include/{ => uapi}/asm/sigcontext.h | 0 .../unicore32/include/{ => uapi}/asm/unistd.h | 1 + trunk/arch/unicore32/kernel/entry.S | 20 +- trunk/arch/unicore32/kernel/process.c | 58 +- trunk/arch/unicore32/kernel/setup.h | 6 + trunk/arch/unicore32/kernel/sys.c | 63 - trunk/arch/unicore32/mm/fault.c | 37 +- trunk/arch/x86/boot/compressed/eboot.c | 2 + trunk/arch/x86/boot/header.S | 3 - trunk/arch/x86/include/asm/ptrace.h | 15 +- trunk/arch/x86/include/asm/xen/hypercall.h | 21 +- trunk/arch/x86/kernel/cpu/amd.c | 14 
+ trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c | 2 +- trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c | 31 +- trunk/arch/x86/kernel/entry_64.S | 14 +- trunk/arch/x86/kernel/microcode_amd.c | 8 +- trunk/arch/x86/kernel/ptrace.c | 30 + trunk/arch/x86/kvm/cpuid.h | 3 + trunk/arch/x86/kvm/vmx.c | 11 +- trunk/arch/x86/kvm/x86.c | 3 + trunk/arch/x86/mm/tlb.c | 2 +- trunk/arch/x86/pci/ce4100.c | 13 + trunk/arch/x86/platform/ce4100/ce4100.c | 24 + trunk/block/blk-exec.c | 8 +- trunk/crypto/cryptd.c | 11 +- trunk/drivers/ata/ahci_platform.c | 2 +- trunk/drivers/ata/libata-acpi.c | 11 +- trunk/drivers/ata/libata-core.c | 4 + trunk/drivers/ata/libata-scsi.c | 2 + trunk/drivers/ata/pata_arasan_cf.c | 8 +- trunk/drivers/ata/sata_highbank.c | 4 +- trunk/drivers/ata/sata_svw.c | 35 + trunk/drivers/base/platform.c | 7 + trunk/drivers/base/power/qos.c | 2 +- trunk/drivers/block/aoe/aoecmd.c | 2 +- trunk/drivers/block/floppy.c | 5 +- trunk/drivers/block/mtip32xx/mtip32xx.c | 18 +- trunk/drivers/block/mtip32xx/mtip32xx.h | 6 +- trunk/drivers/bluetooth/ath3k.c | 1 + trunk/drivers/bluetooth/btusb.c | 1 + trunk/drivers/bus/omap-ocp2scp.c | 68 +- trunk/drivers/char/agp/intel-agp.h | 91 + trunk/drivers/char/agp/intel-gtt.c | 320 +- trunk/drivers/clk/ux500/u8500_clk.c | 50 +- trunk/drivers/edac/amd64_edac.h | 2 +- trunk/drivers/edac/edac_stub.c | 2 +- trunk/drivers/edac/mce_amd_inj.c | 4 +- trunk/drivers/firewire/sbp2.c | 2 + trunk/drivers/gpio/Kconfig | 4 +- trunk/drivers/gpio/gpio-mcp23s08.c | 6 +- trunk/drivers/gpio/gpio-mvebu.c | 23 + trunk/drivers/gpu/drm/Kconfig | 2 - trunk/drivers/gpu/drm/Makefile | 6 +- trunk/drivers/gpu/drm/ast/ast_ttm.c | 2 +- trunk/drivers/gpu/drm/cirrus/cirrus_drv.c | 13 +- trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c | 2 +- trunk/drivers/gpu/drm/drm_crtc.c | 63 +- trunk/drivers/gpu/drm/drm_crtc_helper.c | 161 +- .../{drm_dp_helper.c => drm_dp_i2c_helper.c} | 146 +- trunk/drivers/gpu/drm/drm_edid.c | 29 +- trunk/drivers/gpu/drm/drm_fb_helper.c | 76 +- trunk/drivers/gpu/drm/drm_fops.c | 44 +- trunk/drivers/gpu/drm/drm_hashtab.c | 38 +- trunk/drivers/gpu/drm/drm_ioctl.c | 3 - trunk/drivers/gpu/drm/drm_irq.c | 120 +- trunk/drivers/gpu/drm/drm_modes.c | 8 +- trunk/drivers/gpu/drm/drm_pci.c | 2 +- trunk/drivers/gpu/drm/drm_stub.c | 37 +- trunk/drivers/gpu/drm/drm_sysfs.c | 6 +- trunk/drivers/gpu/drm/exynos/Kconfig | 6 - trunk/drivers/gpu/drm/exynos/Makefile | 1 - trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c | 94 +- trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h | 4 +- .../drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5 - .../gpu/drm/exynos/exynos_drm_dmabuf.c | 84 +- trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c | 23 +- trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h | 14 +- .../gpu/drm/exynos/exynos_drm_encoder.c | 43 +- .../gpu/drm/exynos/exynos_drm_encoder.h | 1 - trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c | 56 +- .../drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 34 +- .../drivers/gpu/drm/exynos/exynos_drm_fimd.c | 60 +- trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c | 495 +--- trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c | 435 ++- trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h | 56 +- .../drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 15 - .../drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 1 - .../drivers/gpu/drm/exynos/exynos_drm_iommu.c | 150 - .../drivers/gpu/drm/exynos/exynos_drm_iommu.h | 85 - .../drivers/gpu/drm/exynos/exynos_drm_vidi.c | 20 +- trunk/drivers/gpu/drm/exynos/exynos_hdmi.c | 237 +- trunk/drivers/gpu/drm/exynos/exynos_mixer.c | 93 +- trunk/drivers/gpu/drm/exynos/regs-hdmi.h | 17 +- 
trunk/drivers/gpu/drm/gma500/cdv_device.c | 4 +- trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c | 2 +- trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 6 +- trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c | 10 +- .../drivers/gpu/drm/gma500/mdfld_dsi_output.c | 12 +- .../gpu/drm/gma500/mdfld_intel_display.c | 2 +- trunk/drivers/gpu/drm/gma500/oaktrail.h | 6 - trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c | 10 +- .../drivers/gpu/drm/gma500/oaktrail_device.c | 2 +- trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c | 365 +-- trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c | 8 +- trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c | 10 +- trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c | 24 +- trunk/drivers/gpu/drm/i2c/ch7006_drv.c | 20 +- trunk/drivers/gpu/drm/i915/i915_debugfs.c | 52 +- trunk/drivers/gpu/drm/i915/i915_dma.c | 86 +- trunk/drivers/gpu/drm/i915/i915_drv.c | 131 +- trunk/drivers/gpu/drm/i915/i915_drv.h | 454 ++- trunk/drivers/gpu/drm/i915/i915_gem.c | 196 +- trunk/drivers/gpu/drm/i915/i915_gem_context.c | 2 +- .../gpu/drm/i915/i915_gem_execbuffer.c | 36 +- trunk/drivers/gpu/drm/i915/i915_gem_gtt.c | 416 +-- trunk/drivers/gpu/drm/i915/i915_irq.c | 80 +- trunk/drivers/gpu/drm/i915/i915_reg.h | 297 +- trunk/drivers/gpu/drm/i915/i915_suspend.c | 763 +++-- trunk/drivers/gpu/drm/i915/i915_sysfs.c | 43 +- trunk/drivers/gpu/drm/i915/i915_trace.h | 10 +- trunk/drivers/gpu/drm/i915/intel_bios.c | 14 +- trunk/drivers/gpu/drm/i915/intel_crt.c | 51 +- trunk/drivers/gpu/drm/i915/intel_ddi.c | 1074 ++----- trunk/drivers/gpu/drm/i915/intel_display.c | 1671 +++-------- trunk/drivers/gpu/drm/i915/intel_dp.c | 961 +++---- trunk/drivers/gpu/drm/i915/intel_drv.h | 117 +- trunk/drivers/gpu/drm/i915/intel_hdmi.c | 133 +- trunk/drivers/gpu/drm/i915/intel_i2c.c | 9 +- trunk/drivers/gpu/drm/i915/intel_lvds.c | 225 +- trunk/drivers/gpu/drm/i915/intel_modes.c | 11 +- trunk/drivers/gpu/drm/i915/intel_opregion.c | 2 - trunk/drivers/gpu/drm/i915/intel_panel.c | 77 +- trunk/drivers/gpu/drm/i915/intel_pm.c | 501 ++-- trunk/drivers/gpu/drm/i915/intel_ringbuffer.c | 126 +- trunk/drivers/gpu/drm/i915/intel_ringbuffer.h | 6 +- trunk/drivers/gpu/drm/i915/intel_sdvo.c | 136 +- trunk/drivers/gpu/drm/i915/intel_sprite.c | 101 +- trunk/drivers/gpu/drm/i915/intel_tv.c | 21 +- trunk/drivers/gpu/drm/mgag200/mgag200_main.c | 4 +- trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c | 2 +- trunk/drivers/gpu/drm/nouveau/Makefile | 38 +- .../gpu/drm/nouveau/core/core/engctx.c | 15 - .../gpu/drm/nouveau/core/core/falcon.c | 247 -- .../gpu/drm/nouveau/core/core/gpuobj.c | 4 +- trunk/drivers/gpu/drm/nouveau/core/core/mm.c | 17 +- .../gpu/drm/nouveau/core/engine/bsp/nv84.c | 108 +- .../gpu/drm/nouveau/core/engine/bsp/nvc0.c | 110 - .../gpu/drm/nouveau/core/engine/bsp/nve0.c | 110 - .../gpu/drm/nouveau/core/engine/copy/nva3.c | 124 +- .../gpu/drm/nouveau/core/engine/copy/nvc0.c | 167 +- .../gpu/drm/nouveau/core/engine/copy/nve0.c | 54 +- .../gpu/drm/nouveau/core/engine/crypt/nv84.c | 46 +- .../gpu/drm/nouveau/core/engine/crypt/nv98.c | 83 +- .../drm/nouveau/core/engine/disp/dacnv50.c | 88 - .../drm/nouveau/core/engine/disp/hdanva3.c | 48 - .../drm/nouveau/core/engine/disp/hdanvd0.c | 53 - .../drm/nouveau/core/engine/disp/hdminv84.c | 66 - .../drm/nouveau/core/engine/disp/hdminva3.c | 66 - .../drm/nouveau/core/engine/disp/hdminvd0.c | 62 - .../gpu/drm/nouveau/core/engine/disp/nv50.c | 1173 +------- .../gpu/drm/nouveau/core/engine/disp/nv50.h | 142 - .../gpu/drm/nouveau/core/engine/disp/nv84.c | 98 - .../gpu/drm/nouveau/core/engine/disp/nv94.c | 109 - 
.../gpu/drm/nouveau/core/engine/disp/nva0.c | 88 - .../gpu/drm/nouveau/core/engine/disp/nva3.c | 111 - .../gpu/drm/nouveau/core/engine/disp/nvd0.c | 884 +----- .../gpu/drm/nouveau/core/engine/disp/nve0.c | 94 - .../drm/nouveau/core/engine/disp/sornv50.c | 112 - .../drm/nouveau/core/engine/disp/sornv94.c | 190 -- .../drm/nouveau/core/engine/disp/sornvd0.c | 126 - .../gpu/drm/nouveau/core/engine/dmaobj/base.c | 71 +- .../gpu/drm/nouveau/core/engine/dmaobj/nv04.c | 68 +- .../gpu/drm/nouveau/core/engine/dmaobj/nv50.c | 126 +- .../gpu/drm/nouveau/core/engine/dmaobj/nvc0.c | 104 +- .../gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 122 - .../gpu/drm/nouveau/core/engine/fifo/base.c | 19 +- .../gpu/drm/nouveau/core/engine/fifo/nv04.c | 17 +- .../gpu/drm/nouveau/core/engine/fifo/nv10.c | 6 +- .../gpu/drm/nouveau/core/engine/fifo/nv17.c | 8 +- .../gpu/drm/nouveau/core/engine/fifo/nv40.c | 8 +- .../gpu/drm/nouveau/core/engine/fifo/nv50.c | 36 +- .../gpu/drm/nouveau/core/engine/fifo/nv84.c | 60 +- .../gpu/drm/nouveau/core/engine/fifo/nvc0.c | 26 +- .../gpu/drm/nouveau/core/engine/fifo/nve0.c | 21 +- .../drm/nouveau/core/engine/graph/ctxnv40.c | 12 +- .../gpu/drm/nouveau/core/engine/graph/nv04.c | 184 +- .../gpu/drm/nouveau/core/engine/graph/nv10.c | 10 +- .../gpu/drm/nouveau/core/engine/graph/nv20.c | 6 +- .../gpu/drm/nouveau/core/engine/graph/nv40.c | 46 +- .../gpu/drm/nouveau/core/engine/graph/nv40.h | 2 +- .../gpu/drm/nouveau/core/engine/graph/nv50.c | 83 +- .../gpu/drm/nouveau/core/engine/graph/nvc0.c | 2 +- .../gpu/drm/nouveau/core/engine/graph/regs.h | 5 - .../gpu/drm/nouveau/core/engine/mpeg/nv31.c | 6 +- .../gpu/drm/nouveau/core/engine/mpeg/nv40.c | 2 +- .../gpu/drm/nouveau/core/engine/mpeg/nv50.c | 1 + .../gpu/drm/nouveau/core/engine/ppp/nv98.c | 107 +- .../gpu/drm/nouveau/core/engine/ppp/nvc0.c | 110 - .../drm/nouveau/core/engine/software/nv04.c | 4 +- .../drm/nouveau/core/engine/software/nv10.c | 2 +- .../drm/nouveau/core/engine/software/nv50.c | 10 +- .../drm/nouveau/core/engine/software/nvc0.c | 10 +- .../gpu/drm/nouveau/core/engine/vp/nv84.c | 108 +- .../gpu/drm/nouveau/core/engine/vp/nvc0.c | 110 - .../gpu/drm/nouveau/core/engine/vp/nve0.c | 110 - .../gpu/drm/nouveau/core/include/core/class.h | 225 -- .../drm/nouveau/core/include/core/engctx.h | 3 - .../drm/nouveau/core/include/core/falcon.h | 81 - .../drm/nouveau/core/include/core/gpuobj.h | 4 +- .../gpu/drm/nouveau/core/include/core/mm.h | 6 - .../drm/nouveau/core/include/core/object.h | 55 +- .../drm/nouveau/core/include/core/parent.h | 2 +- .../gpu/drm/nouveau/core/include/engine/bsp.h | 41 +- .../drm/nouveau/core/include/engine/copy.h | 39 +- .../drm/nouveau/core/include/engine/crypt.h | 39 + .../drm/nouveau/core/include/engine/disp.h | 5 - .../drm/nouveau/core/include/engine/dmaobj.h | 29 +- .../drm/nouveau/core/include/engine/fifo.h | 6 +- .../gpu/drm/nouveau/core/include/engine/ppp.h | 40 +- .../gpu/drm/nouveau/core/include/engine/vp.h | 41 +- .../nouveau/core/include/subdev/bios/dcb.h | 34 +- .../nouveau/core/include/subdev/bios/disp.h | 48 - .../drm/nouveau/core/include/subdev/bios/dp.h | 32 +- .../drm/nouveau/core/include/subdev/clock.h | 3 +- .../gpu/drm/nouveau/core/include/subdev/fb.h | 43 +- .../gpu/drm/nouveau/core/subdev/bar/base.c | 4 +- .../gpu/drm/nouveau/core/subdev/bios/base.c | 32 +- .../gpu/drm/nouveau/core/subdev/bios/dcb.c | 65 +- .../gpu/drm/nouveau/core/subdev/bios/disp.c | 178 -- .../gpu/drm/nouveau/core/subdev/bios/dp.c | 182 +- .../gpu/drm/nouveau/core/subdev/bios/gpio.c | 4 +- 
.../gpu/drm/nouveau/core/subdev/bios/init.c | 13 +- .../gpu/drm/nouveau/core/subdev/clock/nva3.c | 19 + .../gpu/drm/nouveau/core/subdev/clock/nvc0.c | 1 + .../gpu/drm/nouveau/core/subdev/device/base.c | 28 +- .../gpu/drm/nouveau/core/subdev/device/nv10.c | 4 +- .../gpu/drm/nouveau/core/subdev/device/nv20.c | 6 +- .../gpu/drm/nouveau/core/subdev/device/nv30.c | 6 +- .../gpu/drm/nouveau/core/subdev/device/nv40.c | 28 +- .../gpu/drm/nouveau/core/subdev/device/nv50.c | 26 +- .../gpu/drm/nouveau/core/subdev/device/nvc0.c | 64 +- .../gpu/drm/nouveau/core/subdev/device/nve0.c | 17 +- .../drm/nouveau/core/subdev/devinit/nv50.c | 34 +- .../gpu/drm/nouveau/core/subdev/fb/base.c | 92 +- .../gpu/drm/nouveau/core/subdev/fb/nv04.c | 62 +- .../gpu/drm/nouveau/core/subdev/fb/nv10.c | 52 +- .../gpu/drm/nouveau/core/subdev/fb/nv1a.c | 89 - .../gpu/drm/nouveau/core/subdev/fb/nv20.c | 86 +- .../gpu/drm/nouveau/core/subdev/fb/nv25.c | 81 - .../gpu/drm/nouveau/core/subdev/fb/nv30.c | 51 +- .../gpu/drm/nouveau/core/subdev/fb/nv35.c | 82 - .../gpu/drm/nouveau/core/subdev/fb/nv36.c | 82 - .../gpu/drm/nouveau/core/subdev/fb/nv40.c | 131 +- .../gpu/drm/nouveau/core/subdev/fb/nv41.c | 106 - .../gpu/drm/nouveau/core/subdev/fb/nv44.c | 114 - .../gpu/drm/nouveau/core/subdev/fb/nv46.c | 79 - .../gpu/drm/nouveau/core/subdev/fb/nv47.c | 66 - .../gpu/drm/nouveau/core/subdev/fb/nv49.c | 84 - .../gpu/drm/nouveau/core/subdev/fb/nv4e.c | 72 - .../gpu/drm/nouveau/core/subdev/fb/nv50.c | 393 ++- .../gpu/drm/nouveau/core/subdev/fb/nvc0.c | 126 +- .../gpu/drm/nouveau/core/subdev/i2c/aux.c | 2 +- .../drm/nouveau/core/subdev/instmem/nv04.c | 8 +- .../drm/nouveau/core/subdev/instmem/nv40.c | 4 +- .../drm/nouveau/core/subdev/instmem/nv50.c | 4 +- .../gpu/drm/nouveau/core/subdev/mc/base.c | 8 +- .../gpu/drm/nouveau/core/subdev/mc/nv50.c | 1 - .../gpu/drm/nouveau/core/subdev/mc/nv98.c | 1 - .../gpu/drm/nouveau/core/subdev/mc/nvc0.c | 1 - .../gpu/drm/nouveau/core/subdev/vm/nv41.c | 2 +- trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c | 29 +- trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c | 30 +- trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h | 4 - trunk/drivers/gpu/drm/nouveau/nouveau_bios.c | 235 +- trunk/drivers/gpu/drm/nouveau/nouveau_bios.h | 9 + trunk/drivers/gpu/drm/nouveau/nouveau_bo.c | 10 +- trunk/drivers/gpu/drm/nouveau/nouveau_chan.c | 6 +- .../gpu/drm/nouveau/nouveau_connector.c | 27 +- .../gpu/drm/nouveau/nouveau_connector.h | 16 - trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h | 10 + .../drivers/gpu/drm/nouveau/nouveau_display.c | 25 +- trunk/drivers/gpu/drm/nouveau/nouveau_dp.c | 141 +- trunk/drivers/gpu/drm/nouveau/nouveau_drm.c | 101 +- trunk/drivers/gpu/drm/nouveau/nouveau_drm.h | 4 +- .../drivers/gpu/drm/nouveau/nouveau_encoder.h | 7 +- trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c | 261 ++ trunk/drivers/gpu/drm/nouveau/nouveau_irq.c | 12 + trunk/drivers/gpu/drm/nouveau/nouveau_prime.c | 4 + trunk/drivers/gpu/drm/nouveau/nouveau_vga.c | 5 +- trunk/drivers/gpu/drm/nouveau/nv04_crtc.c | 6 +- trunk/drivers/gpu/drm/nouveau/nv04_display.c | 5 + trunk/drivers/gpu/drm/nouveau/nv10_fence.c | 7 +- trunk/drivers/gpu/drm/nouveau/nv17_tv.c | 16 +- trunk/drivers/gpu/drm/nouveau/nv50_crtc.c | 764 +++++ trunk/drivers/gpu/drm/nouveau/nv50_cursor.c | 136 + trunk/drivers/gpu/drm/nouveau/nv50_dac.c | 321 +++ trunk/drivers/gpu/drm/nouveau/nv50_display.c | 2561 +++++------------ trunk/drivers/gpu/drm/nouveau/nv50_display.h | 71 +- trunk/drivers/gpu/drm/nouveau/nv50_evo.c | 403 +++ trunk/drivers/gpu/drm/nouveau/nv50_evo.h | 120 + 
trunk/drivers/gpu/drm/nouveau/nv50_fence.c | 5 +- trunk/drivers/gpu/drm/nouveau/nv50_pm.c | 2 +- trunk/drivers/gpu/drm/nouveau/nv50_sor.c | 530 ++++ trunk/drivers/gpu/drm/nouveau/nvc0_fence.c | 28 +- trunk/drivers/gpu/drm/nouveau/nvd0_display.c | 2141 ++++++++++++++ trunk/drivers/gpu/drm/radeon/atombios_crtc.c | 44 +- trunk/drivers/gpu/drm/radeon/atombios_dp.c | 149 +- .../gpu/drm/radeon/atombios_encoders.c | 2 +- trunk/drivers/gpu/drm/radeon/evergreen.c | 4 +- trunk/drivers/gpu/drm/radeon/evergreen_cs.c | 3 + trunk/drivers/gpu/drm/radeon/evergreend.h | 4 + trunk/drivers/gpu/drm/radeon/r600.c | 8 +- trunk/drivers/gpu/drm/radeon/r600_cp.c | 7 +- trunk/drivers/gpu/drm/radeon/radeon_agp.c | 5 +- .../gpu/drm/radeon/radeon_connectors.c | 62 +- trunk/drivers/gpu/drm/radeon/radeon_drv.c | 13 +- trunk/drivers/gpu/drm/radeon/radeon_gart.c | 2 +- trunk/drivers/gpu/drm/radeon/radeon_mode.h | 2 +- trunk/drivers/gpu/drm/radeon/radeon_object.c | 8 +- trunk/drivers/gpu/drm/radeon/radeon_object.h | 2 +- trunk/drivers/gpu/drm/radeon/radeon_ttm.c | 9 +- trunk/drivers/gpu/drm/radeon/rv770d.h | 48 + trunk/drivers/gpu/drm/radeon/si.c | 1 + trunk/drivers/gpu/drm/radeon/sid.h | 1 + .../drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2 +- trunk/drivers/gpu/drm/tegra/Kconfig | 23 - trunk/drivers/gpu/drm/tegra/Makefile | 7 - trunk/drivers/gpu/drm/tegra/dc.c | 834 ------ trunk/drivers/gpu/drm/tegra/dc.h | 388 --- trunk/drivers/gpu/drm/tegra/drm.c | 115 - trunk/drivers/gpu/drm/tegra/drm.h | 234 -- trunk/drivers/gpu/drm/tegra/fb.c | 56 - trunk/drivers/gpu/drm/tegra/hdmi.c | 1334 --------- trunk/drivers/gpu/drm/tegra/hdmi.h | 575 ---- trunk/drivers/gpu/drm/tegra/host1x.c | 325 --- trunk/drivers/gpu/drm/tegra/output.c | 272 -- trunk/drivers/gpu/drm/tegra/rgb.c | 228 -- trunk/drivers/gpu/drm/ttm/ttm_bo.c | 55 +- trunk/drivers/gpu/drm/ttm/ttm_bo_util.c | 2 + trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c | 4 +- trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c | 6 +- trunk/drivers/gpu/drm/ttm/ttm_memory.c | 1 + trunk/drivers/gpu/drm/ttm/ttm_object.c | 51 +- trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c | 5 +- trunk/drivers/gpu/drm/ttm/ttm_tt.c | 4 - trunk/drivers/gpu/drm/udl/udl_connector.c | 14 +- trunk/drivers/gpu/drm/vmwgfx/Makefile | 3 +- .../gpu/drm/vmwgfx/svga3d_surfacedefs.h | 909 ------ trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 23 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 274 -- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 11 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 97 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 153 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 913 ++---- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 9 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 21 +- .../drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2109 +++++++++----- .../gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 84 - trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 4 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 893 ------ trunk/drivers/gpu/vga/vga_switcheroo.c | 6 + trunk/drivers/hid/hid-microsoft.c | 6 +- trunk/drivers/hid/hidraw.c | 69 +- trunk/drivers/hwmon/asb100.c | 2 +- trunk/drivers/hwmon/w83627ehf.c | 1 + trunk/drivers/hwmon/w83627hf.c | 2 +- trunk/drivers/hwmon/w83781d.c | 2 +- trunk/drivers/hwmon/w83791d.c | 2 +- trunk/drivers/hwmon/w83792d.c | 2 +- trunk/drivers/hwmon/w83l786ng.c | 2 +- trunk/drivers/i2c/busses/i2c-at91.c | 7 +- trunk/drivers/i2c/busses/i2c-mxs.c | 2 + trunk/drivers/i2c/busses/i2c-omap.c | 36 +- 
trunk/drivers/i2c/busses/i2c-s3c2410.c | 1 + trunk/drivers/i2c/muxes/i2c-mux-pinctrl.c | 2 +- trunk/drivers/input/input-mt.c | 4 + trunk/drivers/input/mousedev.c | 4 +- trunk/drivers/input/touchscreen/ads7846.c | 6 +- trunk/drivers/iommu/intel-iommu.c | 4 +- trunk/drivers/iommu/tegra-smmu.c | 1 + trunk/drivers/irqchip/irq-bcm2835.c | 3 +- trunk/drivers/isdn/Kconfig | 2 +- trunk/drivers/isdn/i4l/Kconfig | 2 +- trunk/drivers/isdn/i4l/isdn_common.c | 4 - trunk/drivers/leds/ledtrig-cpu.c | 21 - trunk/drivers/md/dm.c | 8 +- trunk/drivers/md/md.c | 27 +- trunk/drivers/md/raid10.c | 131 +- trunk/drivers/md/raid5.c | 79 +- trunk/drivers/mmc/host/dw_mmc-exynos.c | 8 +- trunk/drivers/mmc/host/dw_mmc-pltfm.c | 6 +- trunk/drivers/mmc/host/dw_mmc-pltfm.h | 2 +- trunk/drivers/mmc/host/dw_mmc.c | 62 +- trunk/drivers/mmc/host/mxcmmc.c | 2 +- trunk/drivers/mmc/host/omap_hsmmc.c | 19 +- trunk/drivers/mmc/host/sdhci-dove.c | 38 +- trunk/drivers/mmc/host/sdhci-of-esdhc.c | 11 + trunk/drivers/mmc/host/sdhci-pci.c | 2 +- trunk/drivers/mmc/host/sdhci-pltfm.c | 7 + trunk/drivers/mmc/host/sdhci-s3c.c | 30 +- trunk/drivers/mmc/host/sdhci.c | 44 +- trunk/drivers/mmc/host/sdhci.h | 1 + trunk/drivers/mmc/host/sh_mmcif.c | 2 +- trunk/drivers/mtd/devices/slram.c | 2 +- trunk/drivers/mtd/nand/nand_base.c | 10 +- trunk/drivers/mtd/ofpart.c | 2 +- trunk/drivers/mtd/onenand/onenand_base.c | 2 +- trunk/drivers/net/bonding/bond_main.c | 7 + trunk/drivers/net/ethernet/8390/ne.c | 1 + .../ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2 +- .../net/ethernet/broadcom/bnx2x/bnx2x_link.c | 4 +- .../net/ethernet/broadcom/bnx2x/bnx2x_main.c | 11 +- .../net/ethernet/chelsio/cxgb4/t4_hw.c | 6 +- .../drivers/net/ethernet/freescale/gianfar.c | 5 +- trunk/drivers/net/ethernet/jme.c | 36 +- trunk/drivers/net/ethernet/marvell/skge.c | 2 +- trunk/drivers/net/ethernet/micrel/ksz884x.c | 20 +- trunk/drivers/net/ethernet/realtek/8139cp.c | 22 +- trunk/drivers/net/ethernet/realtek/r8169.c | 5 + trunk/drivers/net/ethernet/sis/sis900.c | 2 +- trunk/drivers/net/ethernet/smsc/smsc911x.c | 17 +- trunk/drivers/net/ethernet/tile/tilegx.c | 2 +- .../net/ethernet/xilinx/xilinx_axienet_main.c | 16 +- .../drivers/net/ethernet/xscale/ixp4xx_eth.c | 8 +- trunk/drivers/net/irda/sir_dev.c | 2 +- trunk/drivers/net/phy/mdio-bitbang.c | 1 - trunk/drivers/net/phy/mdio-gpio.c | 11 +- trunk/drivers/net/team/team_mode_broadcast.c | 6 +- trunk/drivers/net/usb/cdc_eem.c | 3 +- trunk/drivers/net/usb/cdc_ncm.c | 22 +- trunk/drivers/net/usb/smsc95xx.c | 5 +- trunk/drivers/net/usb/usbnet.c | 8 +- trunk/drivers/net/vxlan.c | 10 +- trunk/drivers/net/wan/ixp4xx_hss.c | 8 +- trunk/drivers/net/wireless/ath/ath9k/hw.c | 2 +- trunk/drivers/net/wireless/b43legacy/pio.c | 2 +- .../wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2 +- .../net/wireless/iwlwifi/dvm/mac80211.c | 16 +- trunk/drivers/net/wireless/iwlwifi/dvm/main.c | 2 +- trunk/drivers/net/wireless/iwlwifi/pcie/rx.c | 23 +- trunk/drivers/net/wireless/iwlwifi/pcie/tx.c | 8 - trunk/drivers/net/wireless/mwifiex/cmdevt.c | 11 +- trunk/drivers/net/wireless/mwifiex/sdio.c | 11 +- .../net/wireless/rtlwifi/rtl8192cu/sw.c | 1 + trunk/drivers/net/xen-netfront.c | 98 +- trunk/drivers/nfc/pn533.c | 25 +- trunk/drivers/pci/bus.c | 3 - trunk/drivers/pci/pci-driver.c | 12 +- trunk/drivers/pci/pci-sysfs.c | 34 - trunk/drivers/pci/pci.c | 32 + trunk/drivers/pci/pci.h | 2 + trunk/drivers/pci/pcie/aer/aerdrv_core.c | 20 +- trunk/drivers/pci/pcie/portdrv_core.c | 3 +- trunk/drivers/pci/proc.c | 8 + trunk/drivers/pinctrl/Kconfig | 4 +- 
trunk/drivers/pinctrl/spear/pinctrl-spear.c | 2 +- .../drivers/pinctrl/spear/pinctrl-spear1310.c | 365 ++- .../drivers/pinctrl/spear/pinctrl-spear1340.c | 41 +- .../drivers/pinctrl/spear/pinctrl-spear320.c | 8 +- .../drivers/pinctrl/spear/pinctrl-spear3xx.h | 1 + trunk/drivers/rapidio/rio.c | 2 +- trunk/drivers/regulator/core.c | 33 +- trunk/drivers/s390/char/con3215.c | 12 +- trunk/drivers/s390/cio/css.h | 3 - trunk/drivers/s390/cio/device.c | 8 +- trunk/drivers/s390/cio/idset.c | 3 +- trunk/drivers/s390/net/qeth_core_main.c | 24 +- trunk/drivers/s390/net/qeth_l2_main.c | 13 +- trunk/drivers/scsi/isci/request.c | 2 +- trunk/drivers/scsi/qlogicpti.c | 13 +- trunk/drivers/scsi/scsi.c | 45 + trunk/drivers/scsi/scsi_lib.c | 22 +- trunk/drivers/scsi/sd.c | 202 +- trunk/drivers/scsi/sd.h | 7 + trunk/drivers/staging/android/android_alarm.h | 4 +- trunk/drivers/tty/hvc/hvc_console.c | 7 - trunk/drivers/tty/serial/max310x.c | 1 + trunk/drivers/usb/core/hcd.c | 16 + trunk/drivers/usb/early/ehci-dbgp.c | 15 +- trunk/drivers/usb/gadget/u_ether.c | 3 +- trunk/drivers/usb/host/ehci-ls1x.c | 2 +- trunk/drivers/usb/host/ohci-xls.c | 2 +- trunk/drivers/usb/musb/musb_gadget.c | 30 +- trunk/drivers/usb/musb/ux500.c | 2 +- trunk/drivers/usb/otg/Kconfig | 4 +- trunk/drivers/usb/serial/keyspan.c | 3 +- trunk/drivers/usb/serial/option.c | 9 + trunk/drivers/usb/serial/usb_wwan.c | 10 +- trunk/drivers/usb/storage/scsiglue.c | 6 + trunk/drivers/video/omap2/dss/dsi.c | 13 +- trunk/drivers/video/omap2/dss/dss.c | 18 +- trunk/drivers/video/omap2/dss/hdmi.c | 4 +- .../drivers/video/omap2/omapfb/omapfb-ioctl.c | 2 +- trunk/drivers/virtio/virtio.c | 4 +- trunk/drivers/xen/Makefile | 1 + trunk/drivers/xen/events.c | 2 +- trunk/drivers/xen/fallback.c | 80 + trunk/drivers/xen/privcmd.c | 18 +- trunk/fs/cifs/cifsacl.c | 49 +- trunk/fs/cifs/dir.c | 11 +- trunk/fs/eventpoll.c | 38 +- trunk/fs/ext3/balloc.c | 5 +- trunk/fs/file.c | 1 - trunk/fs/gfs2/file.c | 14 +- trunk/fs/gfs2/lops.c | 16 +- trunk/fs/gfs2/quota.c | 7 +- trunk/fs/gfs2/rgrp.c | 33 +- trunk/fs/gfs2/super.c | 3 +- trunk/fs/gfs2/trans.c | 8 + trunk/fs/jffs2/file.c | 39 +- trunk/fs/notify/fanotify/fanotify.c | 1 + trunk/fs/notify/fanotify/fanotify_user.c | 3 +- trunk/fs/proc/base.c | 109 + trunk/fs/pstore/platform.c | 3 +- trunk/fs/reiserfs/inode.c | 10 +- trunk/fs/reiserfs/stree.c | 4 + trunk/fs/reiserfs/super.c | 60 +- trunk/fs/ubifs/find.c | 12 +- trunk/fs/ubifs/lprops.c | 6 + trunk/fs/ubifs/ubifs.h | 3 + trunk/fs/xfs/xfs_alloc.c | 43 +- trunk/fs/xfs/xfs_alloc.h | 3 - trunk/fs/xfs/xfs_alloc_btree.c | 2 + trunk/fs/xfs/xfs_aops.c | 54 +- trunk/fs/xfs/xfs_attr_leaf.c | 20 +- trunk/fs/xfs/xfs_bmap.c | 63 +- trunk/fs/xfs/xfs_bmap.h | 9 +- trunk/fs/xfs/xfs_buf.c | 14 +- trunk/fs/xfs/xfs_buf_item.c | 18 + trunk/fs/xfs/xfs_fsops.c | 21 +- trunk/fs/xfs/xfs_ialloc.c | 1 + trunk/fs/xfs/xfs_inode.c | 3 +- trunk/fs/xfs/xfs_ioctl.c | 2 +- trunk/fs/xfs/xfs_iomap.c | 4 +- trunk/fs/xfs/xfs_log.c | 19 +- trunk/fs/xfs/xfs_log_recover.c | 2 +- trunk/include/drm/drmP.h | 3 - trunk/include/drm/drm_crtc.h | 18 +- trunk/include/drm/drm_crtc_helper.h | 3 - trunk/include/drm/drm_dp_helper.h | 39 - trunk/include/drm/drm_hashtab.h | 14 - trunk/include/drm/drm_pciids.h | 1 + trunk/include/drm/intel-gtt.h | 7 +- trunk/include/drm/ttm/ttm_bo_api.h | 30 +- trunk/include/drm/ttm/ttm_bo_driver.h | 26 +- trunk/include/drm/ttm/ttm_execbuf_util.h | 3 + trunk/include/drm/ttm/ttm_memory.h | 2 + trunk/include/drm/ttm/ttm_object.h | 4 - trunk/include/linux/clk-provider.h | 4 +- 
trunk/include/linux/dma-attrs.h | 1 - trunk/include/linux/i2c-omap.h | 1 + trunk/include/linux/kref.h | 21 - trunk/include/linux/mm.h | 4 - trunk/include/linux/mmc/dw_mmc.h | 6 +- trunk/include/linux/mmc/sdhci.h | 1 + trunk/include/linux/mmzone.h | 2 +- trunk/include/linux/of_address.h | 4 + .../linux/platform_data/omap_ocp2scp.h | 31 + trunk/include/linux/ptp_clock_kernel.h | 3 +- trunk/include/linux/rio.h | 2 + trunk/include/linux/spi/ads7846.h | 5 +- trunk/include/net/xfrm.h | 2 +- trunk/include/scsi/scsi_device.h | 4 + trunk/include/uapi/drm/drm.h | 1 - trunk/include/uapi/drm/exynos_drm.h | 13 +- trunk/include/uapi/drm/i915_drm.h | 6 - trunk/include/uapi/linux/eventpoll.h | 1 - trunk/include/uapi/linux/oom.h | 9 + trunk/include/xen/hvm.h | 34 +- trunk/kernel/futex.c | 41 +- trunk/kernel/module.c | 27 +- trunk/lib/mpi/longlong.h | 19 +- trunk/mm/bootmem.c | 10 +- trunk/mm/highmem.c | 2 +- trunk/mm/memcontrol.c | 67 +- trunk/mm/memory.c | 10 +- trunk/mm/memory_hotplug.c | 7 - trunk/mm/mmap.c | 2 + trunk/mm/mmzone.c | 6 +- trunk/mm/nobootmem.c | 3 - trunk/mm/page_alloc.c | 38 +- trunk/mm/shmem.c | 18 +- trunk/mm/swapfile.c | 4 +- trunk/mm/vmscan.c | 27 +- trunk/net/batman-adv/soft-interface.c | 12 +- trunk/net/batman-adv/translation-table.c | 15 +- trunk/net/bluetooth/hci_core.c | 4 +- trunk/net/bluetooth/mgmt.c | 12 +- trunk/net/bluetooth/smp.c | 2 +- trunk/net/core/dev.c | 6 +- trunk/net/core/dev_addr_lists.c | 3 +- trunk/net/core/net-sysfs.c | 20 + trunk/net/core/rtnetlink.c | 3 +- trunk/net/ipv4/inet_diag.c | 5 +- trunk/net/ipv4/ip_sockglue.c | 35 +- trunk/net/ipv4/ip_vti.c | 5 + trunk/net/ipv4/route.c | 9 +- trunk/net/ipv4/tcp.c | 4 +- trunk/net/ipv4/tcp_input.c | 15 +- trunk/net/ipv4/tcp_metrics.c | 12 +- trunk/net/ipv4/tcp_output.c | 4 + trunk/net/ipv4/xfrm4_policy.c | 13 +- trunk/net/ipv6/inet6_connection_sock.c | 3 +- trunk/net/ipv6/ip6_gre.c | 8 +- trunk/net/ipv6/ipv6_sockglue.c | 1 + trunk/net/ipv6/ndisc.c | 3 +- trunk/net/mac80211/cfg.c | 3 + trunk/net/mac80211/ibss.c | 8 +- trunk/net/mac80211/ieee80211_i.h | 2 + trunk/net/mac80211/main.c | 6 +- trunk/net/mac80211/scan.c | 2 +- trunk/net/mac80211/sta_info.c | 11 +- trunk/net/mac80211/status.c | 9 + trunk/net/mac80211/tx.c | 9 +- trunk/net/mac80211/util.c | 2 + trunk/net/netfilter/ipset/ip_set_hash_ip.c | 4 +- .../net/netfilter/ipset/ip_set_hash_ipport.c | 7 +- .../netfilter/ipset/ip_set_hash_ipportip.c | 7 +- .../netfilter/ipset/ip_set_hash_ipportnet.c | 7 +- trunk/net/netfilter/nfnetlink_cttimeout.c | 3 +- trunk/net/nfc/llcp/llcp.c | 2 +- trunk/net/sched/sch_qfq.c | 109 +- trunk/net/sctp/proc.c | 8 +- trunk/net/tipc/handler.c | 1 - trunk/net/wireless/reg.c | 5 +- trunk/scripts/Makefile.modinst | 3 +- trunk/scripts/checkpatch.pl | 6 +- trunk/scripts/kconfig/expr.h | 5 +- trunk/scripts/kconfig/list.h | 91 + trunk/scripts/kconfig/lkc_proto.h | 4 +- trunk/scripts/kconfig/mconf.c | 6 +- trunk/scripts/kconfig/menu.c | 14 +- trunk/scripts/sign-file | 6 +- trunk/security/device_cgroup.c | 18 +- trunk/security/selinux/netnode.c | 3 +- trunk/sound/core/oss/mixer_oss.c | 1 + trunk/sound/core/oss/pcm_oss.c | 1 + trunk/sound/core/pcm_native.c | 6 +- trunk/sound/core/sound.c | 2 +- trunk/sound/core/sound_oss.c | 2 +- trunk/sound/i2c/other/ak4113.c | 2 +- trunk/sound/i2c/other/ak4114.c | 2 +- trunk/sound/i2c/other/ak4117.c | 2 +- trunk/sound/pci/es1968.c | 11 +- trunk/sound/pci/fm801.c | 11 +- trunk/sound/pci/hda/hda_codec.c | 13 +- trunk/sound/pci/hda/hda_codec.h | 1 + trunk/sound/pci/hda/hda_intel.c | 41 +- 
trunk/sound/pci/hda/patch_analog.c | 1 + trunk/sound/pci/hda/patch_cirrus.c | 22 +- trunk/sound/pci/hda/patch_realtek.c | 28 +- trunk/sound/pci/hda/patch_via.c | 36 +- trunk/sound/pci/rme9652/hdspm.c | 5 +- trunk/sound/soc/codecs/arizona.c | 4 +- trunk/sound/soc/codecs/cs4271.c | 11 +- trunk/sound/soc/codecs/cs42l52.c | 5 +- trunk/sound/soc/codecs/wm5102.c | 552 +++- trunk/sound/soc/codecs/wm8978.c | 2 +- trunk/sound/soc/codecs/wm8994.c | 2 +- trunk/sound/soc/kirkwood/kirkwood-dma.c | 3 +- trunk/sound/soc/kirkwood/kirkwood-i2s.c | 74 +- trunk/sound/soc/mxs/mxs-saif.c | 17 +- trunk/sound/soc/samsung/Kconfig | 2 + trunk/sound/soc/samsung/bells.c | 2 +- trunk/sound/soc/soc-core.c | 5 +- trunk/sound/soc/soc-dapm.c | 2 +- trunk/sound/usb/card.c | 6 +- trunk/sound/usb/endpoint.c | 13 + trunk/sound/usb/endpoint.h | 1 + trunk/sound/usb/midi.c | 8 +- trunk/sound/usb/pcm.c | 5 +- trunk/tools/power/x86/turbostat/turbostat.c | 28 +- trunk/tools/testing/selftests/Makefile | 2 +- trunk/tools/testing/selftests/epoll/Makefile | 11 - .../testing/selftests/epoll/test_epoll.c | 344 --- 820 files changed, 19660 insertions(+), 28503 deletions(-) delete mode 100644 trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt create mode 100644 trunk/arch/mips/lib/bitops.c create mode 100644 trunk/arch/mips/lib/mips-atomic.c delete mode 100644 trunk/arch/unicore32/include/asm/kvm_para.h rename trunk/arch/unicore32/include/{ => uapi}/asm/byteorder.h (100%) create mode 100644 trunk/arch/unicore32/include/uapi/asm/ptrace.h rename trunk/arch/unicore32/include/{ => uapi}/asm/sigcontext.h (100%) rename trunk/arch/unicore32/include/{ => uapi}/asm/unistd.h (92%) rename trunk/drivers/gpu/drm/{drm_dp_helper.c => drm_dp_i2c_helper.c} (58%) delete mode 100644 trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c delete mode 100644 trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/core/falcon.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c delete mode 100644 
trunk/drivers/gpu/drm/nouveau/core/include/core/falcon.h delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_crtc.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_cursor.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_dac.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_evo.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_evo.h create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_sor.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nvd0_display.c delete mode 100644 trunk/drivers/gpu/drm/tegra/Kconfig delete mode 100644 trunk/drivers/gpu/drm/tegra/Makefile delete mode 100644 trunk/drivers/gpu/drm/tegra/dc.c delete mode 100644 trunk/drivers/gpu/drm/tegra/dc.h delete mode 100644 trunk/drivers/gpu/drm/tegra/drm.c delete mode 100644 trunk/drivers/gpu/drm/tegra/drm.h delete mode 100644 trunk/drivers/gpu/drm/tegra/fb.c delete mode 100644 trunk/drivers/gpu/drm/tegra/hdmi.c delete mode 100644 trunk/drivers/gpu/drm/tegra/hdmi.h delete mode 100644 trunk/drivers/gpu/drm/tegra/host1x.c delete mode 100644 trunk/drivers/gpu/drm/tegra/output.c delete mode 100644 trunk/drivers/gpu/drm/tegra/rgb.c delete mode 100644 trunk/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h delete mode 100644 trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c delete mode 100644 trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h delete mode 100644 trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c create mode 100644 trunk/drivers/xen/fallback.c create mode 100644 trunk/include/linux/platform_data/omap_ocp2scp.h create mode 100644 trunk/scripts/kconfig/list.h delete mode 100644 trunk/tools/testing/selftests/epoll/Makefile delete mode 100644 trunk/tools/testing/selftests/epoll/test_epoll.c diff --git a/[refs] b/[refs] index 37e32cf6fedd..59769d06a78d 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 8de9e417757fb9f130f55a38f4ee7027b60de1c7 +refs/heads/master: fbed600af159b9dce78dd74c4bff56b40bb19d47 diff --git a/trunk/CREDITS b/trunk/CREDITS index d8fe12a9421f..2346b09ca8bb 100644 --- a/trunk/CREDITS +++ b/trunk/CREDITS @@ -1823,6 +1823,11 @@ S: Kattreinstr 38 S: D-64295 S: Germany +N: Avi Kivity +E: avi.kivity@gmail.com +D: Kernel-based Virtual Machine (KVM) +S: Ra'annana, Israel + N: Andi Kleen E: andi@firstfloor.org U: http://www.halobates.de diff --git a/trunk/Documentation/DMA-attributes.txt b/trunk/Documentation/DMA-attributes.txt index e59480db9ee0..f50309081ac7 100644 --- a/trunk/Documentation/DMA-attributes.txt +++ b/trunk/Documentation/DMA-attributes.txt @@ -91,12 +91,3 @@ transferred to 'device' domain. 
This attribute can be also used for dma_unmap_{single,page,sg} functions family to force buffer to stay in device domain after releasing a mapping for it. Use this attribute with care! - -DMA_ATTR_FORCE_CONTIGUOUS -------------------------- - -By default DMA-mapping subsystem is allowed to assemble the buffer -allocated by dma_alloc_attrs() function from individual pages if it can -be mapped as contiguous chunk into device dma address space. By -specifing this attribute the allocated buffer is forced to be contiguous -also in physical memory. diff --git a/trunk/Documentation/DocBook/drm.tmpl b/trunk/Documentation/DocBook/drm.tmpl index 4ee2304f82f9..b0300529ab13 100644 --- a/trunk/Documentation/DocBook/drm.tmpl +++ b/trunk/Documentation/DocBook/drm.tmpl @@ -1141,13 +1141,23 @@ int max_width, max_height; the page_flip operation will be called with a non-NULL event argument pointing to a drm_pending_vblank_event instance. Upon page - flip completion the driver must call drm_send_vblank_event - to fill in the event and send to wake up any waiting processes. - This can be performed with + flip completion the driver must fill the + event::event + sequence, tv_sec + and tv_usec fields with the associated + vertical blanking count and timestamp, add the event to the + drm_file list of events to be signaled, and wake + up any waiting process. This can be performed with event.sequence = drm_vblank_count_and_time(..., &now); + event->event.tv_sec = now.tv_sec; + event->event.tv_usec = now.tv_usec; + spin_lock_irqsave(&dev->event_lock, flags); - ... - drm_send_vblank_event(dev, pipe, event); + list_add_tail(&event->base.link, &event->base.file_priv->event_list); + wake_up_interruptible(&event->base.file_priv->event_wait); spin_unlock_irqrestore(&dev->event_lock, flags); ]]> @@ -1611,10 +1621,10 @@ void intel_crt_init(struct drm_device *dev) - + - Mode Setting Helper Functions + Mid-layer Helper Functions The CRTC, encoder and connector functions provided by the drivers implement the DRM API. 
They're called by the DRM core and ioctl handlers @@ -2096,21 +2106,6 @@ void intel_crt_init(struct drm_device *dev) - - Modeset Helper Functions Reference -!Edrivers/gpu/drm/drm_crtc_helper.c - - - fbdev Helper Functions Reference -!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers -!Edrivers/gpu/drm/drm_fb_helper.c - - - Display Port Helper Functions Reference -!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers -!Iinclude/drm/drm_dp_helper.h -!Edrivers/gpu/drm/drm_dp_helper.c - diff --git a/trunk/Documentation/arm64/memory.txt b/trunk/Documentation/arm64/memory.txt index dbbdcbba75a3..4110cca96bd6 100644 --- a/trunk/Documentation/arm64/memory.txt +++ b/trunk/Documentation/arm64/memory.txt @@ -27,17 +27,17 @@ Start End Size Use ----------------------------------------------------------------------- 0000000000000000 0000007fffffffff 512GB user -ffffff8000000000 ffffffbbfffcffff ~240GB vmalloc +ffffff8000000000 ffffffbbfffeffff ~240GB vmalloc -ffffffbbfffd0000 ffffffbcfffdffff 64KB [guard page] +ffffffbbffff0000 ffffffbbffffffff 64KB [guard page] -ffffffbbfffe0000 ffffffbcfffeffff 64KB PCI I/O space +ffffffbc00000000 ffffffbdffffffff 8GB vmemmap -ffffffbbffff0000 ffffffbcffffffff 64KB [guard page] +ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap] -ffffffbc00000000 ffffffbdffffffff 8GB vmemmap +ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space -ffffffbe00000000 ffffffbffbffffff ~8GB [guard, future vmmemap] +ffffffbbffff0000 ffffffbcffffffff ~2MB [guard] ffffffbffc000000 ffffffbfffffffff 64MB modules diff --git a/trunk/Documentation/cgroups/memory.txt b/trunk/Documentation/cgroups/memory.txt index c07f7b4fb88d..71c4da413444 100644 --- a/trunk/Documentation/cgroups/memory.txt +++ b/trunk/Documentation/cgroups/memory.txt @@ -466,6 +466,10 @@ Note: 5.3 swappiness Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. +Please note that unlike the global swappiness, memcg knob set to 0 +really prevents from any swapping even if there is a swap storage +available. This might lead to memcg OOM killer if there are no file +pages to reclaim. Following cgroups' swappiness can't be changed. - root cgroup (uses /proc/sys/vm/swappiness). diff --git a/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt deleted file mode 100644 index b4fa934ae3a2..000000000000 --- a/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt +++ /dev/null @@ -1,191 +0,0 @@ -NVIDIA Tegra host1x - -Required properties: -- compatible: "nvidia,tegra-host1x" -- reg: Physical base address and length of the controller's registers. -- interrupts: The interrupt outputs from the controller. -- #address-cells: The number of cells used to represent physical base addresses - in the host1x address space. Should be 1. -- #size-cells: The number of cells used to represent the size of an address - range in the host1x address space. Should be 1. -- ranges: The mapping of the host1x address space to the CPU address space. - -The host1x top-level node defines a number of children, each representing one -of the following host1x client modules: - -- mpe: video encoder - - Required properties: - - compatible: "nvidia,tegra-mpe" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- vi: video input - - Required properties: - - compatible: "nvidia,tegra-vi" - - reg: Physical base address and length of the controller's registers. 
- - interrupts: The interrupt outputs from the controller. - -- epp: encoder pre-processor - - Required properties: - - compatible: "nvidia,tegra-epp" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- isp: image signal processor - - Required properties: - - compatible: "nvidia,tegra-isp" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- gr2d: 2D graphics engine - - Required properties: - - compatible: "nvidia,tegra-gr2d" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- gr3d: 3D graphics engine - - Required properties: - - compatible: "nvidia,tegra-gr3d" - - reg: Physical base address and length of the controller's registers. - -- dc: display controller - - Required properties: - - compatible: "nvidia,tegra-dc" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - - Each display controller node has a child node, named "rgb", that represents - the RGB output associated with the controller. It can take the following - optional properties: - - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing - - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection - - nvidia,edid: supplies a binary EDID blob - -- hdmi: High Definition Multimedia Interface - - Required properties: - - compatible: "nvidia,tegra-hdmi" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - - vdd-supply: regulator for supply voltage - - pll-supply: regulator for PLL - - Optional properties: - - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing - - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection - - nvidia,edid: supplies a binary EDID blob - -- tvo: TV encoder output - - Required properties: - - compatible: "nvidia,tegra-tvo" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- dsi: display serial interface - - Required properties: - - compatible: "nvidia,tegra-dsi" - - reg: Physical base address and length of the controller's registers. - -Example: - -/ { - ... 
- - host1x { - compatible = "nvidia,tegra20-host1x", "simple-bus"; - reg = <0x50000000 0x00024000>; - interrupts = <0 65 0x04 /* mpcore syncpt */ - 0 67 0x04>; /* mpcore general */ - - #address-cells = <1>; - #size-cells = <1>; - - ranges = <0x54000000 0x54000000 0x04000000>; - - mpe { - compatible = "nvidia,tegra20-mpe"; - reg = <0x54040000 0x00040000>; - interrupts = <0 68 0x04>; - }; - - vi { - compatible = "nvidia,tegra20-vi"; - reg = <0x54080000 0x00040000>; - interrupts = <0 69 0x04>; - }; - - epp { - compatible = "nvidia,tegra20-epp"; - reg = <0x540c0000 0x00040000>; - interrupts = <0 70 0x04>; - }; - - isp { - compatible = "nvidia,tegra20-isp"; - reg = <0x54100000 0x00040000>; - interrupts = <0 71 0x04>; - }; - - gr2d { - compatible = "nvidia,tegra20-gr2d"; - reg = <0x54140000 0x00040000>; - interrupts = <0 72 0x04>; - }; - - gr3d { - compatible = "nvidia,tegra20-gr3d"; - reg = <0x54180000 0x00040000>; - }; - - dc@54200000 { - compatible = "nvidia,tegra20-dc"; - reg = <0x54200000 0x00040000>; - interrupts = <0 73 0x04>; - - rgb { - status = "disabled"; - }; - }; - - dc@54240000 { - compatible = "nvidia,tegra20-dc"; - reg = <0x54240000 0x00040000>; - interrupts = <0 74 0x04>; - - rgb { - status = "disabled"; - }; - }; - - hdmi { - compatible = "nvidia,tegra20-hdmi"; - reg = <0x54280000 0x00040000>; - interrupts = <0 75 0x04>; - status = "disabled"; - }; - - tvo { - compatible = "nvidia,tegra20-tvo"; - reg = <0x542c0000 0x00040000>; - interrupts = <0 76 0x04>; - status = "disabled"; - }; - - dsi { - compatible = "nvidia,tegra20-dsi"; - reg = <0x54300000 0x00040000>; - status = "disabled"; - }; - }; - - ... -}; diff --git a/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt b/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt index bc9549529014..c79bab025369 100644 --- a/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt +++ b/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt @@ -8,9 +8,16 @@ gpios property as described in section VIII.1 in the following order: MDC, MDIO. +Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases" +node. + Example: -mdio { +aliases { + mdio-gpio0 = <&mdio0>; +}; + +mdio0: mdio { compatible = "virtual,mdio-gpio"; #address-cells = <1>; #size-cells = <0>; diff --git a/trunk/Documentation/filesystems/proc.txt b/trunk/Documentation/filesystems/proc.txt index a1793d670cd0..3844d21d6ca3 100644 --- a/trunk/Documentation/filesystems/proc.txt +++ b/trunk/Documentation/filesystems/proc.txt @@ -33,7 +33,7 @@ Table of Contents 2 Modifying System Parameters 3 Per-Process Parameters - 3.1 /proc//oom_score_adj - Adjust the oom-killer + 3.1 /proc//oom_adj & /proc//oom_score_adj - Adjust the oom-killer score 3.2 /proc//oom_score - Display current oom-killer score 3.3 /proc//io - Display the IO accounting fields @@ -1320,10 +1320,10 @@ of the kernel. CHAPTER 3: PER-PROCESS PARAMETERS ------------------------------------------------------------------------------ -3.1 /proc//oom_score_adj- Adjust the oom-killer score +3.1 /proc//oom_adj & /proc//oom_score_adj- Adjust the oom-killer score -------------------------------------------------------------------------------- -This file can be used to adjust the badness heuristic used to select which +These file can be used to adjust the badness heuristic used to select which process gets killed in out of memory conditions. 
The badness heuristic assigns a value to each candidate task ranging from 0 @@ -1361,6 +1361,12 @@ same system, cpuset, mempolicy, or memory controller resources to use at least equivalent to discounting 50% of the task's allowed memory from being considered as scoring against the task. +For backwards compatibility with previous kernels, /proc//oom_adj may also +be used to tune the badness score. Its acceptable values range from -16 +(OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX) and a special value of -17 +(OOM_DISABLE) to disable oom killing entirely for that task. Its value is +scaled linearly with /proc//oom_score_adj. + The value of /proc//oom_score_adj may be reduced no lower than the last value set by a CAP_SYS_RESOURCE process. To reduce the value any lower requires CAP_SYS_RESOURCE. @@ -1375,7 +1381,9 @@ minimal amount of work. ------------------------------------------------------------- This file can be used to check the current score used by the oom-killer is for -any given . +any given . Use it together with /proc//oom_score_adj to tune which +process should be killed in an out-of-memory situation. + 3.3 /proc//io - Display the IO accounting fields ------------------------------------------------------- diff --git a/trunk/Documentation/kref.txt b/trunk/Documentation/kref.txt index ddf85a5dde0c..48ba715d5a63 100644 --- a/trunk/Documentation/kref.txt +++ b/trunk/Documentation/kref.txt @@ -213,91 +213,3 @@ presentation on krefs, which can be found at: and: http://www.kroah.com/linux/talks/ols_2004_kref_talk/ - -The above example could also be optimized using kref_get_unless_zero() in -the following way: - -static struct my_data *get_entry() -{ - struct my_data *entry = NULL; - mutex_lock(&mutex); - if (!list_empty(&q)) { - entry = container_of(q.next, struct my_data, link); - if (!kref_get_unless_zero(&entry->refcount)) - entry = NULL; - } - mutex_unlock(&mutex); - return entry; -} - -static void release_entry(struct kref *ref) -{ - struct my_data *entry = container_of(ref, struct my_data, refcount); - - mutex_lock(&mutex); - list_del(&entry->link); - mutex_unlock(&mutex); - kfree(entry); -} - -static void put_entry(struct my_data *entry) -{ - kref_put(&entry->refcount, release_entry); -} - -Which is useful to remove the mutex lock around kref_put() in put_entry(), but -it's important that kref_get_unless_zero is enclosed in the same critical -section that finds the entry in the lookup table, -otherwise kref_get_unless_zero may reference already freed memory. -Note that it is illegal to use kref_get_unless_zero without checking its -return value. If you are sure (by already having a valid pointer) that -kref_get_unless_zero() will return true, then use kref_get() instead. - -The function kref_get_unless_zero also makes it possible to use rcu -locking for lookups in the above example: - -struct my_data -{ - struct rcu_head rhead; - . - struct kref refcount; - . - . 
-}; - -static struct my_data *get_entry_rcu() -{ - struct my_data *entry = NULL; - rcu_read_lock(); - if (!list_empty(&q)) { - entry = container_of(q.next, struct my_data, link); - if (!kref_get_unless_zero(&entry->refcount)) - entry = NULL; - } - rcu_read_unlock(); - return entry; -} - -static void release_entry_rcu(struct kref *ref) -{ - struct my_data *entry = container_of(ref, struct my_data, refcount); - - mutex_lock(&mutex); - list_del_rcu(&entry->link); - mutex_unlock(&mutex); - kfree_rcu(entry, rhead); -} - -static void put_entry(struct my_data *entry) -{ - kref_put(&entry->refcount, release_entry_rcu); -} - -But note that the struct kref member needs to remain in valid memory for a -rcu grace period after release_entry_rcu was called. That can be accomplished -by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu() -before using kfree, but note that synchronize_rcu() may sleep for a -substantial amount of time. - - -Thomas Hellstrom diff --git a/trunk/Documentation/networking/netdev-features.txt b/trunk/Documentation/networking/netdev-features.txt index 4164f5c02e4b..f310edec8a77 100644 --- a/trunk/Documentation/networking/netdev-features.txt +++ b/trunk/Documentation/networking/netdev-features.txt @@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet. This requests that the NIC receive all possible frames, including errored frames (such as bad FCS, etc). This can be helpful when sniffing a link with bad packets on it. Some NICs may receive more packets if also put into normal -PROMISC mdoe. +PROMISC mode. diff --git a/trunk/Documentation/networking/vxlan.txt b/trunk/Documentation/networking/vxlan.txt index 5b34b762d7d5..6d993510f091 100644 --- a/trunk/Documentation/networking/vxlan.txt +++ b/trunk/Documentation/networking/vxlan.txt @@ -32,7 +32,7 @@ no entry is in the forwarding table. # ip link delete vxlan0 3. Show vxlan info - # ip -d show vxlan0 + # ip -d link show vxlan0 It is possible to create, destroy and display the vxlan forwarding table using the new bridge command. @@ -41,7 +41,7 @@ forwarding table using the new bridge command. # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0 2. Delete forwarding table entry - # bridge fdb delete 00:17:42:8a:b4:05 + # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0 3. 
Show forwarding table # bridge fdb show dev vxlan0 diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 2f66431bc698..9386a63ea8f6 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -526,17 +526,17 @@ F: drivers/video/geode/ F: arch/x86/include/asm/geode.h AMD IOMMU (AMD-VI) -M: Joerg Roedel +M: Joerg Roedel L: iommu@lists.linux-foundation.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git -S: Supported +S: Maintained F: drivers/iommu/amd_iommu*.[ch] F: include/linux/amd-iommu.h AMD MICROCODE UPDATE SUPPORT -M: Andreas Herrmann +M: Andreas Herrmann L: amd64-microcode@amd64.org -S: Supported +S: Maintained F: arch/x86/kernel/microcode_amd.c AMS (Apple Motion Sensor) DRIVER @@ -841,6 +841,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git F: arch/arm/mach-sa1100/jornada720.c F: arch/arm/mach-sa1100/include/mach/jornada720.h +ARM/IGEP MACHINE SUPPORT +M: Enric Balletbo i Serra +M: Javier Martinez Canillas +L: linux-omap@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-omap2/board-igep0020.c + ARM/INCOME PXA270 SUPPORT M: Marek Vasut L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -2512,15 +2520,6 @@ S: Supported F: drivers/gpu/drm/exynos F: include/drm/exynos* -DRM DRIVERS FOR NVIDIA TEGRA -M: Thierry Reding -L: dri-devel@lists.freedesktop.org -L: linux-tegra@vger.kernel.org -T: git git://gitorious.org/thierryreding/linux.git -S: Maintained -F: drivers/gpu/drm/tegra/ -F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt - DSCC4 DRIVER M: Francois Romieu L: netdev@vger.kernel.org @@ -2717,10 +2716,10 @@ F: include/linux/edac.h EDAC-AMD64 M: Doug Thompson -M: Borislav Petkov +M: Borislav Petkov L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net -S: Supported +S: Maintained F: drivers/edac/amd64_edac* EDAC-E752X @@ -3607,6 +3606,49 @@ F: drivers/hid/hid-hyperv.c F: drivers/net/hyperv/ F: drivers/staging/hv/ +I2C OVER PARALLEL PORT +M: Jean Delvare +L: linux-i2c@vger.kernel.org +S: Maintained +F: Documentation/i2c/busses/i2c-parport +F: Documentation/i2c/busses/i2c-parport-light +F: drivers/i2c/busses/i2c-parport.c +F: drivers/i2c/busses/i2c-parport-light.c + +I2C/SMBUS CONTROLLER DRIVERS FOR PC +M: Jean Delvare +L: linux-i2c@vger.kernel.org +S: Maintained +F: Documentation/i2c/busses/i2c-ali1535 +F: Documentation/i2c/busses/i2c-ali1563 +F: Documentation/i2c/busses/i2c-ali15x3 +F: Documentation/i2c/busses/i2c-amd756 +F: Documentation/i2c/busses/i2c-amd8111 +F: Documentation/i2c/busses/i2c-i801 +F: Documentation/i2c/busses/i2c-nforce2 +F: Documentation/i2c/busses/i2c-piix4 +F: Documentation/i2c/busses/i2c-sis5595 +F: Documentation/i2c/busses/i2c-sis630 +F: Documentation/i2c/busses/i2c-sis96x +F: Documentation/i2c/busses/i2c-via +F: Documentation/i2c/busses/i2c-viapro +F: drivers/i2c/busses/i2c-ali1535.c +F: drivers/i2c/busses/i2c-ali1563.c +F: drivers/i2c/busses/i2c-ali15x3.c +F: drivers/i2c/busses/i2c-amd756.c +F: drivers/i2c/busses/i2c-amd756-s4882.c +F: drivers/i2c/busses/i2c-amd8111.c +F: drivers/i2c/busses/i2c-i801.c +F: drivers/i2c/busses/i2c-isch.c +F: drivers/i2c/busses/i2c-nforce2.c +F: drivers/i2c/busses/i2c-nforce2-s4985.c +F: drivers/i2c/busses/i2c-piix4.c +F: drivers/i2c/busses/i2c-sis5595.c +F: drivers/i2c/busses/i2c-sis630.c +F: drivers/i2c/busses/i2c-sis96x.c +F: drivers/i2c/busses/i2c-via.c +F: drivers/i2c/busses/i2c-viapro.c + I2C/SMBUS STUB DRIVER M: "Mark M. 
Hoffman" L: linux-i2c@vger.kernel.org @@ -3614,9 +3656,8 @@ S: Maintained F: drivers/i2c/busses/i2c-stub.c I2C SUBSYSTEM -M: "Jean Delvare (PC drivers, core)" +M: Wolfram Sang M: "Ben Dooks (embedded platforms)" -M: "Wolfram Sang (embedded platforms)" L: linux-i2c@vger.kernel.org W: http://i2c.wiki.kernel.org/ T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ @@ -3627,6 +3668,13 @@ F: drivers/i2c/ F: include/linux/i2c.h F: include/linux/i2c-*.h +I2C-TAOS-EVM DRIVER +M: Jean Delvare +L: linux-i2c@vger.kernel.org +S: Maintained +F: Documentation/i2c/busses/i2c-taos-evm +F: drivers/i2c/busses/i2c-taos-evm.c + I2C-TINY-USB DRIVER M: Till Harbaum L: linux-i2c@vger.kernel.org @@ -3713,7 +3761,7 @@ S: Maintained F: drivers/platform/x86/ideapad-laptop.c IDE/ATAPI DRIVERS -M: Borislav Petkov +M: Borislav Petkov L: linux-ide@vger.kernel.org S: Maintained F: Documentation/cdrom/ide-cd @@ -4240,8 +4288,8 @@ F: include/linux/lockd/ F: include/linux/sunrpc/ KERNEL VIRTUAL MACHINE (KVM) -M: Avi Kivity M: Marcelo Tosatti +M: Gleb Natapov L: kvm@vger.kernel.org W: http://kvm.qumranet.com S: Supported @@ -5373,7 +5421,7 @@ S: Maintained F: sound/drivers/opl4/ OPROFILE -M: Robert Richter +M: Robert Richter L: oprofile-list@lists.sf.net S: Maintained F: arch/*/include/asm/oprofile*.h @@ -7219,6 +7267,14 @@ L: linux-xtensa@linux-xtensa.org S: Maintained F: arch/xtensa/ +THERMAL +M: Zhang Rui +L: linux-pm@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git +S: Supported +F: drivers/thermal/ +F: include/linux/thermal.h + THINKPAD ACPI EXTRAS DRIVER M: Henrique de Moraes Holschuh L: ibm-acpi-devel@lists.sourceforge.net @@ -7896,13 +7952,6 @@ M: Roger Luethi S: Maintained F: drivers/net/ethernet/via/via-rhine.c -VIAPRO SMBUS DRIVER -M: Jean Delvare -L: linux-i2c@vger.kernel.org -S: Maintained -F: Documentation/i2c/busses/i2c-viapro -F: drivers/i2c/busses/i2c-viapro.c - VIA SD/MMC CARD CONTROLLER DRIVER M: Bruce Chang M: Harald Welte @@ -8157,7 +8206,7 @@ F: drivers/platform/x86 X86 MCE INFRASTRUCTURE M: Tony Luck -M: Borislav Petkov +M: Borislav Petkov L: linux-edac@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/mcheck/* diff --git a/trunk/Makefile b/trunk/Makefile index a1ccf225c4e9..3d2fc460b22f 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 7 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc7 NAME = Terrified Chipmunk # *DOCUMENTATION* diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c index 1e6956a90608..14db93e4c8a8 100644 --- a/trunk/arch/alpha/kernel/osf_sys.c +++ b/trunk/arch/alpha/kernel/osf_sys.c @@ -445,7 +445,7 @@ struct procfs_args { * unhappy with OSF UFS. 
[CHECKME] */ static int -osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) +osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; @@ -465,7 +465,7 @@ osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) } static int -osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) +osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; @@ -485,7 +485,7 @@ osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) } static int -osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) +osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; diff --git a/trunk/arch/arm/boot/Makefile b/trunk/arch/arm/boot/Makefile index f2aa09eb658e..9137df539b61 100644 --- a/trunk/arch/arm/boot/Makefile +++ b/trunk/arch/arm/boot/Makefile @@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y) $(obj)/xipImage: vmlinux FORCE $(call if_changed,objcopy) - $(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' + @$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' $(obj)/Image $(obj)/zImage: FORCE @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)' @@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) - $(kecho) ' Kernel: $@ is ready' + @$(kecho) ' Kernel: $@ is ready' $(obj)/compressed/vmlinux: $(obj)/Image FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(call if_changed,objcopy) - $(kecho) ' Kernel: $@ is ready' + @$(kecho) ' Kernel: $@ is ready' endif @@ -90,7 +90,7 @@ fi $(obj)/uImage: $(obj)/zImage FORCE @$(check_for_multiple_loadaddr) $(call if_changed,uimage) - $(kecho) ' Image $@ is ready' + @$(kecho) ' Image $@ is ready' $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE $(Q)$(MAKE) $(build)=$(obj)/bootp $@ @@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE $(obj)/bootpImage: $(obj)/bootp/bootp FORCE $(call if_changed,objcopy) - $(kecho) ' Kernel: $@ is ready' + @$(kecho) ' Kernel: $@ is ready' PHONY += initrd FORCE initrd: diff --git a/trunk/arch/arm/boot/dts/tegra30.dtsi b/trunk/arch/arm/boot/dts/tegra30.dtsi index b1497c7d7d68..df7f2270fc91 100644 --- a/trunk/arch/arm/boot/dts/tegra30.dtsi +++ b/trunk/arch/arm/boot/dts/tegra30.dtsi @@ -73,8 +73,8 @@ pinmux: pinmux { compatible = "nvidia,tegra30-pinmux"; - reg = <0x70000868 0xd0 /* Pad control registers */ - 0x70003000 0x3e0>; /* Mux registers */ + reg = <0x70000868 0xd4 /* Pad control registers */ + 0x70003000 0x3e4>; /* Mux registers */ }; serial@70006000 { diff --git a/trunk/arch/arm/include/asm/io.h b/trunk/arch/arm/include/asm/io.h index 35c1ed89b936..42f042ee4ada 100644 --- a/trunk/arch/arm/include/asm/io.h +++ b/trunk/arch/arm/include/asm/io.h @@ -64,7 +64,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); static inline void __raw_writew(u16 val, volatile void __iomem *addr) { asm volatile("strh %1, %0" - : "+Qo" (*(volatile u16 __force *)addr) + : "+Q" (*(volatile u16 __force *)addr) : "r" (val)); } @@ -72,7 +72,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; asm volatile("ldrh %1, %0" - : "+Qo" (*(volatile u16 __force *)addr), + : "+Q" (*(volatile u16 __force *)addr), "=r" (val)); return val; } diff --git a/trunk/arch/arm/include/asm/sched_clock.h b/trunk/arch/arm/include/asm/sched_clock.h index 
05b8e82ec9f5..e3f757263438 100644 --- a/trunk/arch/arm/include/asm/sched_clock.h +++ b/trunk/arch/arm/include/asm/sched_clock.h @@ -10,7 +10,5 @@ extern void sched_clock_postinit(void); extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); -extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, - unsigned long rate); #endif diff --git a/trunk/arch/arm/include/asm/vfpmacros.h b/trunk/arch/arm/include/asm/vfpmacros.h index 6a6f1e485f41..301c1db3e99b 100644 --- a/trunk/arch/arm/include/asm/vfpmacros.h +++ b/trunk/arch/arm/include/asm/vfpmacros.h @@ -27,9 +27,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field @@ -51,9 +51,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field diff --git a/trunk/arch/arm/include/uapi/asm/hwcap.h b/trunk/arch/arm/include/uapi/asm/hwcap.h index f254f6503cce..3688fd15a32d 100644 --- a/trunk/arch/arm/include/uapi/asm/hwcap.h +++ b/trunk/arch/arm/include/uapi/asm/hwcap.h @@ -18,11 +18,12 @@ #define HWCAP_THUMBEE (1 << 11) #define HWCAP_NEON (1 << 12) #define HWCAP_VFPv3 (1 << 13) -#define HWCAP_VFPv3D16 (1 << 14) +#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */ #define HWCAP_TLS (1 << 15) #define HWCAP_VFPv4 (1 << 16) #define HWCAP_IDIVA (1 << 17) #define HWCAP_IDIVT (1 << 18) +#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) diff --git a/trunk/arch/arm/kernel/sched_clock.c b/trunk/arch/arm/kernel/sched_clock.c index e21bac20d90d..fc6692e2b603 100644 --- a/trunk/arch/arm/kernel/sched_clock.c +++ b/trunk/arch/arm/kernel/sched_clock.c @@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks) update_sched_clock(); } -void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, - unsigned long rate) -{ - setup_sched_clock(read, bits, rate); - cd.needs_suspend = true; -} - void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) { unsigned long r, w; @@ -189,18 +182,15 @@ void __init sched_clock_postinit(void) static int sched_clock_suspend(void) { sched_clock_poll(sched_clock_timer.data); - if (cd.needs_suspend) - cd.suspended = true; + cd.suspended = true; return 0; } static void sched_clock_resume(void) { - if (cd.needs_suspend) { - cd.epoch_cyc = read_sched_clock(); - cd.epoch_cyc_copy = cd.epoch_cyc; - cd.suspended = false; - } + cd.epoch_cyc = read_sched_clock(); + cd.epoch_cyc_copy = cd.epoch_cyc; + cd.suspended = false; } static struct syscore_ops sched_clock_ops = { diff --git a/trunk/arch/arm/mach-at91/at91rm9200_devices.c b/trunk/arch/arm/mach-at91/at91rm9200_devices.c index 
1e122bcd7845..3cee0e6ea7c3 100644 --- a/trunk/arch/arm/mach-at91/at91rm9200_devices.c +++ b/trunk/arch/arm/mach-at91/at91rm9200_devices.c @@ -68,7 +68,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } diff --git a/trunk/arch/arm/mach-at91/at91sam9260_devices.c b/trunk/arch/arm/mach-at91/at91sam9260_devices.c index aa1e58729885..414bd855fb0c 100644 --- a/trunk/arch/arm/mach-at91/at91sam9260_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9260_devices.c @@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } diff --git a/trunk/arch/arm/mach-at91/at91sam9261_devices.c b/trunk/arch/arm/mach-at91/at91sam9261_devices.c index b9487696b7be..cd604aad8e96 100644 --- a/trunk/arch/arm/mach-at91/at91sam9261_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9261_devices.c @@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } diff --git a/trunk/arch/arm/mach-at91/at91sam9263_devices.c b/trunk/arch/arm/mach-at91/at91sam9263_devices.c index cb85da2eccea..9c61e59a2104 100644 --- a/trunk/arch/arm/mach-at91/at91sam9263_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9263_devices.c @@ -78,7 +78,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } diff --git a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c index b1596072dcc2..fcd233cb33d2 100644 --- a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1841,8 +1841,8 @@ static struct resource sha_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = AT91SAM9G45_ID_AESTDESSHA, - .end = AT91SAM9G45_ID_AESTDESSHA, + .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, + .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; @@ -1874,8 +1874,8 @@ static struct resource tdes_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = AT91SAM9G45_ID_AESTDESSHA, - .end = AT91SAM9G45_ID_AESTDESSHA, + .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, + .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; @@ -1910,8 +1910,8 @@ static struct resource aes_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = AT91SAM9G45_ID_AESTDESSHA, - .end = AT91SAM9G45_ID_AESTDESSHA, + .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, + .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; diff --git a/trunk/arch/arm/mach-davinci/dm644x.c b/trunk/arch/arm/mach-davinci/dm644x.c index cd0c8b1e1ecf..14e9947bad6e 100644 --- a/trunk/arch/arm/mach-davinci/dm644x.c +++ b/trunk/arch/arm/mach-davinci/dm644x.c @@ -713,8 +713,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type, break; 
case VPBE_ENC_CUSTOM_TIMINGS: if (pclock <= 27000000) { - v |= DM644X_VPSS_MUXSEL_PLL2_MODE | - DM644X_VPSS_DACCLKEN; + v |= DM644X_VPSS_DACCLKEN; writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); } else { /* diff --git a/trunk/arch/arm/mach-exynos/dma.c b/trunk/arch/arm/mach-exynos/dma.c index 21d568b3b149..87e07d6fc615 100644 --- a/trunk/arch/arm/mach-exynos/dma.c +++ b/trunk/arch/arm/mach-exynos/dma.c @@ -275,6 +275,9 @@ static int __init exynos_dma_init(void) exynos_pdma1_pdata.nr_valid_peri = ARRAY_SIZE(exynos4210_pdma1_peri); exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri; + + if (samsung_rev() == EXYNOS4210_REV_0) + exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1; } else if (soc_is_exynos4212() || soc_is_exynos4412()) { exynos_pdma0_pdata.nr_valid_peri = ARRAY_SIZE(exynos4212_pdma0_peri); diff --git a/trunk/arch/arm/mach-exynos/include/mach/map.h b/trunk/arch/arm/mach-exynos/include/mach/map.h index 8480849affb9..ed4da4544cd2 100644 --- a/trunk/arch/arm/mach-exynos/include/mach/map.h +++ b/trunk/arch/arm/mach-exynos/include/mach/map.h @@ -90,6 +90,7 @@ #define EXYNOS4_PA_MDMA0 0x10810000 #define EXYNOS4_PA_MDMA1 0x12850000 +#define EXYNOS4_PA_S_MDMA1 0x12840000 #define EXYNOS4_PA_PDMA0 0x12680000 #define EXYNOS4_PA_PDMA1 0x12690000 #define EXYNOS5_PA_MDMA0 0x10800000 diff --git a/trunk/arch/arm/mach-highbank/system.c b/trunk/arch/arm/mach-highbank/system.c index 82c27230d4a9..86e37cd9376c 100644 --- a/trunk/arch/arm/mach-highbank/system.c +++ b/trunk/arch/arm/mach-highbank/system.c @@ -28,6 +28,7 @@ void highbank_restart(char mode, const char *cmd) hignbank_set_pwr_soft_reset(); scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); - cpu_do_idle(); + while (1) + cpu_do_idle(); } diff --git a/trunk/arch/arm/mach-imx/clk-gate2.c b/trunk/arch/arm/mach-imx/clk-gate2.c index 3c1b8ff9a0a6..cc49c7ae186e 100644 --- a/trunk/arch/arm/mach-imx/clk-gate2.c +++ b/trunk/arch/arm/mach-imx/clk-gate2.c @@ -112,7 +112,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name, clk = clk_register(dev, &gate->hw); if (IS_ERR(clk)) - kfree(clk); + kfree(gate); return clk; } diff --git a/trunk/arch/arm/mach-imx/ehci-imx25.c b/trunk/arch/arm/mach-imx/ehci-imx25.c index 412c583a24b0..576af7446952 100644 --- a/trunk/arch/arm/mach-imx/ehci-imx25.c +++ b/trunk/arch/arm/mach-imx/ehci-imx25.c @@ -30,7 +30,7 @@ #define MX25_H1_SIC_SHIFT 21 #define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT) #define MX25_H1_PP_BIT (1 << 18) -#define MX25_H1_PM_BIT (1 << 8) +#define MX25_H1_PM_BIT (1 << 16) #define MX25_H1_IPPUE_UP_BIT (1 << 7) #define MX25_H1_IPPUE_DOWN_BIT (1 << 6) #define MX25_H1_TLL_BIT (1 << 5) diff --git a/trunk/arch/arm/mach-imx/ehci-imx35.c b/trunk/arch/arm/mach-imx/ehci-imx35.c index 779e16eb65cb..293397852e4e 100644 --- a/trunk/arch/arm/mach-imx/ehci-imx35.c +++ b/trunk/arch/arm/mach-imx/ehci-imx35.c @@ -30,7 +30,7 @@ #define MX35_H1_SIC_SHIFT 21 #define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT) #define MX35_H1_PP_BIT (1 << 18) -#define MX35_H1_PM_BIT (1 << 8) +#define MX35_H1_PM_BIT (1 << 16) #define MX35_H1_IPPUE_UP_BIT (1 << 7) #define MX35_H1_IPPUE_DOWN_BIT (1 << 6) #define MX35_H1_TLL_BIT (1 << 5) diff --git a/trunk/arch/arm/mach-omap2/board-igep0020.c b/trunk/arch/arm/mach-omap2/board-igep0020.c index 48d5e41dfbfa..378590694447 100644 --- a/trunk/arch/arm/mach-omap2/board-igep0020.c +++ b/trunk/arch/arm/mach-omap2/board-igep0020.c @@ -580,6 +580,11 @@ static void __init igep_wlan_bt_init(void) } else return; + /* Make sure that the GPIO pins are muxed correctly */ + 
omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT); + err = gpio_request_array(igep_wlan_bt_gpios, ARRAY_SIZE(igep_wlan_bt_gpios)); if (err) { diff --git a/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c b/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c index b56d06b48782..95192a062d5d 100644 --- a/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c +++ b/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c @@ -359,7 +359,7 @@ static struct clockdomain iss_44xx_clkdm = { .clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS, .wkdep_srcs = iss_wkup_sleep_deps, .sleepdep_srcs = iss_wkup_sleep_deps, - .flags = CLKDM_CAN_HWSUP_SWSUP, + .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain l3_dss_44xx_clkdm = { diff --git a/trunk/arch/arm/mach-omap2/common-board-devices.c b/trunk/arch/arm/mach-omap2/common-board-devices.c index 48daac2581b4..84551f205e46 100644 --- a/trunk/arch/arm/mach-omap2/common-board-devices.c +++ b/trunk/arch/arm/mach-omap2/common-board-devices.c @@ -64,30 +64,36 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, struct spi_board_info *spi_bi = &ads7846_spi_board_info; int err; - err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown"); - if (err) { - pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err); - return; - } + /* + * If a board defines get_pendown_state() function, request the pendown + * GPIO and set the GPIO debounce time. + * If a board does not define the get_pendown_state() function, then + * the ads7846 driver will setup the pendown GPIO itself. + */ + if (board_pdata && board_pdata->get_pendown_state) { + err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown"); + if (err) { + pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err); + return; + } - if (gpio_debounce) - gpio_set_debounce(gpio_pendown, gpio_debounce); + if (gpio_debounce) + gpio_set_debounce(gpio_pendown, gpio_debounce); + + gpio_export(gpio_pendown, 0); + } spi_bi->bus_num = bus_num; spi_bi->irq = gpio_to_irq(gpio_pendown); + ads7846_config.gpio_pendown = gpio_pendown; + if (board_pdata) { board_pdata->gpio_pendown = gpio_pendown; + board_pdata->gpio_pendown_debounce = gpio_debounce; spi_bi->platform_data = board_pdata; - if (board_pdata->get_pendown_state) - gpio_export(gpio_pendown, 0); - } else { - ads7846_config.gpio_pendown = gpio_pendown; } - if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state)) - gpio_free(gpio_pendown); - spi_register_board_info(&ads7846_spi_board_info, 1); } #else diff --git a/trunk/arch/arm/mach-omap2/devices.c b/trunk/arch/arm/mach-omap2/devices.c index cba60e05e32e..c72b5a727720 100644 --- a/trunk/arch/arm/mach-omap2/devices.c +++ b/trunk/arch/arm/mach-omap2/devices.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -613,6 +614,83 @@ static void omap_init_vout(void) static inline void omap_init_vout(void) {} #endif +#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE) +static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev) +{ + int cnt = 0; + + while (ocp2scp_dev->drv_name != NULL) { + cnt++; + ocp2scp_dev++; + } + + return cnt; +} + +static void omap_init_ocp2scp(void) +{ + struct omap_hwmod *oh; + struct platform_device *pdev; + int bus_id = -1, dev_cnt = 0, i; + struct omap_ocp2scp_dev *ocp2scp_dev; + const char *oh_name, *name; + struct omap_ocp2scp_platform_data *pdata; + + if (!cpu_is_omap44xx()) + return; 
+ + oh_name = "ocp2scp_usb_phy"; + name = "omap-ocp2scp"; + + oh = omap_hwmod_lookup(oh_name); + if (!oh) { + pr_err("%s: could not find omap_hwmod for %s\n", __func__, + oh_name); + return; + } + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + pr_err("%s: No memory for ocp2scp pdata\n", __func__); + return; + } + + ocp2scp_dev = oh->dev_attr; + dev_cnt = count_ocp2scp_devices(ocp2scp_dev); + + if (!dev_cnt) { + pr_err("%s: No devices connected to ocp2scp\n", __func__); + kfree(pdata); + return; + } + + pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *) + * dev_cnt, GFP_KERNEL); + if (!pdata->devices) { + pr_err("%s: No memory for ocp2scp pdata devices\n", __func__); + kfree(pdata); + return; + } + + for (i = 0; i < dev_cnt; i++, ocp2scp_dev++) + pdata->devices[i] = ocp2scp_dev; + + pdata->dev_cnt = dev_cnt; + + pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL, + 0, false); + if (IS_ERR(pdev)) { + pr_err("Could not build omap_device for %s %s\n", + name, oh_name); + kfree(pdata->devices); + kfree(pdata); + return; + } +} +#else +static inline void omap_init_ocp2scp(void) { } +#endif + /*-------------------------------------------------------------------------*/ static int __init omap2_init_devices(void) @@ -640,6 +718,7 @@ static int __init omap2_init_devices(void) omap_init_sham(); omap_init_aes(); omap_init_vout(); + omap_init_ocp2scp(); return 0; } diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.c b/trunk/arch/arm/mach-omap2/omap_hwmod.c index b969ab1d258b..87cc6d058de2 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod.c @@ -421,6 +421,38 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v) return 0; } +/** + * _wait_softreset_complete - wait for an OCP softreset to complete + * @oh: struct omap_hwmod * to wait on + * + * Wait until the IP block represented by @oh reports that its OCP + * softreset is complete. This can be triggered by software (see + * _ocp_softreset()) or by hardware upon returning from off-mode (one + * example is HSMMC). Waits for up to MAX_MODULE_SOFTRESET_WAIT + * microseconds. Returns the number of microseconds waited. 
+ */ +static int _wait_softreset_complete(struct omap_hwmod *oh) +{ + struct omap_hwmod_class_sysconfig *sysc; + u32 softrst_mask; + int c = 0; + + sysc = oh->class->sysc; + + if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS) + omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs) + & SYSS_RESETDONE_MASK), + MAX_MODULE_SOFTRESET_WAIT, c); + else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) { + softrst_mask = (0x1 << sysc->sysc_fields->srst_shift); + omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs) + & softrst_mask), + MAX_MODULE_SOFTRESET_WAIT, c); + } + + return c; +} + /** * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v * @oh: struct omap_hwmod * @@ -1282,6 +1314,18 @@ static void _enable_sysc(struct omap_hwmod *oh) if (!oh->class->sysc) return; + /* + * Wait until reset has completed, this is needed as the IP + * block is reset automatically by hardware in some cases + * (off-mode for example), and the drivers require the + * IP to be ready when they access it + */ + if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) + _enable_optional_clocks(oh); + _wait_softreset_complete(oh); + if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) + _disable_optional_clocks(oh); + v = oh->_sysc_cache; sf = oh->class->sysc->sysc_flags; @@ -1804,7 +1848,7 @@ static int _am33xx_disable_module(struct omap_hwmod *oh) */ static int _ocp_softreset(struct omap_hwmod *oh) { - u32 v, softrst_mask; + u32 v; int c = 0; int ret = 0; @@ -1834,19 +1878,7 @@ static int _ocp_softreset(struct omap_hwmod *oh) if (oh->class->sysc->srst_udelay) udelay(oh->class->sysc->srst_udelay); - if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS) - omap_test_timeout((omap_hwmod_read(oh, - oh->class->sysc->syss_offs) - & SYSS_RESETDONE_MASK), - MAX_MODULE_SOFTRESET_WAIT, c); - else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) { - softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); - omap_test_timeout(!(omap_hwmod_read(oh, - oh->class->sysc->sysc_offs) - & softrst_mask), - MAX_MODULE_SOFTRESET_WAIT, c); - } - + c = _wait_softreset_complete(oh); if (c == MAX_MODULE_SOFTRESET_WAIT) pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", oh->name, MAX_MODULE_SOFTRESET_WAIT); @@ -2352,6 +2384,9 @@ static int __init _setup_reset(struct omap_hwmod *oh) if (oh->_state != _HWMOD_STATE_INITIALIZED) return -EINVAL; + if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK) + return -EPERM; + if (oh->rst_lines_cnt == 0) { r = _enable(oh); if (r) { diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 652d0285bd6d..0b1249e00398 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -2125,6 +2126,14 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = { .name = "mcpdm", .class = &omap44xx_mcpdm_hwmod_class, .clkdm_name = "abe_clkdm", + /* + * It's suspected that the McPDM requires an off-chip main + * functional clock, controlled via I2C. This IP block is + * currently reset very early during boot, before I2C is + * available, so it doesn't seem that we have any choice in + * the kernel other than to avoid resetting it. 
+ */ + .flags = HWMOD_EXT_OPT_MAIN_CLK, .mpu_irqs = omap44xx_mcpdm_irqs, .sdma_reqs = omap44xx_mcpdm_sdma_reqs, .main_clk = "mcpdm_fck", @@ -2681,6 +2690,32 @@ static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = { .sysc = &omap44xx_ocp2scp_sysc, }; +/* ocp2scp dev_attr */ +static struct resource omap44xx_usb_phy_and_pll_addrs[] = { + { + .name = "usb_phy", + .start = 0x4a0ad080, + .end = 0x4a0ae000, + .flags = IORESOURCE_MEM, + }, + { + /* XXX: Remove this once control module driver is in place */ + .name = "ctrl_dev", + .start = 0x4a002300, + .end = 0x4a002303, + .flags = IORESOURCE_MEM, + }, + { } +}; + +static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = { + { + .drv_name = "omap-usb2", + .res = omap44xx_usb_phy_and_pll_addrs, + }, + { } +}; + /* ocp2scp_usb_phy */ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .name = "ocp2scp_usb_phy", @@ -2694,6 +2729,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .modulemode = MODULEMODE_HWCTRL, }, }, + .dev_attr = ocp2scp_dev_attr, }; /* diff --git a/trunk/arch/arm/mach-omap2/twl-common.c b/trunk/arch/arm/mach-omap2/twl-common.c index 635e109f5ad3..a256135d8e48 100644 --- a/trunk/arch/arm/mach-omap2/twl-common.c +++ b/trunk/arch/arm/mach-omap2/twl-common.c @@ -73,6 +73,7 @@ void __init omap4_pmic_init(const char *pmic_type, { /* PMIC part*/ omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE); + omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT); omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data); /* Register additional devices on i2c1 bus if needed */ @@ -366,7 +367,7 @@ static struct regulator_init_data omap4_clk32kg_idata = { }; static struct regulator_consumer_supply omap4_vdd1_supply[] = { - REGULATOR_SUPPLY("vcc", "mpu.0"), + REGULATOR_SUPPLY("vcc", "cpu0"), }; static struct regulator_consumer_supply omap4_vdd2_supply[] = { diff --git a/trunk/arch/arm/mach-omap2/vc.c b/trunk/arch/arm/mach-omap2/vc.c index 880249b17012..75878c37959b 100644 --- a/trunk/arch/arm/mach-omap2/vc.c +++ b/trunk/arch/arm/mach-omap2/vc.c @@ -264,7 +264,7 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) if (initialized) { if (voltdm->pmic->i2c_high_speed != i2c_high_speed) - pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).", + pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n", __func__, voltdm->name, i2c_high_speed); return; } diff --git a/trunk/arch/arm/mach-pxa/hx4700.c b/trunk/arch/arm/mach-pxa/hx4700.c index 5ecbd17b5641..e2c6391863fe 100644 --- a/trunk/arch/arm/mach-pxa/hx4700.c +++ b/trunk/arch/arm/mach-pxa/hx4700.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -556,7 +557,7 @@ static struct platform_device hx4700_lcd = { */ static struct platform_pwm_backlight_data backlight_data = { - .pwm_id = 1, + .pwm_id = -1, /* Superseded by pwm_lookup */ .max_brightness = 200, .dft_brightness = 100, .pwm_period_ns = 30923, @@ -571,6 +572,10 @@ static struct platform_device backlight = { }, }; +static struct pwm_lookup hx4700_pwm_lookup[] = { + PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL), +}; + /* * USB "Transceiver" */ @@ -872,6 +877,7 @@ static void __init hx4700_init(void) pxa_set_stuart_info(NULL); platform_add_devices(devices, ARRAY_SIZE(devices)); + pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup)); pxa_set_ficp_info(&ficp_info); pxa27x_set_i2c_power_info(NULL); diff --git a/trunk/arch/arm/mach-pxa/spitz_pm.c 
b/trunk/arch/arm/mach-pxa/spitz_pm.c index 438f02fe122a..842596d4d31e 100644 --- a/trunk/arch/arm/mach-pxa/spitz_pm.c +++ b/trunk/arch/arm/mach-pxa/spitz_pm.c @@ -86,10 +86,7 @@ static void spitz_discharge1(int on) gpio_set_value(SPITZ_GPIO_LED_GREEN, on); } -static unsigned long gpio18_config[] = { - GPIO18_RDY, - GPIO18_GPIO, -}; +static unsigned long gpio18_config = GPIO18_GPIO; static void spitz_presuspend(void) { @@ -112,7 +109,7 @@ static void spitz_presuspend(void) PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT; PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0); - pxa2xx_mfp_config(&gpio18_config[0], 1); + pxa2xx_mfp_config(&gpio18_config, 1); gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown"); gpio_free(18); @@ -131,7 +128,6 @@ static void spitz_presuspend(void) static void spitz_postsuspend(void) { - pxa2xx_mfp_config(&gpio18_config[1], 1); } static int spitz_should_wakeup(unsigned int resume_on_alarm) diff --git a/trunk/arch/arm/mm/alignment.c b/trunk/arch/arm/mm/alignment.c index 023f443784ec..b820edaf3184 100644 --- a/trunk/arch/arm/mm/alignment.c +++ b/trunk/arch/arm/mm/alignment.c @@ -745,7 +745,7 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs, static int do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - union offset_union offset; + union offset_union uninitialized_var(offset); unsigned long instr = 0, instrptr; int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); unsigned int type; diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c index f076f209c7a4..58bc3e4d3bd0 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -1036,8 +1036,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping, spin_unlock_irqrestore(&mapping->lock, flags); } -static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, - gfp_t gfp, struct dma_attrs *attrs) +static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) { struct page **pages; int count = size >> PAGE_SHIFT; @@ -1051,23 +1050,6 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, if (!pages) return NULL; - if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) - { - unsigned long order = get_order(size); - struct page *page; - - page = dma_alloc_from_contiguous(dev, count, order); - if (!page) - goto error; - - __dma_clear_buffer(page, size); - - for (i = 0; i < count; i++) - pages[i] = page + i; - - return pages; - } - while (count) { int j, order = __fls(count); @@ -1101,21 +1083,14 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, return NULL; } -static int __iommu_free_buffer(struct device *dev, struct page **pages, - size_t size, struct dma_attrs *attrs) +static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size) { int count = size >> PAGE_SHIFT; int array_size = count * sizeof(struct page *); int i; - - if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { - dma_release_from_contiguous(dev, pages[0], count); - } else { - for (i = 0; i < count; i++) - if (pages[i]) - __free_pages(pages[i], 0); - } - + for (i = 0; i < count; i++) + if (pages[i]) + __free_pages(pages[i], 0); if (array_size <= PAGE_SIZE) kfree(pages); else @@ -1277,7 +1252,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, if (gfp & GFP_ATOMIC) return __iommu_alloc_atomic(dev, size, handle); - pages = __iommu_alloc_buffer(dev, size, gfp, attrs); + pages = __iommu_alloc_buffer(dev, size, gfp); if 
(!pages) return NULL; @@ -1298,7 +1273,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, err_mapping: __iommu_remove_mapping(dev, *handle, size); err_buffer: - __iommu_free_buffer(dev, pages, size, attrs); + __iommu_free_buffer(dev, pages, size); return NULL; } @@ -1354,7 +1329,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, } __iommu_remove_mapping(dev, handle, size); - __iommu_free_buffer(dev, pages, size, attrs); + __iommu_free_buffer(dev, pages, size); } static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, diff --git a/trunk/arch/arm/plat-omap/i2c.c b/trunk/arch/arm/plat-omap/i2c.c index a5683a84c6ee..6013831a043e 100644 --- a/trunk/arch/arm/plat-omap/i2c.c +++ b/trunk/arch/arm/plat-omap/i2c.c @@ -26,12 +26,14 @@ #include #include #include +#include #include #include #include #include #include +#include #include #define OMAP_I2C_SIZE 0x3f @@ -127,6 +129,16 @@ static inline int omap1_i2c_add_bus(int bus_id) #ifdef CONFIG_ARCH_OMAP2PLUS +/* + * XXX This function is a temporary compatibility wrapper - only + * needed until the I2C driver can be converted to call + * omap_pm_set_max_dev_wakeup_lat() and handle a return code. + */ +static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t) +{ + omap_pm_set_max_mpu_wakeup_lat(dev, t); +} + static inline int omap2_i2c_add_bus(int bus_id) { int l; @@ -158,6 +170,15 @@ static inline int omap2_i2c_add_bus(int bus_id) dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr; pdata->flags = dev_attr->flags; + /* + * When waiting for completion of a i2c transfer, we need to + * set a wake up latency constraint for the MPU. This is to + * ensure quick enough wakeup from idle, when transfer + * completes. + * Only omap3 has support for constraints + */ + if (cpu_is_omap34xx()) + pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat; pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(struct omap_i2c_bus_platform_data), NULL, 0, 0); diff --git a/trunk/arch/arm/plat-omap/include/plat/omap_hwmod.h b/trunk/arch/arm/plat-omap/include/plat/omap_hwmod.h index b3349f7b1a2c..1db029438022 100644 --- a/trunk/arch/arm/plat-omap/include/plat/omap_hwmod.h +++ b/trunk/arch/arm/plat-omap/include/plat/omap_hwmod.h @@ -443,6 +443,11 @@ struct omap_hwmod_omap4_prcm { * in order to complete the reset. Optional clocks will be disabled * again after the reset. * HWMOD_16BIT_REG: Module has 16bit registers + * HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock source for + * this IP block comes from an off-chip source and is not always + * enabled. This prevents the hwmod code from being able to + * enable and reset the IP block early. XXX Eventually it should + * be possible to query the clock framework for this information. 
*/ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -453,6 +458,7 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_NO_IDLEST (1 << 6) #define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7) #define HWMOD_16BIT_REG (1 << 8) +#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) /* * omap_hwmod._int_flags definitions diff --git a/trunk/arch/arm/tools/Makefile b/trunk/arch/arm/tools/Makefile index cd60a81163e9..32d05c8219dc 100644 --- a/trunk/arch/arm/tools/Makefile +++ b/trunk/arch/arm/tools/Makefile @@ -5,6 +5,6 @@ # include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types - $(kecho) ' Generating $@' + @$(kecho) ' Generating $@' @mkdir -p $(dir $@) $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; } diff --git a/trunk/arch/arm/vfp/vfpmodule.c b/trunk/arch/arm/vfp/vfpmodule.c index c834b32af275..3b44e0dd0a93 100644 --- a/trunk/arch/arm/vfp/vfpmodule.c +++ b/trunk/arch/arm/vfp/vfpmodule.c @@ -701,11 +701,14 @@ static int __init vfp_init(void) elf_hwcap |= HWCAP_VFPv3; /* - * Check for VFPv3 D16. CPUs in this configuration - * only have 16 x 64bit registers. + * Check for VFPv3 D16 and VFPv4 D16. CPUs in + * this configuration only have 16 x 64bit + * registers. */ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) - elf_hwcap |= HWCAP_VFPv3D16; + elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */ + else + elf_hwcap |= HWCAP_VFPD32; } #endif /* diff --git a/trunk/arch/arm/xen/enlighten.c b/trunk/arch/arm/xen/enlighten.c index 59bcb96ac369..f57609275449 100644 --- a/trunk/arch/arm/xen/enlighten.c +++ b/trunk/arch/arm/xen/enlighten.c @@ -166,3 +166,14 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) *pages = NULL; } EXPORT_SYMBOL_GPL(free_xenballooned_pages); + +/* In the hypervisor.S file. */ +EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op); +EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op); +EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version); +EXPORT_SYMBOL_GPL(HYPERVISOR_console_io); +EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op); +EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op); +EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op); +EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op); +EXPORT_SYMBOL_GPL(privcmd_call); diff --git a/trunk/arch/arm64/Kconfig b/trunk/arch/arm64/Kconfig index ef54a59a9e89..15ac18a56c93 100644 --- a/trunk/arch/arm64/Kconfig +++ b/trunk/arch/arm64/Kconfig @@ -1,6 +1,7 @@ config ARM64 def_bool y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select GENERIC_CLOCKEVENTS select GENERIC_HARDIRQS_NO_DEPRECATED select GENERIC_IOMAP diff --git a/trunk/arch/arm64/include/asm/elf.h b/trunk/arch/arm64/include/asm/elf.h index cf284649dfcb..07fea290d7c1 100644 --- a/trunk/arch/arm64/include/asm/elf.h +++ b/trunk/arch/arm64/include/asm/elf.h @@ -25,12 +25,10 @@ #include typedef unsigned long elf_greg_t; -typedef unsigned long elf_freg_t[3]; #define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; - -typedef struct user_fp elf_fpregset_t; +typedef struct user_fpsimd_state elf_fpregset_t; #define EM_AARCH64 183 @@ -87,7 +85,6 @@ typedef struct user_fp elf_fpregset_t; #define R_AARCH64_MOVW_PREL_G2_NC 292 #define R_AARCH64_MOVW_PREL_G3 293 - /* * These are used to set parameters in the core dumps. 
*/ diff --git a/trunk/arch/arm64/include/asm/fpsimd.h b/trunk/arch/arm64/include/asm/fpsimd.h index b42fab9f62a9..c43b4ac13008 100644 --- a/trunk/arch/arm64/include/asm/fpsimd.h +++ b/trunk/arch/arm64/include/asm/fpsimd.h @@ -25,9 +25,8 @@ * - FPSR and FPCR * - 32 128-bit data registers * - * Note that user_fp forms a prefix of this structure, which is relied - * upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must - * form a prefix of struct fpsimd_state. + * Note that user_fpsimd forms a prefix of this structure, which is + * relied upon in the ptrace FP/SIMD accessors. */ struct fpsimd_state { union { diff --git a/trunk/arch/arm64/include/asm/io.h b/trunk/arch/arm64/include/asm/io.h index 74a2a7d304a9..d2f05a608274 100644 --- a/trunk/arch/arm64/include/asm/io.h +++ b/trunk/arch/arm64/include/asm/io.h @@ -114,7 +114,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * I/O port access primitives. */ #define IO_SPACE_LIMIT 0xffff -#define PCI_IOBASE ((void __iomem *)0xffffffbbfffe0000UL) +#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M)) static inline u8 inb(unsigned long addr) { @@ -222,12 +222,12 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot extern void __iounmap(volatile void __iomem *addr); #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) -#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) -#define ioremap(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE) -#define ioremap_nocache(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE) -#define ioremap_wc(addr, size) __ioremap((addr), (size), PROT_NORMAL_NC) +#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) +#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) +#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) #define iounmap __iounmap #define ARCH_HAS_IOREMAP_WC diff --git a/trunk/arch/arm64/include/asm/pgtable-hwdef.h b/trunk/arch/arm64/include/asm/pgtable-hwdef.h index 0f3b4581d925..75fd13d289b9 100644 --- a/trunk/arch/arm64/include/asm/pgtable-hwdef.h +++ b/trunk/arch/arm64/include/asm/pgtable-hwdef.h @@ -38,7 +38,8 @@ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) -#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) +#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54) /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). @@ -57,7 +58,8 @@ #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ #define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ #define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */ -#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ +#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */ +#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */ /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). 
diff --git a/trunk/arch/arm64/include/asm/pgtable.h b/trunk/arch/arm64/include/asm/pgtable.h index 8960239be722..14aba2db6776 100644 --- a/trunk/arch/arm64/include/asm/pgtable.h +++ b/trunk/arch/arm64/include/asm/pgtable.h @@ -62,23 +62,23 @@ extern pgprot_t pgprot_default; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN) -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG) -#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY) -#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY) -#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY) -#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY) - -#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN) -#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG) -#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY) -#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY) +#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) +#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) +#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) + +#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) +#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) #endif /* __ASSEMBLY__ */ @@ -130,10 +130,10 @@ extern struct page *empty_zero_page; #define pte_young(pte) (pte_val(pte) & PTE_AF) #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) -#define pte_exec(pte) (!(pte_val(pte) & PTE_XN)) +#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) #define pte_present_exec_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \ + ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \ (PTE_VALID | PTE_USER)) 
#define PTE_BIT_FUNC(fn,op) \ @@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY; + const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/trunk/arch/arm64/include/asm/processor.h b/trunk/arch/arm64/include/asm/processor.h index 5d810044feda..77f696c14339 100644 --- a/trunk/arch/arm64/include/asm/processor.h +++ b/trunk/arch/arm64/include/asm/processor.h @@ -43,6 +43,8 @@ #else #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ + +#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK #endif /* __KERNEL__ */ struct debug_info { diff --git a/trunk/arch/arm64/include/asm/unistd.h b/trunk/arch/arm64/include/asm/unistd.h index 63f853f8b718..68aff2816e86 100644 --- a/trunk/arch/arm64/include/asm/unistd.h +++ b/trunk/arch/arm64/include/asm/unistd.h @@ -14,7 +14,6 @@ * along with this program. If not, see . */ #ifdef CONFIG_COMPAT -#define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION #define __ARCH_WANT_COMPAT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE diff --git a/trunk/arch/arm64/kernel/perf_event.c b/trunk/arch/arm64/kernel/perf_event.c index ecbf2d81ec5c..c76c7241125b 100644 --- a/trunk/arch/arm64/kernel/perf_event.c +++ b/trunk/arch/arm64/kernel/perf_event.c @@ -613,17 +613,11 @@ enum armv8_pmuv3_perf_types { ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19, ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A, ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, - - /* - * This isn't an architected event. - * We detect this event number and use the cycle counter instead. - */ - ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF, }; /* PMUv3 HW events mapping. */ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, @@ -1106,7 +1100,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; /* Always place a cycle counter into the cycle counter. */ - if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { + if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) return -EAGAIN; diff --git a/trunk/arch/arm64/kernel/process.c b/trunk/arch/arm64/kernel/process.c index f22965ea1cfc..e04cebdbb47f 100644 --- a/trunk/arch/arm64/kernel/process.c +++ b/trunk/arch/arm64/kernel/process.c @@ -309,24 +309,6 @@ struct task_struct *__switch_to(struct task_struct *prev, return last; } -/* - * Fill in the task's elfregs structure for a core dump. - */ -int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) -{ - elf_core_copy_regs(elfregs, task_pt_regs(t)); - return 1; -} - -/* - * fill in the fpe structure for a core dump... - */ -int dump_fpu (struct pt_regs *regs, struct user_fp *fp) -{ - return 0; -} -EXPORT_SYMBOL(dump_fpu); - /* * Shuffle the argument into the correct register before calling the * thread function. 
x1 is the thread argument, x2 is the pointer to diff --git a/trunk/arch/arm64/kernel/smp.c b/trunk/arch/arm64/kernel/smp.c index 226b6bf6e9c2..538300f2273d 100644 --- a/trunk/arch/arm64/kernel/smp.c +++ b/trunk/arch/arm64/kernel/smp.c @@ -211,8 +211,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void) * before we continue. */ set_cpu_online(cpu, true); - while (!cpu_active(cpu)) - cpu_relax(); + complete(&cpu_running); /* * OK, it's off to the idle thread for us diff --git a/trunk/arch/arm64/mm/init.c b/trunk/arch/arm64/mm/init.c index efbf7df05d3f..4cd28931dba9 100644 --- a/trunk/arch/arm64/mm/init.c +++ b/trunk/arch/arm64/mm/init.c @@ -80,7 +80,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) #ifdef CONFIG_ZONE_DMA32 /* 4GB maximum for 32-bit only capable devices */ max_dma32 = min(max, MAX_DMA32_PFN); - zone_size[ZONE_DMA32] = max_dma32 - min; + zone_size[ZONE_DMA32] = max(min, max_dma32) - min; #endif zone_size[ZONE_NORMAL] = max - max_dma32; diff --git a/trunk/arch/h8300/include/asm/cache.h b/trunk/arch/h8300/include/asm/cache.h index c6350283649d..05887a1d80e5 100644 --- a/trunk/arch/h8300/include/asm/cache.h +++ b/trunk/arch/h8300/include/asm/cache.h @@ -2,7 +2,8 @@ #define __ARCH_H8300_CACHE_H /* bytes per L1 cache line */ -#define L1_CACHE_BYTES 4 +#define L1_CACHE_SHIFT 2 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) /* m68k-elf-gcc 2.95.2 doesn't like these */ diff --git a/trunk/arch/ia64/mm/init.c b/trunk/arch/ia64/mm/init.c index acd5b68e8871..082e383c1b6f 100644 --- a/trunk/arch/ia64/mm/init.c +++ b/trunk/arch/ia64/mm/init.c @@ -637,7 +637,6 @@ mem_init (void) high_memory = __va(max_low_pfn * PAGE_SIZE); - reset_zone_present_pages(); for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) totalram_pages += free_all_bootmem_node(pgdat); diff --git a/trunk/arch/m68k/include/asm/signal.h b/trunk/arch/m68k/include/asm/signal.h index 67e489d8d1bd..2df26b57c26a 100644 --- a/trunk/arch/m68k/include/asm/signal.h +++ b/trunk/arch/m68k/include/asm/signal.h @@ -41,7 +41,7 @@ struct k_sigaction { static inline void sigaddset(sigset_t *set, int _sig) { asm ("bfset %0{%1,#1}" - : "+od" (*set) + : "+o" (*set) : "id" ((_sig - 1) ^ 31) : "cc"); } @@ -49,7 +49,7 @@ static inline void sigaddset(sigset_t *set, int _sig) static inline void sigdelset(sigset_t *set, int _sig) { asm ("bfclr %0{%1,#1}" - : "+od" (*set) + : "+o" (*set) : "id" ((_sig - 1) ^ 31) : "cc"); } @@ -65,7 +65,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig) int ret; asm ("bfextu %1{%2,#1},%0" : "=d" (ret) - : "od" (*set), "id" ((_sig-1) ^ 31) + : "o" (*set), "id" ((_sig-1) ^ 31) : "cc"); return ret; } diff --git a/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c index d38246e33ddb..9f883bf76953 100644 --- a/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c +++ b/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c @@ -30,6 +30,7 @@ * measurement, and debugging facilities. 
*/ +#include #include #include #include diff --git a/trunk/arch/mips/fw/arc/misc.c b/trunk/arch/mips/fw/arc/misc.c index 7cf80ca2c1d2..f9f5307434c2 100644 --- a/trunk/arch/mips/fw/arc/misc.c +++ b/trunk/arch/mips/fw/arc/misc.c @@ -11,6 +11,7 @@ */ #include #include +#include #include diff --git a/trunk/arch/mips/include/asm/bitops.h b/trunk/arch/mips/include/asm/bitops.h index 82ad35ce2b45..46ac73abd5ee 100644 --- a/trunk/arch/mips/include/asm/bitops.h +++ b/trunk/arch/mips/include/asm/bitops.h @@ -14,7 +14,6 @@ #endif #include -#include #include #include #include /* sigh ... */ @@ -44,6 +43,24 @@ #define smp_mb__before_clear_bit() smp_mb__before_llsc() #define smp_mb__after_clear_bit() smp_llsc_mb() + +/* + * These are the "slower" versions of the functions and are in bitops.c. + * These functions call raw_local_irq_{save,restore}(). + */ +void __mips_set_bit(unsigned long nr, volatile unsigned long *addr); +void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr); +void __mips_change_bit(unsigned long nr, volatile unsigned long *addr); +int __mips_test_and_set_bit(unsigned long nr, + volatile unsigned long *addr); +int __mips_test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *addr); +int __mips_test_and_clear_bit(unsigned long nr, + volatile unsigned long *addr); +int __mips_test_and_change_bit(unsigned long nr, + volatile unsigned long *addr); + + /* * set_bit - Atomically set a bit in memory * @nr: the bit to set @@ -57,7 +74,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long temp; if (kernel_uses_llsc && R10000_LLSC_WAR) { @@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) : "=&r" (temp), "+m" (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - *a |= mask; - raw_local_irq_restore(flags); - } + } else + __mips_set_bit(nr, addr); } /* @@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long temp; if (kernel_uses_llsc && R10000_LLSC_WAR) { @@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) : "=&r" (temp), "+m" (*m) : "ir" (~(1UL << bit))); } while (unlikely(!temp)); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - *a &= ~mask; - raw_local_irq_restore(flags); - } + } else + __mips_clear_bit(nr, addr); } /* @@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad */ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; if (kernel_uses_llsc && R10000_LLSC_WAR) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); @@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) : "=&r" (temp), "+m" (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); - } 
else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - *a ^= mask; - raw_local_irq_restore(flags); - } + } else + __mips_change_bit(nr, addr); } /* @@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long res; smp_mb__before_llsc(); @@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr, } while (unlikely(!res)); res = temp & (1UL << bit); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - res = (mask & *a); - *a |= mask; - raw_local_irq_restore(flags); - } + } else + res = __mips_test_and_set_bit(nr, addr); smp_llsc_mb(); @@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr, static inline int test_and_set_bit_lock(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long res; if (kernel_uses_llsc && R10000_LLSC_WAR) { @@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr, } while (unlikely(!res)); res = temp & (1UL << bit); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - res = (mask & *a); - *a |= mask; - raw_local_irq_restore(flags); - } + } else + res = __mips_test_and_set_bit_lock(nr, addr); smp_llsc_mb(); @@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr, static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long res; smp_mb__before_llsc(); @@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr, } while (unlikely(!res)); res = temp & (1UL << bit); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - res = (mask & *a); - *a &= ~mask; - raw_local_irq_restore(flags); - } + } else + res = __mips_test_and_clear_bit(nr, addr); smp_llsc_mb(); @@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr, static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long res; smp_mb__before_llsc(); @@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr, } while (unlikely(!res)); res = temp & (1UL << bit); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - res = (mask & *a); - *a ^= mask; - raw_local_irq_restore(flags); - } + } else + res = __mips_test_and_change_bit(nr, addr); smp_llsc_mb(); diff --git a/trunk/arch/mips/include/asm/compat.h b/trunk/arch/mips/include/asm/compat.h index 58277e0e9cd4..3c5d1464b7bd 100644 --- a/trunk/arch/mips/include/asm/compat.h +++ b/trunk/arch/mips/include/asm/compat.h @@ -290,7 +290,7 @@ struct compat_shmid64_ds { static inline int is_compat_task(void) { - return test_thread_flag(TIF_32BIT); + return test_thread_flag(TIF_32BIT_ADDR); } 
#endif /* _ASM_COMPAT_H */ diff --git a/trunk/arch/mips/include/asm/io.h b/trunk/arch/mips/include/asm/io.h index 29d9c23c20c7..ff2e0345e013 100644 --- a/trunk/arch/mips/include/asm/io.h +++ b/trunk/arch/mips/include/asm/io.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/mips/include/asm/irqflags.h b/trunk/arch/mips/include/asm/irqflags.h index 309cbcd6909c..9f3384c789d7 100644 --- a/trunk/arch/mips/include/asm/irqflags.h +++ b/trunk/arch/mips/include/asm/irqflags.h @@ -16,83 +16,13 @@ #include #include -__asm__( - " .macro arch_local_irq_enable \n" - " .set push \n" - " .set reorder \n" - " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" - " ori $1, 0x400 \n" - " xori $1, 0x400 \n" - " mtc0 $1, $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) - " ei \n" -#else - " mfc0 $1,$12 \n" - " ori $1,0x1f \n" - " xori $1,0x1e \n" - " mtc0 $1,$12 \n" -#endif - " irq_enable_hazard \n" - " .set pop \n" - " .endm"); +#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) -extern void smtc_ipi_replay(void); - -static inline void arch_local_irq_enable(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of call overhead on each local_irq_enable() - */ - smtc_ipi_replay(); -#endif - __asm__ __volatile__( - "arch_local_irq_enable" - : /* no outputs */ - : /* no inputs */ - : "memory"); -} - - -/* - * For cli() we have to insert nops to make sure that the new value - * has actually arrived in the status register before the end of this - * macro. - * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs - * no nops at all. - */ -/* - * For TX49, operating only IE bit is not enough. - * - * If mfc0 $12 follows store and the mfc0 is last instruction of a - * page and fetching the next instruction causes TLB miss, the result - * of the mfc0 might wrongly contain EXL bit. - * - * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008 - * - * Workaround: mask EXL bit of the result or place a nop before mfc0. 
- */ __asm__( " .macro arch_local_irq_disable\n" " .set push \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 $1, $2, 1 \n" - " ori $1, 0x400 \n" - " .set noreorder \n" - " mtc0 $1, $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) " di \n" -#else - " mfc0 $1,$12 \n" - " ori $1,0x1f \n" - " xori $1,0x1f \n" - " .set noreorder \n" - " mtc0 $1,$12 \n" -#endif " irq_disable_hazard \n" " .set pop \n" " .endm \n"); @@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void) : "memory"); } -__asm__( - " .macro arch_local_save_flags flags \n" - " .set push \n" - " .set reorder \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 \\flags, $2, 1 \n" -#else - " mfc0 \\flags, $12 \n" -#endif - " .set pop \n" - " .endm \n"); - -static inline unsigned long arch_local_save_flags(void) -{ - unsigned long flags; - asm volatile("arch_local_save_flags %0" : "=r" (flags)); - return flags; -} __asm__( " .macro arch_local_irq_save result \n" " .set push \n" " .set reorder \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 \\result, $2, 1 \n" - " ori $1, \\result, 0x400 \n" - " .set noreorder \n" - " mtc0 $1, $2, 1 \n" - " andi \\result, \\result, 0x400 \n" -#elif defined(CONFIG_CPU_MIPSR2) " di \\result \n" " andi \\result, 1 \n" -#else - " mfc0 \\result, $12 \n" - " ori $1, \\result, 0x1f \n" - " xori $1, 0x1f \n" - " .set noreorder \n" - " mtc0 $1, $12 \n" -#endif " irq_disable_hazard \n" " .set pop \n" " .endm \n"); @@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void) return flags; } + __asm__( " .macro arch_local_irq_restore flags \n" " .set push \n" " .set noreorder \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - "mfc0 $1, $2, 1 \n" - "andi \\flags, 0x400 \n" - "ori $1, 0x400 \n" - "xori $1, 0x400 \n" - "or \\flags, $1 \n" - "mtc0 \\flags, $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) +#if defined(CONFIG_IRQ_CPU) /* * Slow, but doesn't suffer from a relatively unlikely race * condition we're having since days 1. */ " beqz \\flags, 1f \n" - " di \n" + " di \n" " ei \n" "1: \n" -#elif defined(CONFIG_CPU_MIPSR2) +#else /* * Fast, dangerous. Life is fun, life is good. 
*/ " mfc0 $1, $12 \n" " ins $1, \\flags, 0, 1 \n" " mtc0 $1, $12 \n" -#else - " mfc0 $1, $12 \n" - " andi \\flags, 1 \n" - " ori $1, 0x1f \n" - " xori $1, 0x1f \n" - " or \\flags, $1 \n" - " mtc0 \\flags, $12 \n" #endif " irq_disable_hazard \n" " .set pop \n" " .endm \n"); - static inline void arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of branch and call overhead on each - * local_irq_restore() - */ - if (unlikely(!(flags & 0x0400))) - smtc_ipi_replay(); -#endif - __asm__ __volatile__( "arch_local_irq_restore\t%0" : "=r" (__tmp1) @@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags) : "0" (flags) : "memory"); } +#else +/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ +void arch_local_irq_disable(void); +unsigned long arch_local_irq_save(void); +void arch_local_irq_restore(unsigned long flags); +void __arch_local_irq_restore(unsigned long flags); +#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ + + +__asm__( + " .macro arch_local_irq_enable \n" + " .set push \n" + " .set reorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" + " ori $1, 0x400 \n" + " xori $1, 0x400 \n" + " mtc0 $1, $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) + " ei \n" +#else + " mfc0 $1,$12 \n" + " ori $1,0x1f \n" + " xori $1,0x1e \n" + " mtc0 $1,$12 \n" +#endif + " irq_enable_hazard \n" + " .set pop \n" + " .endm"); + +extern void smtc_ipi_replay(void); + +static inline void arch_local_irq_enable(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of call overhead on each local_irq_enable() + */ + smtc_ipi_replay(); +#endif + __asm__ __volatile__( + "arch_local_irq_enable" + : /* no outputs */ + : /* no inputs */ + : "memory"); +} + + +__asm__( + " .macro arch_local_save_flags flags \n" + " .set push \n" + " .set reorder \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 \\flags, $2, 1 \n" +#else + " mfc0 \\flags, $12 \n" +#endif + " .set pop \n" + " .endm \n"); + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("arch_local_save_flags %0" : "=r" (flags)); + return flags; +} + static inline int arch_irqs_disabled_flags(unsigned long flags) { @@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) #endif } -#endif +#endif /* #ifndef __ASSEMBLY__ */ /* * Do the CPU's IRQ-state tracing from assembly code. 
diff --git a/trunk/arch/mips/include/asm/thread_info.h b/trunk/arch/mips/include/asm/thread_info.h index 8debe9e91754..18806a52061c 100644 --- a/trunk/arch/mips/include/asm/thread_info.h +++ b/trunk/arch/mips/include/asm/thread_info.h @@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define TIF_LOAD_WATCH 25 /* If set, load watch registers */ #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ -#ifdef CONFIG_MIPS32_O32 -#define TIF_32BIT TIF_32BIT_REGS -#elif defined(CONFIG_MIPS32_N32) -#define TIF_32BIT _TIF_32BIT_ADDR -#endif /* CONFIG_MIPS32_O32 */ - #define _TIF_SYSCALL_TRACE (1<addr + prev->size == start && prev->type == type) { - prev->size += size; + for (i = 0; i < boot_mem_map.nr_map; i++) { + struct boot_mem_map_entry *entry = boot_mem_map.map + i; + unsigned long top; + + if (entry->type != type) + continue; + + if (start + size < entry->addr) + continue; /* no overlap */ + + if (entry->addr + entry->size < start) + continue; /* no overlap */ + + top = max(entry->addr + entry->size, start + size); + entry->addr = min(entry->addr, start); + entry->size = top - entry->addr; + return; } - if (x == BOOT_MEM_MAP_MAX) { + if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) { pr_err("Ooops! Too many entries in the memory map!\n"); return; } diff --git a/trunk/arch/mips/lib/Makefile b/trunk/arch/mips/lib/Makefile index c4a82e841c73..eeddc58802e1 100644 --- a/trunk/arch/mips/lib/Makefile +++ b/trunk/arch/mips/lib/Makefile @@ -2,8 +2,9 @@ # Makefile for MIPS-specific library files.. # -lib-y += csum_partial.o delay.o memcpy.o memset.o \ - strlen_user.o strncpy_user.o strnlen_user.o uncached.o +lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ + mips-atomic.o strlen_user.o strncpy_user.o \ + strnlen_user.o uncached.o obj-y += iomap.o obj-$(CONFIG_PCI) += iomap-pci.o diff --git a/trunk/arch/mips/lib/bitops.c b/trunk/arch/mips/lib/bitops.c new file mode 100644 index 000000000000..239a9c957b02 --- /dev/null +++ b/trunk/arch/mips/lib/bitops.c @@ -0,0 +1,179 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) + * Copyright (c) 1999, 2000 Silicon Graphics, Inc. + */ +#include +#include +#include + + +/** + * __mips_set_bit - Atomically set a bit in memory. This is called by + * set_bit() if it cannot find a faster solution. + * @nr: the bit to set + * @addr: the address to start counting from + */ +void __mips_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a |= mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_set_bit); + + +/** + * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if + * it cannot find a faster solution. + * @nr: Bit to clear + * @addr: Address to start counting from + */ +void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a &= ~mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_clear_bit); + + +/** + * __mips_change_bit - Toggle a bit in memory. 
This is called by change_bit() + * if it cannot find a faster solution. + * @nr: Bit to change + * @addr: Address to start counting from + */ +void __mips_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a ^= mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_change_bit); + + +/** + * __mips_test_and_set_bit - Set a bit and return its old value. This is + * called by test_and_set_bit() if it cannot find a faster solution. + * @nr: Bit to set + * @addr: Address to count from + */ +int __mips_test_and_set_bit(unsigned long nr, + volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + unsigned long res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a |= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_set_bit); + + +/** + * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is + * called by test_and_set_bit_lock() if it cannot find a faster solution. + * @nr: Bit to set + * @addr: Address to count from + */ +int __mips_test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + unsigned long res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a |= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_set_bit_lock); + + +/** + * __mips_test_and_clear_bit - Clear a bit and return its old value. This is + * called by test_and_clear_bit() if it cannot find a faster solution. + * @nr: Bit to clear + * @addr: Address to count from + */ +int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + unsigned long res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a &= ~mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_clear_bit); + + +/** + * __mips_test_and_change_bit - Change a bit and return its old value. This is + * called by test_and_change_bit() if it cannot find a faster solution. + * @nr: Bit to change + * @addr: Address to count from + */ +int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + unsigned long res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a ^= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_change_bit); diff --git a/trunk/arch/mips/lib/mips-atomic.c b/trunk/arch/mips/lib/mips-atomic.c new file mode 100644 index 000000000000..cd160be3ce4d --- /dev/null +++ b/trunk/arch/mips/lib/mips-atomic.c @@ -0,0 +1,176 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle + * Copyright (C) 1996 by Paul M. Antoine + * Copyright (C) 1999 Silicon Graphics + * Copyright (C) 2000 MIPS Technologies, Inc. + */ +#include +#include +#include +#include +#include + +#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) + +/* + * For cli() we have to insert nops to make sure that the new value + * has actually arrived in the status register before the end of this + * macro. + * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs + * no nops at all. + */ +/* + * For TX49, operating only IE bit is not enough. + * + * If mfc0 $12 follows store and the mfc0 is last instruction of a + * page and fetching the next instruction causes TLB miss, the result + * of the mfc0 might wrongly contain EXL bit. + * + * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008 + * + * Workaround: mask EXL bit of the result or place a nop before mfc0. + */ +__asm__( + " .macro arch_local_irq_disable\n" + " .set push \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 $1, $2, 1 \n" + " ori $1, 0x400 \n" + " .set noreorder \n" + " mtc0 $1, $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1,$12 \n" + " ori $1,0x1f \n" + " xori $1,0x1f \n" + " .set noreorder \n" + " mtc0 $1,$12 \n" +#endif + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +notrace void arch_local_irq_disable(void) +{ + preempt_disable(); + __asm__ __volatile__( + "arch_local_irq_disable" + : /* no outputs */ + : /* no inputs */ + : "memory"); + preempt_enable(); +} +EXPORT_SYMBOL(arch_local_irq_disable); + + +__asm__( + " .macro arch_local_irq_save result \n" + " .set push \n" + " .set reorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 \\result, $2, 1 \n" + " ori $1, \\result, 0x400 \n" + " .set noreorder \n" + " mtc0 $1, $2, 1 \n" + " andi \\result, \\result, 0x400 \n" +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 \\result, $12 \n" + " ori $1, \\result, 0x1f \n" + " xori $1, 0x1f \n" + " .set noreorder \n" + " mtc0 $1, $12 \n" +#endif + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +notrace unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + preempt_disable(); + asm volatile("arch_local_irq_save\t%0" + : "=r" (flags) + : /* no inputs */ + : "memory"); + preempt_enable(); + return flags; +} +EXPORT_SYMBOL(arch_local_irq_save); + + +__asm__( + " .macro arch_local_irq_restore flags \n" + " .set push \n" + " .set noreorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + "mfc0 $1, $2, 1 \n" + "andi \\flags, 0x400 \n" + "ori $1, 0x400 \n" + "xori $1, 0x400 \n" + "or \\flags, $1 \n" + "mtc0 \\flags, $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) + /* see irqflags.h for inline function */ +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1, $12 \n" + " andi \\flags, 1 \n" + " ori $1, 0x1f \n" + " xori $1, 0x1f \n" + " or \\flags, $1 \n" + " mtc0 \\flags, $12 \n" +#endif + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +notrace void arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of branch and call overhead on each + * local_irq_restore() + */ + if (unlikely(!(flags & 0x0400))) + smtc_ipi_replay(); +#endif + preempt_disable(); + __asm__ __volatile__( + 
"arch_local_irq_restore\t%0" + : "=r" (__tmp1) + : "0" (flags) + : "memory"); + preempt_enable(); +} +EXPORT_SYMBOL(arch_local_irq_restore); + + +notrace void __arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + + preempt_disable(); + __asm__ __volatile__( + "arch_local_irq_restore\t%0" + : "=r" (__tmp1) + : "0" (flags) + : "memory"); + preempt_enable(); +} +EXPORT_SYMBOL(__arch_local_irq_restore); + +#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ diff --git a/trunk/arch/mips/mti-malta/malta-platform.c b/trunk/arch/mips/mti-malta/malta-platform.c index 80562b81f0f2..74732177851c 100644 --- a/trunk/arch/mips/mti-malta/malta-platform.c +++ b/trunk/arch/mips/mti-malta/malta-platform.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #define SMC_PORT(base, int) \ @@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = { SMC_PORT(0x2F8, 3), { .mapbase = 0x1f000900, /* The CBUS UART */ - .irq = MIPS_CPU_IRQ_BASE + 2, + .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2, .uartclk = 3686400, /* Twice the usual clk! */ .iotype = UPIO_MEM32, .flags = CBUS_UART_FLAGS, diff --git a/trunk/arch/parisc/kernel/signal32.c b/trunk/arch/parisc/kernel/signal32.c index fd49aeda9eb8..5dede04f2f3e 100644 --- a/trunk/arch/parisc/kernel/signal32.c +++ b/trunk/arch/parisc/kernel/signal32.c @@ -65,7 +65,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) { compat_sigset_t s; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; sigset_64to32(&s, set); return copy_to_user(up, &s, sizeof s); @@ -77,7 +78,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) compat_sigset_t s; int r; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; if ((r = copy_from_user(&s, up, sz)) == 0) { sigset_32to64(set, &s); diff --git a/trunk/arch/parisc/kernel/sys_parisc.c b/trunk/arch/parisc/kernel/sys_parisc.c index 7426e40699bd..f76c10863c62 100644 --- a/trunk/arch/parisc/kernel/sys_parisc.c +++ b/trunk/arch/parisc/kernel/sys_parisc.c @@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping, struct vm_area_struct *vma; int offset = mapping ? 
get_offset(mapping) : 0; + offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000; + addr = DCACHE_ALIGN(addr - offset) + offset; for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { diff --git a/trunk/arch/powerpc/boot/dts/mpc5200b.dtsi b/trunk/arch/powerpc/boot/dts/mpc5200b.dtsi index 7ab286ab5300..39ed65a44c5f 100644 --- a/trunk/arch/powerpc/boot/dts/mpc5200b.dtsi +++ b/trunk/arch/powerpc/boot/dts/mpc5200b.dtsi @@ -231,6 +231,12 @@ interrupts = <2 7 0>; }; + sclpc@3c00 { + compatible = "fsl,mpc5200-lpbfifo"; + reg = <0x3c00 0x60>; + interrupts = <2 23 0>; + }; + i2c@3d00 { #address-cells = <1>; #size-cells = <0>; diff --git a/trunk/arch/powerpc/boot/dts/o2d.dtsi b/trunk/arch/powerpc/boot/dts/o2d.dtsi index 3444eb8f0ade..24f668039295 100644 --- a/trunk/arch/powerpc/boot/dts/o2d.dtsi +++ b/trunk/arch/powerpc/boot/dts/o2d.dtsi @@ -86,12 +86,6 @@ reg = <0>; }; }; - - sclpc@3c00 { - compatible = "fsl,mpc5200-lpbfifo"; - reg = <0x3c00 0x60>; - interrupts = <3 23 0>; - }; }; localbus { diff --git a/trunk/arch/powerpc/boot/dts/pcm030.dts b/trunk/arch/powerpc/boot/dts/pcm030.dts index 9e354997eb7e..96512c058033 100644 --- a/trunk/arch/powerpc/boot/dts/pcm030.dts +++ b/trunk/arch/powerpc/boot/dts/pcm030.dts @@ -59,7 +59,7 @@ #gpio-cells = <2>; }; - psc@2000 { /* PSC1 in ac97 mode */ + audioplatform: psc@2000 { /* PSC1 in ac97 mode */ compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97"; cell-index = <0>; }; @@ -134,4 +134,9 @@ localbus { status = "disabled"; }; + + sound { + compatible = "phytec,pcm030-audio-fabric"; + asoc-platform = <&audioplatform>; + }; }; diff --git a/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 8520b58a5e9a..b89ef65392dc 100644 --- a/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -372,10 +372,11 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break; case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break; case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break; - default: - pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n", - __func__, virq, l1irq, l2irq); - return -EINVAL; + case MPC52xx_IRQ_L1_CRIT: + pr_warn("%s: Critical IRQ #%d is unsupported! 
Nopping it.\n", + __func__, l2irq); + irq_set_chip(virq, &no_irq_chip); + return 0; } irq_set_chip_and_handler(virq, irqchip, handle_level_irq); diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c index 797cd181dc3f..d16c8ded1084 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c @@ -449,7 +449,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) if (list_empty(&pe->edevs)) { cnt = 0; list_for_each_entry(child, &pe->child_list, child) { - if (!(pe->type & EEH_PE_INVALID)) { + if (!(child->type & EEH_PE_INVALID)) { cnt++; break; } diff --git a/trunk/arch/powerpc/platforms/pseries/msi.c b/trunk/arch/powerpc/platforms/pseries/msi.c index d19f4977c834..e5b084723131 100644 --- a/trunk/arch/powerpc/platforms/pseries/msi.c +++ b/trunk/arch/powerpc/platforms/pseries/msi.c @@ -220,7 +220,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) /* Get the top level device in the PE */ edev = of_node_to_eeh_dev(dn); - edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); + if (edev->pe) + edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); dn = eeh_dev_to_of_node(edev); if (!dn) return NULL; diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 5dba755a43e6..d385f396dfee 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -96,6 +96,7 @@ config S390 select HAVE_MEMBLOCK_NODE_MAP select HAVE_CMPXCHG_LOCAL select HAVE_CMPXCHG_DOUBLE + select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_VIRT_CPU_ACCOUNTING select VIRT_CPU_ACCOUNTING select ARCH_DISCARD_MEMBLOCK diff --git a/trunk/arch/s390/include/asm/cio.h b/trunk/arch/s390/include/asm/cio.h index 55bde6035216..ad2b924167d7 100644 --- a/trunk/arch/s390/include/asm/cio.h +++ b/trunk/arch/s390/include/asm/cio.h @@ -9,6 +9,8 @@ #define LPM_ANYPATH 0xff #define __MAX_CSSID 0 +#define __MAX_SUBCHANNEL 65535 +#define __MAX_SSID 3 #include diff --git a/trunk/arch/s390/include/asm/compat.h b/trunk/arch/s390/include/asm/compat.h index a34a9d612fc0..18cd6b592650 100644 --- a/trunk/arch/s390/include/asm/compat.h +++ b/trunk/arch/s390/include/asm/compat.h @@ -20,7 +20,7 @@ #define PSW32_MASK_CC 0x00003000UL #define PSW32_MASK_PM 0x00000f00UL -#define PSW32_MASK_USER 0x00003F00UL +#define PSW32_MASK_USER 0x0000FF00UL #define PSW32_ADDR_AMODE 0x80000000UL #define PSW32_ADDR_INSN 0x7FFFFFFFUL diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h index dd647c919a66..2d3b7cb26005 100644 --- a/trunk/arch/s390/include/asm/pgtable.h +++ b/trunk/arch/s390/include/asm/pgtable.h @@ -506,12 +506,15 @@ static inline int pud_bad(pud_t pud) static inline int pmd_present(pmd_t pmd) { - return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL; + unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO; + return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE || + !(pmd_val(pmd) & _SEGMENT_ENTRY_INV); } static inline int pmd_none(pmd_t pmd) { - return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL; + return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) && + !(pmd_val(pmd) & _SEGMENT_ENTRY_RO); } static inline int pmd_large(pmd_t pmd) @@ -1223,6 +1226,11 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE + +#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) +#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) +#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) + #define __HAVE_ARCH_PGTABLE_DEPOSIT extern void 
pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable); @@ -1242,16 +1250,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) { - unsigned long pgprot_pmd = 0; - - if (pgprot_val(pgprot) & _PAGE_INVALID) { - if (pgprot_val(pgprot) & _PAGE_SWT) - pgprot_pmd |= _HPAGE_TYPE_NONE; - pgprot_pmd |= _SEGMENT_ENTRY_INV; - } - if (pgprot_val(pgprot) & _PAGE_RO) - pgprot_pmd |= _SEGMENT_ENTRY_RO; - return pgprot_pmd; + /* + * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx) + * Convert to segment table entry format. + */ + if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) + return pgprot_val(SEGMENT_NONE); + if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) + return pgprot_val(SEGMENT_RO); + return pgprot_val(SEGMENT_RW); } static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) @@ -1269,7 +1276,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd) static inline pmd_t pmd_mkwrite(pmd_t pmd) { - pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; + /* Do not clobber _HPAGE_TYPE_NONE pages! */ + if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV)) + pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; return pmd; } diff --git a/trunk/arch/s390/include/asm/topology.h b/trunk/arch/s390/include/asm/topology.h index 9ca305383760..9935cbd6a46f 100644 --- a/trunk/arch/s390/include/asm/topology.h +++ b/trunk/arch/s390/include/asm/topology.h @@ -8,6 +8,9 @@ struct cpu; #ifdef CONFIG_SCHED_BOOK +extern unsigned char cpu_socket_id[NR_CPUS]; +#define topology_physical_package_id(cpu) (cpu_socket_id[cpu]) + extern unsigned char cpu_core_id[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS]; diff --git a/trunk/arch/s390/include/uapi/asm/ptrace.h b/trunk/arch/s390/include/uapi/asm/ptrace.h index 705588a16d70..a5ca214b34fd 100644 --- a/trunk/arch/s390/include/uapi/asm/ptrace.h +++ b/trunk/arch/s390/include/uapi/asm/ptrace.h @@ -239,7 +239,7 @@ typedef struct #define PSW_MASK_EA 0x00000000UL #define PSW_MASK_BA 0x00000000UL -#define PSW_MASK_USER 0x00003F00UL +#define PSW_MASK_USER 0x0000FF00UL #define PSW_ADDR_AMODE 0x80000000UL #define PSW_ADDR_INSN 0x7FFFFFFFUL @@ -269,7 +269,7 @@ typedef struct #define PSW_MASK_EA 0x0000000100000000UL #define PSW_MASK_BA 0x0000000080000000UL -#define PSW_MASK_USER 0x00003F8180000000UL +#define PSW_MASK_USER 0x0000FF8180000000UL #define PSW_ADDR_AMODE 0x0000000000000000UL #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL diff --git a/trunk/arch/s390/kernel/compat_signal.c b/trunk/arch/s390/kernel/compat_signal.c index a1e8a8694bb7..593fcc9253fc 100644 --- a/trunk/arch/s390/kernel/compat_signal.c +++ b/trunk/arch/s390/kernel/compat_signal.c @@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); + /* Check for invalid user address space control. */ + if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) + regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); for (i = 0; i < NUM_GPRS; i++) regs->gprs[i] = (__u64) regs32.gprs[i]; @@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->gprs[15] = (__force __u64) frame; - regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ + /* Force 31 bit amode and default user address space control. 
*/ + regs->psw.mask = PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__force __u64) ka->sa.sa_handler; regs->gprs[2] = map_signal(sig); @@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->gprs[15] = (__force __u64) frame; - regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ + /* Force 31 bit amode and default user address space control. */ + regs->psw.mask = PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__u64) ka->sa.sa_handler; regs->gprs[2] = map_signal(sig); diff --git a/trunk/arch/s390/kernel/sclp.S b/trunk/arch/s390/kernel/sclp.S index bf053898630d..b6506ee32a36 100644 --- a/trunk/arch/s390/kernel/sclp.S +++ b/trunk/arch/s390/kernel/sclp.S @@ -44,6 +44,12 @@ _sclp_wait_int: #endif mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) mvc 0(16,%r8),0(%r9) +#ifdef CONFIG_64BIT + epsw %r6,%r7 # set current addressing mode + nill %r6,0x1 # in new psw (31 or 64 bit mode) + nilh %r7,0x8000 + stm %r6,%r7,0(%r8) +#endif lhi %r6,0x0200 # cr mask for ext int (cr0.54) ltr %r2,%r2 jz .LsetctS1 @@ -87,7 +93,7 @@ _sclp_wait_int: .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int #ifdef CONFIG_64BIT .LextpswS1_64: - .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit + .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit #endif .LwaitpswS1: .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int diff --git a/trunk/arch/s390/kernel/signal.c b/trunk/arch/s390/kernel/signal.c index c13a2a37ef00..d1259d875074 100644 --- a/trunk/arch/s390/kernel/signal.c +++ b/trunk/arch/s390/kernel/signal.c @@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (user_sregs.regs.psw.mask & PSW_MASK_USER); + /* Check for invalid user address space control. */ + if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) + regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); /* Check for invalid amode */ if (regs->psw.mask & PSW_MASK_EA) regs->psw.mask |= PSW_MASK_BA; @@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->gprs[15] = (unsigned long) frame; - regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ + /* Force default amode and default user address space control. */ + regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; regs->gprs[2] = map_signal(sig); @@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->gprs[15] = (unsigned long) frame; - regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ + /* Force default amode and default user address space control. 
*/ + regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; regs->gprs[2] = map_signal(sig); diff --git a/trunk/arch/s390/kernel/topology.c b/trunk/arch/s390/kernel/topology.c index 54d93f4b6818..dd55f7c20104 100644 --- a/trunk/arch/s390/kernel/topology.c +++ b/trunk/arch/s390/kernel/topology.c @@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock); static struct mask_info core_info; cpumask_t cpu_core_map[NR_CPUS]; unsigned char cpu_core_id[NR_CPUS]; +unsigned char cpu_socket_id[NR_CPUS]; static struct mask_info book_info; cpumask_t cpu_book_map[NR_CPUS]; @@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, cpumask_set_cpu(lcpu, &book->mask); cpu_book_id[lcpu] = book->id; cpumask_set_cpu(lcpu, &core->mask); + cpu_core_id[lcpu] = rcpu; if (one_core_per_cpu) { - cpu_core_id[lcpu] = rcpu; + cpu_socket_id[lcpu] = rcpu; core = core->next; } else { - cpu_core_id[lcpu] = core->id; + cpu_socket_id[lcpu] = core->id; } smp_cpu_set_polarization(lcpu, tl_cpu->pp); } diff --git a/trunk/arch/s390/lib/uaccess_pt.c b/trunk/arch/s390/lib/uaccess_pt.c index 2d37bb861faf..9017a63dda3d 100644 --- a/trunk/arch/s390/lib/uaccess_pt.c +++ b/trunk/arch/s390/lib/uaccess_pt.c @@ -39,7 +39,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm, pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return -0x10UL; - if (pmd_huge(*pmd)) { + if (pmd_large(*pmd)) { if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) return -0x04UL; return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); diff --git a/trunk/arch/s390/mm/gup.c b/trunk/arch/s390/mm/gup.c index 60acb93a4680..1f5315d1215c 100644 --- a/trunk/arch/s390/mm/gup.c +++ b/trunk/arch/s390/mm/gup.c @@ -126,7 +126,7 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, */ if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; - if (unlikely(pmd_huge(pmd))) { + if (unlikely(pmd_large(pmd))) { if (!gup_huge_pmd(pmdp, pmd, addr, next, write, pages, nr)) return 0; @@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (unlikely(!access_ok(write ? 
VERIFY_WRITE : VERIFY_READ, - (void __user *)start, len))) + if ((end < start) || (end > TASK_SIZE)) return 0; local_irq_save(flags); @@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (end < start) + if ((end < start) || (end > TASK_SIZE)) goto slow_irqon; /* diff --git a/trunk/arch/sparc/Kconfig b/trunk/arch/sparc/Kconfig index b6b442b0d793..9f2edb5c5551 100644 --- a/trunk/arch/sparc/Kconfig +++ b/trunk/arch/sparc/Kconfig @@ -20,6 +20,7 @@ config SPARC select HAVE_ARCH_TRACEHOOK select SYSCTL_EXCEPTION_TRACE select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select RTC_CLASS select RTC_DRV_M48T59 select HAVE_IRQ_WORK diff --git a/trunk/arch/sparc/crypto/Makefile b/trunk/arch/sparc/crypto/Makefile index 6ae1ad5e502b..5d469d81761f 100644 --- a/trunk/arch/sparc/crypto/Makefile +++ b/trunk/arch/sparc/crypto/Makefile @@ -13,13 +13,13 @@ obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o -sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o -sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o -sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o -md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o +sha1-sparc64-y := sha1_asm.o sha1_glue.o +sha256-sparc64-y := sha256_asm.o sha256_glue.o +sha512-sparc64-y := sha512_asm.o sha512_glue.o +md5-sparc64-y := md5_asm.o md5_glue.o -aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o -des-sparc64-y := des_asm.o des_glue.o crop_devid.o -camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o +aes-sparc64-y := aes_asm.o aes_glue.o +des-sparc64-y := des_asm.o des_glue.o +camellia-sparc64-y := camellia_asm.o camellia_glue.o -crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o +crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o diff --git a/trunk/arch/sparc/crypto/aes_glue.c b/trunk/arch/sparc/crypto/aes_glue.c index 8f1c9980f637..3965d1d36dfa 100644 --- a/trunk/arch/sparc/crypto/aes_glue.c +++ b/trunk/arch/sparc/crypto/aes_glue.c @@ -475,3 +475,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); MODULE_ALIAS("aes"); + +#include "crop_devid.c" diff --git a/trunk/arch/sparc/crypto/camellia_glue.c b/trunk/arch/sparc/crypto/camellia_glue.c index 42905c084299..62c89af3fd3f 100644 --- a/trunk/arch/sparc/crypto/camellia_glue.c +++ b/trunk/arch/sparc/crypto/camellia_glue.c @@ -320,3 +320,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); MODULE_ALIAS("aes"); + +#include "crop_devid.c" diff --git a/trunk/arch/sparc/crypto/crc32c_glue.c b/trunk/arch/sparc/crypto/crc32c_glue.c index 0bd89cea8d8e..5162fad912ce 100644 --- a/trunk/arch/sparc/crypto/crc32c_glue.c +++ b/trunk/arch/sparc/crypto/crc32c_glue.c @@ -177,3 +177,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); MODULE_ALIAS("crc32c"); + +#include "crop_devid.c" diff --git a/trunk/arch/sparc/crypto/des_glue.c b/trunk/arch/sparc/crypto/des_glue.c index c4940c2d3073..41524cebcc49 100644 --- a/trunk/arch/sparc/crypto/des_glue.c +++ b/trunk/arch/sparc/crypto/des_glue.c @@ -527,3 +527,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); MODULE_ALIAS("des"); + +#include "crop_devid.c" diff --git a/trunk/arch/sparc/crypto/md5_glue.c 
b/trunk/arch/sparc/crypto/md5_glue.c index 603d723038ce..09a9ea1dfb69 100644 --- a/trunk/arch/sparc/crypto/md5_glue.c +++ b/trunk/arch/sparc/crypto/md5_glue.c @@ -186,3 +186,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); MODULE_ALIAS("md5"); + +#include "crop_devid.c" diff --git a/trunk/arch/sparc/crypto/sha1_glue.c b/trunk/arch/sparc/crypto/sha1_glue.c index 2bbb20bee9f1..6cd5f29e1e0d 100644 --- a/trunk/arch/sparc/crypto/sha1_glue.c +++ b/trunk/arch/sparc/crypto/sha1_glue.c @@ -181,3 +181,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); MODULE_ALIAS("sha1"); + +#include "crop_devid.c" diff --git a/trunk/arch/sparc/crypto/sha256_glue.c b/trunk/arch/sparc/crypto/sha256_glue.c index 591e656bd891..04f555ab2680 100644 --- a/trunk/arch/sparc/crypto/sha256_glue.c +++ b/trunk/arch/sparc/crypto/sha256_glue.c @@ -239,3 +239,5 @@ MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 op MODULE_ALIAS("sha224"); MODULE_ALIAS("sha256"); + +#include "crop_devid.c" diff --git a/trunk/arch/sparc/crypto/sha512_glue.c b/trunk/arch/sparc/crypto/sha512_glue.c index 486f0a2b7001..f04d1994d19a 100644 --- a/trunk/arch/sparc/crypto/sha512_glue.c +++ b/trunk/arch/sparc/crypto/sha512_glue.c @@ -224,3 +224,5 @@ MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 op MODULE_ALIAS("sha384"); MODULE_ALIAS("sha512"); + +#include "crop_devid.c" diff --git a/trunk/arch/sparc/include/asm/atomic_64.h b/trunk/arch/sparc/include/asm/atomic_64.h index ce35a1cf1a20..be56a244c9cf 100644 --- a/trunk/arch/sparc/include/asm/atomic_64.h +++ b/trunk/arch/sparc/include/asm/atomic_64.h @@ -1,7 +1,7 @@ /* atomic.h: Thankfully the V9 is at least reasonable for this * stuff. * - * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com) + * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com) */ #ifndef __ARCH_SPARC64_ATOMIC__ @@ -106,6 +106,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +extern long atomic64_dec_if_positive(atomic64_t *v); + /* Atomic operations are already serializing */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/trunk/arch/sparc/include/asm/backoff.h b/trunk/arch/sparc/include/asm/backoff.h index db3af0d30fb1..4e02086b839c 100644 --- a/trunk/arch/sparc/include/asm/backoff.h +++ b/trunk/arch/sparc/include/asm/backoff.h @@ -1,6 +1,46 @@ #ifndef _SPARC64_BACKOFF_H #define _SPARC64_BACKOFF_H +/* The macros in this file implement an exponential backoff facility + * for atomic operations. + * + * When multiple threads compete on an atomic operation, it is + * possible for one thread to be continually denied a successful + * completion of the compare-and-swap instruction. Heavily + * threaded cpu implementations like Niagara can compound this + * problem even further. + * + * When an atomic operation fails and needs to be retried, we spin a + * certain number of times. At each subsequent failure of the same + * operation we double the spin count, realizing an exponential + * backoff. + * + * When we spin, we try to use an operation that will cause the + * current cpu strand to block, and therefore make the core fully + * available to any other other runnable strands. There are two + * options, based upon cpu capabilities. 
+ * + * On all cpus prior to SPARC-T4 we do three dummy reads of the + * condition code register. Each read blocks the strand for something + * between 40 and 50 cpu cycles. + * + * For SPARC-T4 and later we have a special "pause" instruction + * available. This is implemented using writes to register %asr27. + * The cpu will block the number of cycles written into the register, + * unless a disrupting trap happens first. SPARC-T4 specifically + * implements pause with a granularity of 8 cycles. Each strand has + * an internal pause counter which decrements every 8 cycles. So the + * chip shifts the %asr27 value down by 3 bits, and writes the result + * into the pause counter. If a value smaller than 8 is written, the + * chip blocks for 1 cycle. + * + * To achieve the same amount of backoff as the three %ccr reads give + * on earlier chips, we shift the backoff value up by 7 bits. (Three + * %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the + * whole amount we want to block into the pause register, rather than + * loop writing 128 each time. + */ + #define BACKOFF_LIMIT (4 * 1024) #ifdef CONFIG_SMP @@ -11,16 +51,25 @@ #define BACKOFF_LABEL(spin_label, continue_label) \ spin_label -#define BACKOFF_SPIN(reg, tmp, label) \ - mov reg, tmp; \ -88: brnz,pt tmp, 88b; \ - sub tmp, 1, tmp; \ - set BACKOFF_LIMIT, tmp; \ - cmp reg, tmp; \ - bg,pn %xcc, label; \ - nop; \ - ba,pt %xcc, label; \ - sllx reg, 1, reg; +#define BACKOFF_SPIN(reg, tmp, label) \ + mov reg, tmp; \ +88: rd %ccr, %g0; \ + rd %ccr, %g0; \ + rd %ccr, %g0; \ + .section .pause_3insn_patch,"ax";\ + .word 88b; \ + sllx tmp, 7, tmp; \ + wr tmp, 0, %asr27; \ + clr tmp; \ + .previous; \ + brnz,pt tmp, 88b; \ + sub tmp, 1, tmp; \ + set BACKOFF_LIMIT, tmp; \ + cmp reg, tmp; \ + bg,pn %xcc, label; \ + nop; \ + ba,pt %xcc, label; \ + sllx reg, 1, reg; #else diff --git a/trunk/arch/sparc/include/asm/compat.h b/trunk/arch/sparc/include/asm/compat.h index cef99fbc0a21..830502fe62b4 100644 --- a/trunk/arch/sparc/include/asm/compat.h +++ b/trunk/arch/sparc/include/asm/compat.h @@ -232,9 +232,10 @@ static inline void __user *arch_compat_alloc_user_space(long len) struct pt_regs *regs = current_thread_info()->kregs; unsigned long usp = regs->u_regs[UREG_I6]; - if (!(test_thread_flag(TIF_32BIT))) + if (test_thread_64bit_stack(usp)) usp += STACK_BIAS; - else + + if (test_thread_flag(TIF_32BIT)) usp &= 0xffffffffUL; usp -= len; diff --git a/trunk/arch/sparc/include/asm/processor_64.h b/trunk/arch/sparc/include/asm/processor_64.h index 4e5a483122a0..721e25f0e2ea 100644 --- a/trunk/arch/sparc/include/asm/processor_64.h +++ b/trunk/arch/sparc/include/asm/processor_64.h @@ -196,7 +196,22 @@ extern unsigned long get_wchan(struct task_struct *task); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) -#define cpu_relax() barrier() +/* Please see the commentary in asm/backoff.h for a description of + * what these instructions are doing and how they have been choosen. + * To make a long story short, we are trying to yield the current cpu + * strand during busy loops. + */ +#define cpu_relax() asm volatile("\n99:\n\t" \ + "rd %%ccr, %%g0\n\t" \ + "rd %%ccr, %%g0\n\t" \ + "rd %%ccr, %%g0\n\t" \ + ".section .pause_3insn_patch,\"ax\"\n\t"\ + ".word 99b\n\t" \ + "wr %%g0, 128, %%asr27\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + ".previous" \ + ::: "memory") /* Prefetch support. This is tuned for UltraSPARC-III and later. 
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has diff --git a/trunk/arch/sparc/include/asm/prom.h b/trunk/arch/sparc/include/asm/prom.h index c28765110706..67c62578d170 100644 --- a/trunk/arch/sparc/include/asm/prom.h +++ b/trunk/arch/sparc/include/asm/prom.h @@ -63,5 +63,13 @@ extern char *of_console_options; extern void irq_trans_init(struct device_node *dp); extern char *build_path_component(struct device_node *dp); +/* SPARC has local implementations */ +extern int of_address_to_resource(struct device_node *dev, int index, + struct resource *r); +#define of_address_to_resource of_address_to_resource + +void __iomem *of_iomap(struct device_node *node, int index); +#define of_iomap of_iomap + #endif /* __KERNEL__ */ #endif /* _SPARC_PROM_H */ diff --git a/trunk/arch/sparc/include/asm/thread_info_64.h b/trunk/arch/sparc/include/asm/thread_info_64.h index 4e2276631081..a3fe4dcc0aa6 100644 --- a/trunk/arch/sparc/include/asm/thread_info_64.h +++ b/trunk/arch/sparc/include/asm/thread_info_64.h @@ -259,6 +259,11 @@ static inline bool test_and_clear_restore_sigmask(void) #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) +#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) +#define test_thread_64bit_stack(__SP) \ + ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ + false : true) + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/trunk/arch/sparc/include/asm/ttable.h b/trunk/arch/sparc/include/asm/ttable.h index 48f2807d3265..71b5a67522ab 100644 --- a/trunk/arch/sparc/include/asm/ttable.h +++ b/trunk/arch/sparc/include/asm/ttable.h @@ -372,7 +372,9 @@ etrap_spill_fixup_64bit: \ /* Normal 32bit spill */ #define SPILL_2_GENERIC(ASI) \ - srl %sp, 0, %sp; \ + and %sp, 1, %g3; \ + brnz,pn %g3, (. - (128 + 4)); \ + srl %sp, 0, %sp; \ stwa %l0, [%sp + %g0] ASI; \ mov 0x04, %g3; \ stwa %l1, [%sp + %g3] ASI; \ @@ -398,14 +400,16 @@ etrap_spill_fixup_64bit: \ stwa %i6, [%g1 + %g0] ASI; \ stwa %i7, [%g1 + %g3] ASI; \ saved; \ - retry; nop; nop; \ + retry; \ b,a,pt %xcc, spill_fixup_dax; \ b,a,pt %xcc, spill_fixup_mna; \ b,a,pt %xcc, spill_fixup; #define SPILL_2_GENERIC_ETRAP \ etrap_user_spill_32bit: \ - srl %sp, 0, %sp; \ + and %sp, 1, %g3; \ + brnz,pn %g3, etrap_user_spill_64bit; \ + srl %sp, 0, %sp; \ stwa %l0, [%sp + 0x00] %asi; \ stwa %l1, [%sp + 0x04] %asi; \ stwa %l2, [%sp + 0x08] %asi; \ @@ -427,7 +431,7 @@ etrap_user_spill_32bit: \ ba,pt %xcc, etrap_save; \ wrpr %g1, %cwp; \ nop; nop; nop; nop; \ - nop; nop; nop; nop; \ + nop; nop; \ ba,a,pt %xcc, etrap_spill_fixup_32bit; \ ba,a,pt %xcc, etrap_spill_fixup_32bit; \ ba,a,pt %xcc, etrap_spill_fixup_32bit; @@ -592,7 +596,9 @@ user_rtt_fill_64bit: \ /* Normal 32bit fill */ #define FILL_2_GENERIC(ASI) \ - srl %sp, 0, %sp; \ + and %sp, 1, %g3; \ + brnz,pn %g3, (. 
- (128 + 4)); \ + srl %sp, 0, %sp; \ lduwa [%sp + %g0] ASI, %l0; \ mov 0x04, %g2; \ mov 0x08, %g3; \ @@ -616,14 +622,16 @@ user_rtt_fill_64bit: \ lduwa [%g1 + %g3] ASI, %i6; \ lduwa [%g1 + %g5] ASI, %i7; \ restored; \ - retry; nop; nop; nop; nop; \ + retry; nop; nop; \ b,a,pt %xcc, fill_fixup_dax; \ b,a,pt %xcc, fill_fixup_mna; \ b,a,pt %xcc, fill_fixup; #define FILL_2_GENERIC_RTRAP \ user_rtt_fill_32bit: \ - srl %sp, 0, %sp; \ + and %sp, 1, %g3; \ + brnz,pn %g3, user_rtt_fill_64bit; \ + srl %sp, 0, %sp; \ lduwa [%sp + 0x00] %asi, %l0; \ lduwa [%sp + 0x04] %asi, %l1; \ lduwa [%sp + 0x08] %asi, %l2; \ @@ -643,7 +651,7 @@ user_rtt_fill_32bit: \ ba,pt %xcc, user_rtt_pre_restore; \ restored; \ nop; nop; nop; nop; nop; \ - nop; nop; nop; nop; nop; \ + nop; nop; nop; \ ba,a,pt %xcc, user_rtt_fill_fixup; \ ba,a,pt %xcc, user_rtt_fill_fixup; \ ba,a,pt %xcc, user_rtt_fill_fixup; diff --git a/trunk/arch/sparc/include/uapi/asm/unistd.h b/trunk/arch/sparc/include/uapi/asm/unistd.h index 8974ef7ae920..cac719d1bc5c 100644 --- a/trunk/arch/sparc/include/uapi/asm/unistd.h +++ b/trunk/arch/sparc/include/uapi/asm/unistd.h @@ -405,8 +405,13 @@ #define __NR_setns 337 #define __NR_process_vm_readv 338 #define __NR_process_vm_writev 339 +#define __NR_kern_features 340 +#define __NR_kcmp 341 -#define NR_syscalls 340 +#define NR_syscalls 342 + +/* Bitmask values returned from kern_features system call. */ +#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 #ifdef __32bit_syscall_numbers__ /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, diff --git a/trunk/arch/sparc/kernel/entry.h b/trunk/arch/sparc/kernel/entry.h index 0c218e4c0881..cc3c5cb47cda 100644 --- a/trunk/arch/sparc/kernel/entry.h +++ b/trunk/arch/sparc/kernel/entry.h @@ -59,6 +59,13 @@ struct popc_6insn_patch_entry { extern struct popc_6insn_patch_entry __popc_6insn_patch, __popc_6insn_patch_end; +struct pause_patch_entry { + unsigned int addr; + unsigned int insns[3]; +}; +extern struct pause_patch_entry __pause_3insn_patch, + __pause_3insn_patch_end; + extern void __init per_cpu_patch(void); extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, struct sun4v_1insn_patch_entry *); diff --git a/trunk/arch/sparc/kernel/leon_kernel.c b/trunk/arch/sparc/kernel/leon_kernel.c index f8b6eee40bde..87f60ee65433 100644 --- a/trunk/arch/sparc/kernel/leon_kernel.c +++ b/trunk/arch/sparc/kernel/leon_kernel.c @@ -56,11 +56,13 @@ static inline unsigned int leon_eirq_get(int cpu) static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) { unsigned int eirq; + struct irq_bucket *p; int cpu = sparc_leon3_cpuid(); eirq = leon_eirq_get(cpu); - if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */ - generic_handle_irq(irq_map[eirq]->irq); + p = irq_map[eirq]; + if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */ + generic_handle_irq(p->irq); } /* The extended IRQ controller has been found, this function registers it */ diff --git a/trunk/arch/sparc/kernel/perf_event.c b/trunk/arch/sparc/kernel/perf_event.c index 885a8af74064..b5c38faa4ead 100644 --- a/trunk/arch/sparc/kernel/perf_event.c +++ b/trunk/arch/sparc/kernel/perf_event.c @@ -1762,15 +1762,25 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; do { - struct sparc_stackf32 *usf, sf; unsigned long pc; - usf = (struct sparc_stackf32 *) ufp; - if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) - break; + if (thread32_stack_is_64bit(ufp)) { + struct sparc_stackf *usf, sf; 
- pc = sf.callers_pc; - ufp = (unsigned long)sf.fp; + ufp += STACK_BIAS; + usf = (struct sparc_stackf *) ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + pc = sf.callers_pc & 0xffffffff; + ufp = ((unsigned long) sf.fp) & 0xffffffff; + } else { + struct sparc_stackf32 *usf, sf; + usf = (struct sparc_stackf32 *) ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + pc = sf.callers_pc; + ufp = (unsigned long)sf.fp; + } perf_callchain_store(entry, pc); } while (entry->nr < PERF_MAX_STACK_DEPTH); } diff --git a/trunk/arch/sparc/kernel/process_64.c b/trunk/arch/sparc/kernel/process_64.c index d778248ef3f8..c6e0c2910043 100644 --- a/trunk/arch/sparc/kernel/process_64.c +++ b/trunk/arch/sparc/kernel/process_64.c @@ -452,13 +452,16 @@ void flush_thread(void) /* It's a bit more tricky when 64-bit tasks are involved... */ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) { + bool stack_64bit = test_thread_64bit_stack(psp); unsigned long fp, distance, rval; - if (!(test_thread_flag(TIF_32BIT))) { + if (stack_64bit) { csp += STACK_BIAS; psp += STACK_BIAS; __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); fp += STACK_BIAS; + if (test_thread_flag(TIF_32BIT)) + fp &= 0xffffffff; } else __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); @@ -472,7 +475,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) rval = (csp - distance); if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) rval = 0; - else if (test_thread_flag(TIF_32BIT)) { + else if (!stack_64bit) { if (put_user(((u32)csp), &(((struct reg_window32 __user *)rval)->ins[6]))) rval = 0; @@ -507,18 +510,18 @@ void synchronize_user_stack(void) flush_user_windows(); if ((window = get_thread_wsaved()) != 0) { - int winsize = sizeof(struct reg_window); - int bias = 0; - - if (test_thread_flag(TIF_32BIT)) - winsize = sizeof(struct reg_window32); - else - bias = STACK_BIAS; - window -= 1; do { - unsigned long sp = (t->rwbuf_stkptrs[window] + bias); struct reg_window *rwin = &t->reg_window[window]; + int winsize = sizeof(struct reg_window); + unsigned long sp; + + sp = t->rwbuf_stkptrs[window]; + + if (test_thread_64bit_stack(sp)) + sp += STACK_BIAS; + else + winsize = sizeof(struct reg_window32); if (!copy_to_user((char __user *)sp, rwin, winsize)) { shift_window_buffer(window, get_thread_wsaved() - 1, t); @@ -544,13 +547,6 @@ void fault_in_user_windows(void) { struct thread_info *t = current_thread_info(); unsigned long window; - int winsize = sizeof(struct reg_window); - int bias = 0; - - if (test_thread_flag(TIF_32BIT)) - winsize = sizeof(struct reg_window32); - else - bias = STACK_BIAS; flush_user_windows(); window = get_thread_wsaved(); @@ -558,8 +554,16 @@ void fault_in_user_windows(void) if (likely(window != 0)) { window -= 1; do { - unsigned long sp = (t->rwbuf_stkptrs[window] + bias); struct reg_window *rwin = &t->reg_window[window]; + int winsize = sizeof(struct reg_window); + unsigned long sp; + + sp = t->rwbuf_stkptrs[window]; + + if (test_thread_64bit_stack(sp)) + sp += STACK_BIAS; + else + winsize = sizeof(struct reg_window32); if (unlikely(sp & 0x7UL)) stack_unaligned(sp); diff --git a/trunk/arch/sparc/kernel/ptrace_64.c b/trunk/arch/sparc/kernel/ptrace_64.c index 484dabac7045..7ff45e4ba681 100644 --- a/trunk/arch/sparc/kernel/ptrace_64.c +++ b/trunk/arch/sparc/kernel/ptrace_64.c @@ -151,7 +151,7 @@ static int regwindow64_get(struct task_struct *target, { unsigned long rw_addr = regs->u_regs[UREG_I6]; - if 
(test_tsk_thread_flag(current, TIF_32BIT)) { + if (!test_thread_64bit_stack(rw_addr)) { struct reg_window32 win32; int i; @@ -176,7 +176,7 @@ static int regwindow64_set(struct task_struct *target, { unsigned long rw_addr = regs->u_regs[UREG_I6]; - if (test_tsk_thread_flag(current, TIF_32BIT)) { + if (!test_thread_64bit_stack(rw_addr)) { struct reg_window32 win32; int i; diff --git a/trunk/arch/sparc/kernel/setup_64.c b/trunk/arch/sparc/kernel/setup_64.c index 0800e71d8a88..0eaf0059aaef 100644 --- a/trunk/arch/sparc/kernel/setup_64.c +++ b/trunk/arch/sparc/kernel/setup_64.c @@ -316,6 +316,25 @@ static void __init popc_patch(void) } } +static void __init pause_patch(void) +{ + struct pause_patch_entry *p; + + p = &__pause_3insn_patch; + while (p < &__pause_3insn_patch_end) { + unsigned long i, addr = p->addr; + + for (i = 0; i < 3; i++) { + *(unsigned int *) (addr + (i * 4)) = p->insns[i]; + wmb(); + __asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p++; + } +} + #ifdef CONFIG_SMP void __init boot_cpu_id_too_large(int cpu) { @@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void) if (sparc64_elf_hwcap & AV_SPARC_POPC) popc_patch(); + if (sparc64_elf_hwcap & AV_SPARC_PAUSE) + pause_patch(); } void __init setup_arch(char **cmdline_p) diff --git a/trunk/arch/sparc/kernel/signal_64.c b/trunk/arch/sparc/kernel/signal_64.c index 867de2f8189c..689e1ba62809 100644 --- a/trunk/arch/sparc/kernel/signal_64.c +++ b/trunk/arch/sparc/kernel/signal_64.c @@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); - err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); - - if (err) + if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT) goto segv; err |= __get_user(rwin_save, &sf->rwin_save); diff --git a/trunk/arch/sparc/kernel/sys_sparc_64.c b/trunk/arch/sparc/kernel/sys_sparc_64.c index 11c6c9603e71..878ef3d5fec5 100644 --- a/trunk/arch/sparc/kernel/sys_sparc_64.c +++ b/trunk/arch/sparc/kernel/sys_sparc_64.c @@ -751,3 +751,8 @@ int kernel_execve(const char *filename, : "cc"); return __res; } + +asmlinkage long sys_kern_features(void) +{ + return KERN_FEATURE_MIXED_MODE_STACK; +} diff --git a/trunk/arch/sparc/kernel/systbls_32.S b/trunk/arch/sparc/kernel/systbls_32.S index 63402f9e9f51..5147f574f125 100644 --- a/trunk/arch/sparc/kernel/systbls_32.S +++ b/trunk/arch/sparc/kernel/systbls_32.S @@ -85,3 +85,4 @@ sys_call_table: /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev +/*340*/ .long sys_ni_syscall, sys_kcmp diff --git a/trunk/arch/sparc/kernel/systbls_64.S b/trunk/arch/sparc/kernel/systbls_64.S index 3a58e0d66f51..1c9af9fa38e9 100644 --- a/trunk/arch/sparc/kernel/systbls_64.S +++ b/trunk/arch/sparc/kernel/systbls_64.S @@ -86,6 +86,7 @@ sys_call_table32: .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init /*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev +/*340*/ .word sys_kern_features, sys_kcmp #endif /* CONFIG_COMPAT */ @@ -163,3 +164,4 @@ 
sys_call_table: .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev +/*340*/ .word sys_kern_features, sys_kcmp diff --git a/trunk/arch/sparc/kernel/unaligned_64.c b/trunk/arch/sparc/kernel/unaligned_64.c index f81d038f7340..8201c25e7669 100644 --- a/trunk/arch/sparc/kernel/unaligned_64.c +++ b/trunk/arch/sparc/kernel/unaligned_64.c @@ -113,21 +113,24 @@ static inline long sign_extend_imm13(long imm) static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { - unsigned long value; + unsigned long value, fp; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); + + fp = regs->u_regs[UREG_FP]; + if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window *)(fp + STACK_BIAS); value = win->locals[reg - 16]; - } else if (test_thread_flag(TIF_32BIT)) { + } else if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window __user *)(fp + STACK_BIAS); get_user(value, &win->locals[reg - 16]); } return value; @@ -135,19 +138,24 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) { + unsigned long fp; + if (reg < 16) return ®s->u_regs[reg]; + + fp = regs->u_regs[UREG_FP]; + if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window *)(fp + STACK_BIAS); return &win->locals[reg - 16]; - } else if (test_thread_flag(TIF_32BIT)) { + } else if (!test_thread_64bit_stack(fp)) { struct reg_window32 *win32; - win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 *)((unsigned long)((u32)fp)); return (unsigned long *)&win32->locals[reg - 16]; } else { struct reg_window *win; - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window *)(fp + STACK_BIAS); return &win->locals[reg - 16]; } } @@ -392,13 +400,15 @@ int handle_popc(u32 insn, struct pt_regs *regs) if (rd) regs->u_regs[rd] = ret; } else { - if (test_thread_flag(TIF_32BIT)) { + unsigned long fp = regs->u_regs[UREG_FP]; + + if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); put_user(ret, &win32->locals[rd - 16]); } else { struct reg_window __user *win; - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window __user *)(fp + STACK_BIAS); put_user(ret, &win->locals[rd - 16]); } } @@ -554,7 +564,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) reg[0] = 0; if ((insn & 0x780000) == 0x180000) reg[1] = 0; - } else if (test_thread_flag(TIF_32BIT)) { + } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { put_user(0, (int __user *) reg); if ((insn & 0x780000) == 0x180000) 
put_user(0, ((int __user *) reg) + 1); diff --git a/trunk/arch/sparc/kernel/visemul.c b/trunk/arch/sparc/kernel/visemul.c index 08e074b7eb6a..c096c624ac4d 100644 --- a/trunk/arch/sparc/kernel/visemul.c +++ b/trunk/arch/sparc/kernel/visemul.c @@ -149,21 +149,24 @@ static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { - unsigned long value; + unsigned long value, fp; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); + + fp = regs->u_regs[UREG_FP]; + if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window *)(fp + STACK_BIAS); value = win->locals[reg - 16]; - } else if (test_thread_flag(TIF_32BIT)) { + } else if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window __user *)(fp + STACK_BIAS); get_user(value, &win->locals[reg - 16]); } return value; @@ -172,16 +175,18 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, struct pt_regs *regs) { + unsigned long fp = regs->u_regs[UREG_FP]; + BUG_ON(reg < 16); BUG_ON(regs->tstate & TSTATE_PRIV); - if (test_thread_flag(TIF_32BIT)) { + if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); return (unsigned long __user *)&win32->locals[reg - 16]; } else { struct reg_window __user *win; - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window __user *)(fp + STACK_BIAS); return &win->locals[reg - 16]; } } @@ -204,7 +209,7 @@ static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) } else { unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); - if (test_thread_flag(TIF_32BIT)) + if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) __put_user((u32)val, (u32 __user *)rd_user); else __put_user(val, rd_user); diff --git a/trunk/arch/sparc/kernel/vmlinux.lds.S b/trunk/arch/sparc/kernel/vmlinux.lds.S index 89c2c29f154b..0bacceb19150 100644 --- a/trunk/arch/sparc/kernel/vmlinux.lds.S +++ b/trunk/arch/sparc/kernel/vmlinux.lds.S @@ -132,6 +132,11 @@ SECTIONS *(.popc_6insn_patch) __popc_6insn_patch_end = .; } + .pause_3insn_patch : { + __pause_3insn_patch = .; + *(.pause_3insn_patch) + __pause_3insn_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) . 
= ALIGN(PAGE_SIZE); diff --git a/trunk/arch/sparc/kernel/winfixup.S b/trunk/arch/sparc/kernel/winfixup.S index a6b0863c27df..1e67ce958369 100644 --- a/trunk/arch/sparc/kernel/winfixup.S +++ b/trunk/arch/sparc/kernel/winfixup.S @@ -43,6 +43,8 @@ spill_fixup_mna: spill_fixup_dax: TRAP_LOAD_THREAD_REG(%g6, %g1) ldx [%g6 + TI_FLAGS], %g1 + andcc %sp, 0x1, %g0 + movne %icc, 0, %g1 andcc %g1, _TIF_32BIT, %g0 ldub [%g6 + TI_WSAVED], %g1 sll %g1, 3, %g3 diff --git a/trunk/arch/sparc/lib/atomic_64.S b/trunk/arch/sparc/lib/atomic_64.S index 4d502da3de78..85c233d0a340 100644 --- a/trunk/arch/sparc/lib/atomic_64.S +++ b/trunk/arch/sparc/lib/atomic_64.S @@ -1,6 +1,6 @@ /* atomic.S: These things are too big to do inline. * - * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) + * Copyright (C) 1999, 2007 2012 David S. Miller (davem@davemloft.net) */ #include @@ -117,3 +117,17 @@ ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ sub %g1, %o0, %o0 2: BACKOFF_SPIN(%o2, %o3, 1b) ENDPROC(atomic64_sub_ret) + +ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o0], %g1 + brlez,pn %g1, 3f + sub %g1, 1, %g7 + casx [%o0], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) + nop +3: retl + sub %g1, 1, %o0 +2: BACKOFF_SPIN(%o2, %o3, 1b) +ENDPROC(atomic64_dec_if_positive) diff --git a/trunk/arch/sparc/lib/ksyms.c b/trunk/arch/sparc/lib/ksyms.c index ee31b884c61b..0c4e35e522fa 100644 --- a/trunk/arch/sparc/lib/ksyms.c +++ b/trunk/arch/sparc/lib/ksyms.c @@ -116,6 +116,7 @@ EXPORT_SYMBOL(atomic64_add); EXPORT_SYMBOL(atomic64_add_ret); EXPORT_SYMBOL(atomic64_sub); EXPORT_SYMBOL(atomic64_sub_ret); +EXPORT_SYMBOL(atomic64_dec_if_positive); /* Atomic bit operations. */ EXPORT_SYMBOL(test_and_set_bit); diff --git a/trunk/arch/sparc/math-emu/math_64.c b/trunk/arch/sparc/math-emu/math_64.c index 1704068da928..034aadbff036 100644 --- a/trunk/arch/sparc/math-emu/math_64.c +++ b/trunk/arch/sparc/math-emu/math_64.c @@ -320,7 +320,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap) XR = 0; else if (freg < 16) XR = regs->u_regs[freg]; - else if (test_thread_flag(TIF_32BIT)) { + else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { struct reg_window32 __user *win32; flushw_user (); win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); diff --git a/trunk/arch/unicore32/Kconfig b/trunk/arch/unicore32/Kconfig index e5c5473e69ce..c4fbb21e802b 100644 --- a/trunk/arch/unicore32/Kconfig +++ b/trunk/arch/unicore32/Kconfig @@ -16,6 +16,8 @@ config UNICORE32 select ARCH_WANT_FRAME_POINTERS select GENERIC_IOMAP select MODULES_USE_ELF_REL + select GENERIC_KERNEL_THREAD + select GENERIC_KERNEL_EXECVE help UniCore-32 is 32-bit Instruction Set Architecture, including a series of low-power-consumption RISC chip @@ -64,6 +66,9 @@ config GENERIC_CALIBRATE_DELAY config ARCH_MAY_HAVE_PC_FDC bool +config ZONE_DMA + def_bool y + config NEED_DMA_MAP_STATE def_bool y @@ -216,7 +221,7 @@ config PUV3_GPIO bool depends on !ARCH_FPGA select GENERIC_GPIO - select GPIO_SYSFS if EXPERIMENTAL + select GPIO_SYSFS default y if PUV3_NB0916 diff --git a/trunk/arch/unicore32/include/asm/Kbuild b/trunk/arch/unicore32/include/asm/Kbuild index c910c9857e11..601e92f18af6 100644 --- a/trunk/arch/unicore32/include/asm/Kbuild +++ b/trunk/arch/unicore32/include/asm/Kbuild @@ -1,4 +1,3 @@ -include include/asm-generic/Kbuild.asm generic-y += atomic.h generic-y += auxvec.h diff --git a/trunk/arch/unicore32/include/asm/bug.h 
b/trunk/arch/unicore32/include/asm/bug.h index b1ff8cadb086..93a56f3e2344 100644 --- a/trunk/arch/unicore32/include/asm/bug.h +++ b/trunk/arch/unicore32/include/asm/bug.h @@ -19,9 +19,4 @@ extern void die(const char *msg, struct pt_regs *regs, int err); extern void uc32_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, unsigned long err, unsigned long trap); -extern asmlinkage void __backtrace(void); -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); - -extern void __show_regs(struct pt_regs *); - #endif /* __UNICORE_BUG_H__ */ diff --git a/trunk/arch/unicore32/include/asm/cmpxchg.h b/trunk/arch/unicore32/include/asm/cmpxchg.h index df4d5acfd19f..8e797ad4fa24 100644 --- a/trunk/arch/unicore32/include/asm/cmpxchg.h +++ b/trunk/arch/unicore32/include/asm/cmpxchg.h @@ -35,7 +35,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, : "memory", "cc"); break; default: - ret = __xchg_bad_pointer(); + __xchg_bad_pointer(); } return ret; diff --git a/trunk/arch/unicore32/include/asm/kvm_para.h b/trunk/arch/unicore32/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/trunk/arch/unicore32/include/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/unicore32/include/asm/processor.h b/trunk/arch/unicore32/include/asm/processor.h index 14382cb09657..4eaa42167667 100644 --- a/trunk/arch/unicore32/include/asm/processor.h +++ b/trunk/arch/unicore32/include/asm/processor.h @@ -72,11 +72,6 @@ unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() -/* - * Create a new kernel thread - */ -extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) diff --git a/trunk/arch/unicore32/include/asm/ptrace.h b/trunk/arch/unicore32/include/asm/ptrace.h index b9caf9b0997b..726749dab52f 100644 --- a/trunk/arch/unicore32/include/asm/ptrace.h +++ b/trunk/arch/unicore32/include/asm/ptrace.h @@ -12,80 +12,10 @@ #ifndef __UNICORE_PTRACE_H__ #define __UNICORE_PTRACE_H__ -#define PTRACE_GET_THREAD_AREA 22 - -/* - * PSR bits - */ -#define USER_MODE 0x00000010 -#define REAL_MODE 0x00000011 -#define INTR_MODE 0x00000012 -#define PRIV_MODE 0x00000013 -#define ABRT_MODE 0x00000017 -#define EXTN_MODE 0x0000001b -#define SUSR_MODE 0x0000001f -#define MODE_MASK 0x0000001f -#define PSR_R_BIT 0x00000040 -#define PSR_I_BIT 0x00000080 -#define PSR_V_BIT 0x10000000 -#define PSR_C_BIT 0x20000000 -#define PSR_Z_BIT 0x40000000 -#define PSR_S_BIT 0x80000000 - -/* - * Groups of PSR bits - */ -#define PSR_f 0xff000000 /* Flags */ -#define PSR_c 0x000000ff /* Control */ +#include #ifndef __ASSEMBLY__ -/* - * This struct defines the way the registers are stored on the - * stack during a system call. Note that sizeof(struct pt_regs) - * has to be a multiple of 8. 
- */ -struct pt_regs { - unsigned long uregs[34]; -}; - -#define UCreg_asr uregs[32] -#define UCreg_pc uregs[31] -#define UCreg_lr uregs[30] -#define UCreg_sp uregs[29] -#define UCreg_ip uregs[28] -#define UCreg_fp uregs[27] -#define UCreg_26 uregs[26] -#define UCreg_25 uregs[25] -#define UCreg_24 uregs[24] -#define UCreg_23 uregs[23] -#define UCreg_22 uregs[22] -#define UCreg_21 uregs[21] -#define UCreg_20 uregs[20] -#define UCreg_19 uregs[19] -#define UCreg_18 uregs[18] -#define UCreg_17 uregs[17] -#define UCreg_16 uregs[16] -#define UCreg_15 uregs[15] -#define UCreg_14 uregs[14] -#define UCreg_13 uregs[13] -#define UCreg_12 uregs[12] -#define UCreg_11 uregs[11] -#define UCreg_10 uregs[10] -#define UCreg_09 uregs[9] -#define UCreg_08 uregs[8] -#define UCreg_07 uregs[7] -#define UCreg_06 uregs[6] -#define UCreg_05 uregs[5] -#define UCreg_04 uregs[4] -#define UCreg_03 uregs[3] -#define UCreg_02 uregs[2] -#define UCreg_01 uregs[1] -#define UCreg_00 uregs[0] -#define UCreg_ORIG_00 uregs[33] - -#ifdef __KERNEL__ - #define user_mode(regs) \ (processor_mode(regs) == USER_MODE) @@ -125,9 +55,5 @@ static inline int valid_user_regs(struct pt_regs *regs) #define instruction_pointer(regs) ((regs)->UCreg_pc) -#endif /* __KERNEL__ */ - #endif /* __ASSEMBLY__ */ - #endif - diff --git a/trunk/arch/unicore32/include/uapi/asm/Kbuild b/trunk/arch/unicore32/include/uapi/asm/Kbuild index baebb3da1d44..0514d7ad6855 100644 --- a/trunk/arch/unicore32/include/uapi/asm/Kbuild +++ b/trunk/arch/unicore32/include/uapi/asm/Kbuild @@ -1,3 +1,10 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +header-y += byteorder.h +header-y += kvm_para.h +header-y += ptrace.h +header-y += sigcontext.h +header-y += unistd.h + +generic-y += kvm_para.h diff --git a/trunk/arch/unicore32/include/asm/byteorder.h b/trunk/arch/unicore32/include/uapi/asm/byteorder.h similarity index 100% rename from trunk/arch/unicore32/include/asm/byteorder.h rename to trunk/arch/unicore32/include/uapi/asm/byteorder.h diff --git a/trunk/arch/unicore32/include/uapi/asm/ptrace.h b/trunk/arch/unicore32/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..187aa2e98a53 --- /dev/null +++ b/trunk/arch/unicore32/include/uapi/asm/ptrace.h @@ -0,0 +1,90 @@ +/* + * linux/arch/unicore32/include/asm/ptrace.h + * + * Code specific to PKUnity SoC and UniCore ISA + * + * Copyright (C) 2001-2010 GUAN Xue-tao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _UAPI__UNICORE_PTRACE_H__ +#define _UAPI__UNICORE_PTRACE_H__ + +#define PTRACE_GET_THREAD_AREA 22 + +/* + * PSR bits + */ +#define USER_MODE 0x00000010 +#define REAL_MODE 0x00000011 +#define INTR_MODE 0x00000012 +#define PRIV_MODE 0x00000013 +#define ABRT_MODE 0x00000017 +#define EXTN_MODE 0x0000001b +#define SUSR_MODE 0x0000001f +#define MODE_MASK 0x0000001f +#define PSR_R_BIT 0x00000040 +#define PSR_I_BIT 0x00000080 +#define PSR_V_BIT 0x10000000 +#define PSR_C_BIT 0x20000000 +#define PSR_Z_BIT 0x40000000 +#define PSR_S_BIT 0x80000000 + +/* + * Groups of PSR bits + */ +#define PSR_f 0xff000000 /* Flags */ +#define PSR_c 0x000000ff /* Control */ + +#ifndef __ASSEMBLY__ + +/* + * This struct defines the way the registers are stored on the + * stack during a system call. Note that sizeof(struct pt_regs) + * has to be a multiple of 8. 
+ */ +struct pt_regs { + unsigned long uregs[34]; +}; + +#define UCreg_asr uregs[32] +#define UCreg_pc uregs[31] +#define UCreg_lr uregs[30] +#define UCreg_sp uregs[29] +#define UCreg_ip uregs[28] +#define UCreg_fp uregs[27] +#define UCreg_26 uregs[26] +#define UCreg_25 uregs[25] +#define UCreg_24 uregs[24] +#define UCreg_23 uregs[23] +#define UCreg_22 uregs[22] +#define UCreg_21 uregs[21] +#define UCreg_20 uregs[20] +#define UCreg_19 uregs[19] +#define UCreg_18 uregs[18] +#define UCreg_17 uregs[17] +#define UCreg_16 uregs[16] +#define UCreg_15 uregs[15] +#define UCreg_14 uregs[14] +#define UCreg_13 uregs[13] +#define UCreg_12 uregs[12] +#define UCreg_11 uregs[11] +#define UCreg_10 uregs[10] +#define UCreg_09 uregs[9] +#define UCreg_08 uregs[8] +#define UCreg_07 uregs[7] +#define UCreg_06 uregs[6] +#define UCreg_05 uregs[5] +#define UCreg_04 uregs[4] +#define UCreg_03 uregs[3] +#define UCreg_02 uregs[2] +#define UCreg_01 uregs[1] +#define UCreg_00 uregs[0] +#define UCreg_ORIG_00 uregs[33] + + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI__UNICORE_PTRACE_H__ */ diff --git a/trunk/arch/unicore32/include/asm/sigcontext.h b/trunk/arch/unicore32/include/uapi/asm/sigcontext.h similarity index 100% rename from trunk/arch/unicore32/include/asm/sigcontext.h rename to trunk/arch/unicore32/include/uapi/asm/sigcontext.h diff --git a/trunk/arch/unicore32/include/asm/unistd.h b/trunk/arch/unicore32/include/uapi/asm/unistd.h similarity index 92% rename from trunk/arch/unicore32/include/asm/unistd.h rename to trunk/arch/unicore32/include/uapi/asm/unistd.h index 2abcf61c615d..d18a3be89b38 100644 --- a/trunk/arch/unicore32/include/asm/unistd.h +++ b/trunk/arch/unicore32/include/uapi/asm/unistd.h @@ -12,3 +12,4 @@ /* Use the standard ABI for syscalls. */ #include +#define __ARCH_WANT_SYS_EXECVE diff --git a/trunk/arch/unicore32/kernel/entry.S b/trunk/arch/unicore32/kernel/entry.S index dcb87ab19ddd..7049350c790f 100644 --- a/trunk/arch/unicore32/kernel/entry.S +++ b/trunk/arch/unicore32/kernel/entry.S @@ -573,17 +573,16 @@ ENDPROC(ret_to_user) */ ENTRY(ret_from_fork) b.l schedule_tail - get_thread_info tsk - ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing - mov why, #1 - cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? 
- beq ret_slow_syscall - mov r1, sp - mov r0, #1 @ trace exit [IP = 1] - b.l syscall_trace b ret_slow_syscall ENDPROC(ret_from_fork) +ENTRY(ret_from_kernel_thread) + b.l schedule_tail + mov r0, r5 + adr lr, ret_slow_syscall + mov pc, r4 +ENDPROC(ret_from_kernel_thread) + /*============================================================================= * SWI handler *----------------------------------------------------------------------------- @@ -669,11 +668,6 @@ __cr_alignment: #endif .ltorg -ENTRY(sys_execve) - add r3, sp, #S_OFF - b __sys_execve -ENDPROC(sys_execve) - ENTRY(sys_clone) add ip, sp, #S_OFF stw ip, [sp+], #4 diff --git a/trunk/arch/unicore32/kernel/process.c b/trunk/arch/unicore32/kernel/process.c index b008586dad75..a8fe265ce2c0 100644 --- a/trunk/arch/unicore32/kernel/process.c +++ b/trunk/arch/unicore32/kernel/process.c @@ -258,6 +258,7 @@ void release_thread(struct task_struct *dead_task) } asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); +asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); int copy_thread(unsigned long clone_flags, unsigned long stack_start, @@ -266,17 +267,22 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); - *childregs = *regs; - childregs->UCreg_00 = 0; - childregs->UCreg_sp = stack_start; - memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); thread->cpu_context.sp = (unsigned long)childregs; - thread->cpu_context.pc = (unsigned long)ret_from_fork; - - if (clone_flags & CLONE_SETTLS) - childregs->UCreg_16 = regs->UCreg_03; + if (unlikely(!regs)) { + thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread; + thread->cpu_context.r4 = stack_start; + thread->cpu_context.r5 = stk_sz; + memset(childregs, 0, sizeof(struct pt_regs)); + } else { + thread->cpu_context.pc = (unsigned long)ret_from_fork; + *childregs = *regs; + childregs->UCreg_00 = 0; + childregs->UCreg_sp = stack_start; + if (clone_flags & CLONE_SETTLS) + childregs->UCreg_16 = regs->UCreg_03; + } return 0; } @@ -305,42 +311,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp) } EXPORT_SYMBOL(dump_fpu); -/* - * Shuffle the argument into the correct register before calling the - * thread function. r1 is the thread argument, r2 is the pointer to - * the thread function, and r3 points to the exit function. - */ -asm(".pushsection .text\n" -" .align\n" -" .type kernel_thread_helper, #function\n" -"kernel_thread_helper:\n" -" mov.a asr, r7\n" -" mov r0, r4\n" -" mov lr, r6\n" -" mov pc, r5\n" -" .size kernel_thread_helper, . - kernel_thread_helper\n" -" .popsection"); - -/* - * Create a kernel thread. 
- */ -pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) -{ - struct pt_regs regs; - - memset(®s, 0, sizeof(regs)); - - regs.UCreg_04 = (unsigned long)arg; - regs.UCreg_05 = (unsigned long)fn; - regs.UCreg_06 = (unsigned long)do_exit; - regs.UCreg_07 = PRIV_MODE; - regs.UCreg_pc = (unsigned long)kernel_thread_helper; - regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT; - - return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); -} -EXPORT_SYMBOL(kernel_thread); - unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; diff --git a/trunk/arch/unicore32/kernel/setup.h b/trunk/arch/unicore32/kernel/setup.h index f23955028a18..30f749da8f73 100644 --- a/trunk/arch/unicore32/kernel/setup.h +++ b/trunk/arch/unicore32/kernel/setup.h @@ -30,4 +30,10 @@ extern char __vectors_start[], __vectors_end[]; extern void kernel_thread_helper(void); extern void __init early_signal_init(void); + +extern asmlinkage void __backtrace(void); +extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +extern void __show_regs(struct pt_regs *); + #endif diff --git a/trunk/arch/unicore32/kernel/sys.c b/trunk/arch/unicore32/kernel/sys.c index fabdee96110b..9680134b31f0 100644 --- a/trunk/arch/unicore32/kernel/sys.c +++ b/trunk/arch/unicore32/kernel/sys.c @@ -42,69 +42,6 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp, parent_tid, child_tid); } -/* sys_execve() executes a new program. - * This is called indirectly via a small wrapper - */ -asmlinkage long __sys_execve(const char __user *filename, - const char __user *const __user *argv, - const char __user *const __user *envp, - struct pt_regs *regs) -{ - int error; - struct filename *fn; - - fn = getname(filename); - error = PTR_ERR(fn); - if (IS_ERR(fn)) - goto out; - error = do_execve(fn->name, argv, envp, regs); - putname(fn); -out: - return error; -} - -int kernel_execve(const char *filename, - const char *const argv[], - const char *const envp[]) -{ - struct pt_regs regs; - int ret; - - memset(®s, 0, sizeof(struct pt_regs)); - ret = do_execve(filename, - (const char __user *const __user *)argv, - (const char __user *const __user *)envp, ®s); - if (ret < 0) - goto out; - - /* - * Save argc to the register structure for userspace. - */ - regs.UCreg_00 = ret; - - /* - * We were successful. We won't be returning to our caller, but - * instead to user space by manipulating the kernel stack. - */ - asm("add r0, %0, %1\n\t" - "mov r1, %2\n\t" - "mov r2, %3\n\t" - "mov r22, #0\n\t" /* not a syscall */ - "mov r23, %0\n\t" /* thread structure */ - "b.l memmove\n\t" /* copy regs to top of stack */ - "mov sp, r0\n\t" /* reposition stack pointer */ - "b ret_to_user" - : - : "r" (current_thread_info()), - "Ir" (THREAD_START_SP - sizeof(regs)), - "r" (®s), - "Ir" (sizeof(regs)) - : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); - - out: - return ret; -} - /* Note: used by the compat code even in 64-bit Linux. 
*/ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, diff --git a/trunk/arch/unicore32/mm/fault.c b/trunk/arch/unicore32/mm/fault.c index 2eeb9c04cab0..f9b5c10bccee 100644 --- a/trunk/arch/unicore32/mm/fault.c +++ b/trunk/arch/unicore32/mm/fault.c @@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) } static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr, - struct task_struct *tsk) + unsigned int flags, struct task_struct *tsk) { struct vm_area_struct *vma; int fault; @@ -194,14 +194,7 @@ static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr, * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the fault. */ - fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, - (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0); - if (unlikely(fault & VM_FAULT_ERROR)) - return fault; - if (fault & VM_FAULT_MAJOR) - tsk->maj_flt++; - else - tsk->min_flt++; + fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); return fault; check_stack: @@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) struct task_struct *tsk; struct mm_struct *mm; int fault, sig, code; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | + ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0); tsk = current; mm = tsk->mm; @@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (!user_mode(regs) && !search_exception_tables(regs->UCreg_pc)) goto no_context; +retry: down_read(&mm->mmap_sem); } else { /* @@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) #endif } - fault = __do_pf(mm, addr, fsr, tsk); + fault = __do_pf(mm, addr, fsr, flags, tsk); + + /* If we need to retry but a fatal signal is pending, handle the + * signal first. We do not need to release the mmap_sem because + * it would already be released in __lock_page_or_retry in + * mm/filemap.c. */ + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return 0; + + if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) { + if (fault & VM_FAULT_MAJOR) + tsk->maj_flt++; + else + tsk->min_flt++; + if (fault & VM_FAULT_RETRY) { + /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk + * of starvation. */ + flags &= ~FAULT_FLAG_ALLOW_RETRY; + goto retry; + } + } + up_read(&mm->mmap_sem); /* diff --git a/trunk/arch/x86/boot/compressed/eboot.c b/trunk/arch/x86/boot/compressed/eboot.c index c760e073963e..e87b0cac14b5 100644 --- a/trunk/arch/x86/boot/compressed/eboot.c +++ b/trunk/arch/x86/boot/compressed/eboot.c @@ -12,6 +12,8 @@ #include #include +#undef memcpy /* Use memcpy from misc.c */ + #include "eboot.h" static efi_system_table_t *sys_table; diff --git a/trunk/arch/x86/boot/header.S b/trunk/arch/x86/boot/header.S index 2a017441b8b2..8c132a625b94 100644 --- a/trunk/arch/x86/boot/header.S +++ b/trunk/arch/x86/boot/header.S @@ -476,6 +476,3 @@ die: setup_corrupt: .byte 7 .string "No setup signature found...\n" - - .data -dummy: .long 0 diff --git a/trunk/arch/x86/include/asm/ptrace.h b/trunk/arch/x86/include/asm/ptrace.h index dcfde52979c3..19f16ebaf4fa 100644 --- a/trunk/arch/x86/include/asm/ptrace.h +++ b/trunk/arch/x86/include/asm/ptrace.h @@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs) } #endif -/* - * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode - * when it traps. 
The previous stack will be directly underneath the saved - * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. - * - * This is valid only for kernel mode traps. - */ -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ #ifdef CONFIG_X86_32 - return (unsigned long)(®s->sp); +extern unsigned long kernel_stack_pointer(struct pt_regs *regs); #else +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ return regs->sp; -#endif } +#endif #define GET_IP(regs) ((regs)->ip) #define GET_FP(regs) ((regs)->bp) diff --git a/trunk/arch/x86/include/asm/xen/hypercall.h b/trunk/arch/x86/include/asm/xen/hypercall.h index 59c226d120cd..c20d1ce62dc6 100644 --- a/trunk/arch/x86/include/asm/xen/hypercall.h +++ b/trunk/arch/x86/include/asm/xen/hypercall.h @@ -359,18 +359,14 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, return _hypercall4(int, update_va_mapping, va, new_val.pte, new_val.pte >> 32, flags); } +extern int __must_check xen_event_channel_op_compat(int, void *); static inline int HYPERVISOR_event_channel_op(int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); - if (unlikely(rc == -ENOSYS)) { - struct evtchn_op op; - op.cmd = cmd; - memcpy(&op.u, arg, sizeof(op.u)); - rc = _hypercall1(int, event_channel_op_compat, &op); - memcpy(arg, &op.u, sizeof(op.u)); - } + if (unlikely(rc == -ENOSYS)) + rc = xen_event_channel_op_compat(cmd, arg); return rc; } @@ -386,17 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str) return _hypercall3(int, console_io, cmd, count, str); } +extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); + static inline int HYPERVISOR_physdev_op(int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); - if (unlikely(rc == -ENOSYS)) { - struct physdev_op op; - op.cmd = cmd; - memcpy(&op.u, arg, sizeof(op.u)); - rc = _hypercall1(int, physdev_op_compat, &op); - memcpy(arg, &op.u, sizeof(op.u)); - } + if (unlikely(rc == -ENOSYS)) + rc = HYPERVISOR_physdev_op_compat(cmd, arg); return rc; } diff --git a/trunk/arch/x86/kernel/cpu/amd.c b/trunk/arch/x86/kernel/cpu/amd.c index f7e98a2c0d12..1b7d1656a042 100644 --- a/trunk/arch/x86/kernel/cpu/amd.c +++ b/trunk/arch/x86/kernel/cpu/amd.c @@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. + */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { + u64 val; + + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { + val |= 0x1E; + wrmsrl_safe(0xc0011021, val); + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? */ diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c index 698b6ec12e0f..1ac581f38dfa 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -6,7 +6,7 @@ * * Written by Jacob Shin - AMD, Inc. 
* - * Support: borislav.petkov@amd.com + * Maintained by: Borislav Petkov * * April 2006 * - added support for AMD Family 0x10 processors diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c index 5f88abf07e9c..4f9a3cbfc4a3 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -285,34 +285,39 @@ void cmci_clear(void) raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } +static long cmci_rediscover_work_func(void *arg) +{ + int banks; + + /* Recheck banks in case CPUs don't all have the same */ + if (cmci_supported(&banks)) + cmci_discover(banks); + + return 0; +} + /* * After a CPU went down cycle through all the others and rediscover * Must run in process context. */ void cmci_rediscover(int dying) { - int banks; - int cpu; - cpumask_var_t old; + int cpu, banks; if (!cmci_supported(&banks)) return; - if (!alloc_cpumask_var(&old, GFP_KERNEL)) - return; - cpumask_copy(old, ¤t->cpus_allowed); for_each_online_cpu(cpu) { if (cpu == dying) continue; - if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) + + if (cpu == smp_processor_id()) { + cmci_rediscover_work_func(NULL); continue; - /* Recheck banks in case CPUs don't all have the same */ - if (cmci_supported(&banks)) - cmci_discover(banks); - } + } - set_cpus_allowed_ptr(current, old); - free_cpumask_var(old); + work_on_cpu(cpu, cmci_rediscover_work_func, NULL); + } } /* diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S index b51b2c7ee51f..1328fe49a3f1 100644 --- a/trunk/arch/x86/kernel/entry_64.S +++ b/trunk/arch/x86/kernel/entry_64.S @@ -995,8 +995,8 @@ END(interrupt) */ .p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: - ASM_CLAC XCPT_FRAME + ASM_CLAC addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ interrupt do_IRQ /* 0(%rsp): old_rsp-ARGOFFSET */ @@ -1135,8 +1135,8 @@ END(common_interrupt) */ .macro apicinterrupt num sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC pushq_cfi $~(\num) .Lcommon_\sym: interrupt \do_sym @@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \ */ .macro zeroentry sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1208,8 +1208,8 @@ END(\sym) .macro paranoidzeroentry sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1227,8 +1227,8 @@ END(\sym) #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) .macro paranoidzeroentry_ist sym do_sym ist ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1247,8 +1247,8 @@ END(\sym) .macro errorentry sym do_sym ENTRY(\sym) - ASM_CLAC XCPT_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 @@ -1266,8 +1266,8 @@ END(\sym) /* error code is on the stack already */ .macro paranoiderrorentry sym do_sym ENTRY(\sym) - ASM_CLAC XCPT_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 diff --git a/trunk/arch/x86/kernel/microcode_amd.c b/trunk/arch/x86/kernel/microcode_amd.c index 7720ff5a9ee2..efdec7cd8e01 100644 --- a/trunk/arch/x86/kernel/microcode_amd.c +++ b/trunk/arch/x86/kernel/microcode_amd.c @@ -8,8 +8,8 @@ * Tigran Aivazian * * Maintainers: - * 
Andreas Herrmann - * Borislav Petkov + * Andreas Herrmann + * Borislav Petkov * * This driver allows to upgrade microcode on F10h AMD * CPUs and later. @@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 +#define F16H_MPB_MAX_SIZE 3458 switch (c->x86) { case 0x14: @@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, case 0x15: max_size = F15H_MPB_MAX_SIZE; break; + case 0x16: + max_size = F16H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c index b00b33a18390..5e0596b0632e 100644 --- a/trunk/arch/x86/kernel/ptrace.c +++ b/trunk/arch/x86/kernel/ptrace.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value) #define FLAG_MASK FLAG_MASK_32 +/* + * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode + * when it traps. The previous stack will be directly underneath the saved + * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. + * + * Now, if the stack is empty, '®s->sp' is out of range. In this + * case we try to take the previous stack. To always return a non-null + * stack pointer we fall back to regs as stack if no previous stack + * exists. + * + * This is valid only for kernel mode traps. + */ +unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); + unsigned long sp = (unsigned long)®s->sp; + struct thread_info *tinfo; + + if (context == (sp & ~(THREAD_SIZE - 1))) + return sp; + + tinfo = (struct thread_info *)context; + if (tinfo->previous_esp) + return tinfo->previous_esp; + + return (unsigned long)regs; +} +EXPORT_SYMBOL_GPL(kernel_stack_pointer); + static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); diff --git a/trunk/arch/x86/kvm/cpuid.h b/trunk/arch/x86/kvm/cpuid.h index a10e46016851..58fc51488828 100644 --- a/trunk/arch/x86/kvm/cpuid.h +++ b/trunk/arch/x86/kvm/cpuid.h @@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; + if (!static_cpu_has(X86_FEATURE_XSAVE)) + return 0; + best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & bit(X86_FEATURE_XSAVE)); } diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c index ad6b1dd06f8b..f85815945fc6 100644 --- a/trunk/arch/x86/kvm/vmx.c +++ b/trunk/arch/x86/kvm/vmx.c @@ -6549,19 +6549,22 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) } } - exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); /* Exposing INVPCID only when PCID is exposed */ best = kvm_find_cpuid_entry(vcpu, 0x7, 0); if (vmx_invpcid_supported() && best && (best->ebx & bit(X86_FEATURE_INVPCID)) && guest_cpuid_has_pcid(vcpu)) { + exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } else { - exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; - vmcs_write32(SECONDARY_VM_EXEC_CONTROL, - exec_control); + if (cpu_has_secondary_exec_ctrls()) { + exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, + exec_control); + } if (best) best->ebx &= ~bit(X86_FEATURE_INVPCID); } diff 
--git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 224a7e78cb6c..4f7641756be2 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int pending_vec, max_bits, idx; struct desc_ptr dt; + if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) + return -EINVAL; + dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); diff --git a/trunk/arch/x86/mm/tlb.c b/trunk/arch/x86/mm/tlb.c index 0777f042e400..60f926cd8b0e 100644 --- a/trunk/arch/x86/mm/tlb.c +++ b/trunk/arch/x86/mm/tlb.c @@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, } if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 - || vmflag == VM_HUGETLB) { + || vmflag & VM_HUGETLB) { local_flush_tlb(); goto flush_all; } diff --git a/trunk/arch/x86/pci/ce4100.c b/trunk/arch/x86/pci/ce4100.c index 41bd2a2d2c50..b914e20b5a00 100644 --- a/trunk/arch/x86/pci/ce4100.c +++ b/trunk/arch/x86/pci/ce4100.c @@ -115,6 +115,16 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value) reg_read(reg, value); } +static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + /* force interrupt pin value to 0 */ + *value = reg->sim_reg.value & 0xfff00ff; + raw_spin_unlock_irqrestore(&pci_config_lock, flags); +} + static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write) DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write) @@ -144,6 +154,7 @@ static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write) DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write) DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write) + DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write) DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write) DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write) @@ -161,8 +172,10 @@ static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write) DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write) DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write) + DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write) DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write) + DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write) }; static void __init init_sim_regs(void) diff --git a/trunk/arch/x86/platform/ce4100/ce4100.c b/trunk/arch/x86/platform/ce4100/ce4100.c index 4c61b52191eb..92525cb8e54c 100644 --- a/trunk/arch/x86/platform/ce4100/ce4100.c +++ b/trunk/arch/x86/platform/ce4100/ce4100.c @@ -21,12 +21,25 @@ #include #include #include +#include static int ce4100_i8042_detect(void) { return 0; } +/* + * The CE4100 platform has an internal 8051 Microcontroller which is + * responsible for signaling to the external Power Management Unit the + * intention to reset, reboot or power off the system. This 8051 device has + * its command register mapped at I/O port 0xcf9 and the value 0x4 is used + * to power off the system. 
+ */ +static void ce4100_power_off(void) +{ + outb(0x4, 0xcf9); +} + #ifdef CONFIG_SERIAL_8250 static unsigned int mem_serial_in(struct uart_port *p, int offset) @@ -139,8 +152,19 @@ void __init x86_ce4100_early_setup(void) x86_init.mpparse.find_smp_config = x86_init_noop; x86_init.pci.init = ce4100_pci_init; + /* + * By default, the reboot method is ACPI which is supported by the + * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue + * the bootloader will however issue a system power off instead of + * reboot. By using BOOT_KBD we ensure proper system reboot as + * expected. + */ + reboot_type = BOOT_KBD; + #ifdef CONFIG_X86_IO_APIC x86_init.pci.init_irq = sdv_pci_init; x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; #endif + + pm_power_off = ce4100_power_off; } diff --git a/trunk/block/blk-exec.c b/trunk/block/blk-exec.c index 8b6dc5bd4dd0..f71eac35c1b9 100644 --- a/trunk/block/blk-exec.c +++ b/trunk/block/blk-exec.c @@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, rq_end_io_fn *done) { int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; + bool is_pm_resume; WARN_ON(irqs_disabled()); rq->rq_disk = bd_disk; rq->end_io = done; + /* + * need to check this before __blk_run_queue(), because rq can + * be freed before that returns. + */ + is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME; spin_lock_irq(q->queue_lock); @@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, __elv_add_request(q, rq, where); __blk_run_queue(q); /* the queue is stopped so it won't be run */ - if (rq->cmd_type == REQ_TYPE_PM_RESUME) + if (is_pm_resume) q->request_fn(q); spin_unlock_irq(q->queue_lock); } diff --git a/trunk/crypto/cryptd.c b/trunk/crypto/cryptd.c index 671d4d6d14df..7bdd61b867c8 100644 --- a/trunk/crypto/cryptd.c +++ b/trunk/crypto/cryptd.c @@ -137,13 +137,18 @@ static void cryptd_queue_worker(struct work_struct *work) struct crypto_async_request *req, *backlog; cpu_queue = container_of(work, struct cryptd_cpu_queue, work); - /* Only handle one request at a time to avoid hogging crypto - * workqueue. preempt_disable/enable is used to prevent - * being preempted by cryptd_enqueue_request() */ + /* + * Only handle one request at a time to avoid hogging crypto workqueue. + * preempt_disable/enable is used to prevent being preempted by + * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent + * cryptd_enqueue_request() being accessed from software interrupts. 
+ */ + local_bh_disable(); preempt_disable(); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); preempt_enable(); + local_bh_enable(); if (!req) return; diff --git a/trunk/drivers/ata/ahci_platform.c b/trunk/drivers/ata/ahci_platform.c index b1ae48054dc5..b7078afddb74 100644 --- a/trunk/drivers/ata/ahci_platform.c +++ b/trunk/drivers/ata/ahci_platform.c @@ -238,7 +238,7 @@ static int __devexit ahci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int ahci_suspend(struct device *dev) { struct ahci_platform_data *pdata = dev_get_platdata(dev); diff --git a/trunk/drivers/ata/libata-acpi.c b/trunk/drivers/ata/libata-acpi.c index fd9ecf74e631..5b0ba3f20edc 100644 --- a/trunk/drivers/ata/libata-acpi.c +++ b/trunk/drivers/ata/libata-acpi.c @@ -1105,10 +1105,15 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev, struct acpi_device *acpi_dev; struct acpi_device_power_state *states; - if (ap->flags & ATA_FLAG_ACPI_SATA) - ata_dev = &ap->link.device[sdev->channel]; - else + if (ap->flags & ATA_FLAG_ACPI_SATA) { + if (!sata_pmp_attached(ap)) + ata_dev = &ap->link.device[sdev->id]; + else + ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id]; + } + else { ata_dev = &ap->link.device[sdev->id]; + } *handle = ata_dev_acpi_handle(ata_dev); diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index 3cc7096cfda7..f46fbd3bd3fb 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -2942,6 +2942,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) if (xfer_mode == t->mode) return t; + + WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", + __func__, xfer_mode); + return NULL; } diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c index e3bda074fa12..a6df6a351d6e 100644 --- a/trunk/drivers/ata/libata-scsi.c +++ b/trunk/drivers/ata/libata-scsi.c @@ -1052,6 +1052,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; + sdev->no_report_opcodes = 1; + sdev->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() callback and * it needs to see every deferred qc. Set dev_blocked to 1 to diff --git a/trunk/drivers/ata/pata_arasan_cf.c b/trunk/drivers/ata/pata_arasan_cf.c index 26201ebef3ca..371fd2c698b7 100644 --- a/trunk/drivers/ata/pata_arasan_cf.c +++ b/trunk/drivers/ata/pata_arasan_cf.c @@ -317,6 +317,12 @@ static int cf_init(struct arasan_cf_dev *acdev) return ret; } + ret = clk_set_rate(acdev->clk, 166000000); + if (ret) { + dev_warn(acdev->host->dev, "clock set rate failed"); + return ret; + } + spin_lock_irqsave(&acdev->host->lock, flags); /* configure CF interface clock */ writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? 
pdata->cf_if_clk : @@ -908,7 +914,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int arasan_cf_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); diff --git a/trunk/drivers/ata/sata_highbank.c b/trunk/drivers/ata/sata_highbank.c index 0d7c4c2cd26f..400bf1c3e982 100644 --- a/trunk/drivers/ata/sata_highbank.c +++ b/trunk/drivers/ata/sata_highbank.c @@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = { }; MODULE_DEVICE_TABLE(of, ahci_of_match); -static int __init ahci_highbank_probe(struct platform_device *pdev) +static int __devinit ahci_highbank_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; @@ -378,7 +378,7 @@ static int __devexit ahci_highbank_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int ahci_highbank_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); diff --git a/trunk/drivers/ata/sata_svw.c b/trunk/drivers/ata/sata_svw.c index 44a4256533e1..08608de87e4e 100644 --- a/trunk/drivers/ata/sata_svw.c +++ b/trunk/drivers/ata/sata_svw.c @@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link, return 0; } +static int k2_sata_softreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return ata_sff_softreset(link, class, deadline); +} + +static int k2_sata_hardreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return sata_sff_hardreset(link, class, deadline); +} static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { @@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = { static struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, + .softreset = k2_sata_softreset, + .hardreset = k2_sata_hardreset, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, .sff_check_status = k2_stat_check_status, diff --git a/trunk/drivers/base/platform.c b/trunk/drivers/base/platform.c index 8727e9c5eea4..72c776f2a1f5 100644 --- a/trunk/drivers/base/platform.c +++ b/trunk/drivers/base/platform.c @@ -83,9 +83,16 @@ EXPORT_SYMBOL_GPL(platform_get_resource); */ int platform_get_irq(struct platform_device *dev, unsigned int num) { +#ifdef CONFIG_SPARC + /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ + if (!dev || num >= dev->archdata.num_irqs) + return -ENXIO; + return dev->archdata.irqs[num]; +#else struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); return r ? 
r->start : -ENXIO; +#endif } EXPORT_SYMBOL_GPL(platform_get_irq); diff --git a/trunk/drivers/base/power/qos.c b/trunk/drivers/base/power/qos.c index 74a67e0019a2..fbbd4ed2edf2 100644 --- a/trunk/drivers/base/power/qos.c +++ b/trunk/drivers/base/power/qos.c @@ -451,7 +451,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, if (ancestor) error = dev_pm_qos_add_request(ancestor, req, value); - if (error) + if (error < 0) req->dev = NULL; return error; diff --git a/trunk/drivers/block/aoe/aoecmd.c b/trunk/drivers/block/aoe/aoecmd.c index 3804a0af3ef1..9fe4f1865558 100644 --- a/trunk/drivers/block/aoe/aoecmd.c +++ b/trunk/drivers/block/aoe/aoecmd.c @@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) /* cf. http://lkml.org/lkml/2006/10/31/28 */ if (!fastfail) - q->request_fn(q); + __blk_run_queue(q); } static void diff --git a/trunk/drivers/block/floppy.c b/trunk/drivers/block/floppy.c index 1c49d7173966..2ddd64a9ffde 100644 --- a/trunk/drivers/block/floppy.c +++ b/trunk/drivers/block/floppy.c @@ -4330,6 +4330,7 @@ static int __init do_floppy_init(void) out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); out_put_disk: + destroy_workqueue(floppy_wq); for (drive = 0; drive < N_DRIVE; drive++) { if (!disks[drive]) break; @@ -4340,7 +4341,6 @@ static int __init do_floppy_init(void) } put_disk(disks[drive]); } - destroy_workqueue(floppy_wq); return err; } @@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void) unregister_blkdev(FLOPPY_MAJOR, "fd"); platform_driver_unregister(&floppy_driver); + destroy_workqueue(floppy_wq); + for (drive = 0; drive < N_DRIVE; drive++) { del_timer_sync(&motor_off_timer[drive]); @@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void) cancel_delayed_work_sync(&fd_timeout); cancel_delayed_work_sync(&fd_timer); - destroy_workqueue(floppy_wq); if (atomic_read(&usage_count)) floppy_release_irq_and_dma(); diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c index adc6f36564cf..9694dd99bbbc 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.c +++ b/trunk/drivers/block/mtip32xx/mtip32xx.c @@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data) struct mtip_cmd *command; int tag, cmdto_cnt = 0; unsigned int bit, group; - unsigned int num_command_slots = port->dd->slot_groups * 32; + unsigned int num_command_slots; unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; if (unlikely(!port)) @@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data) } /* clear the tag accumulator */ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); + num_command_slots = port->dd->slot_groups * 32; for (tag = 0; tag < num_command_slots; tag++) { /* @@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd, fis.device); /* check for erase mode support during secure erase.*/ - if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) - && (outbuf[0] & MTIP_SEC_ERASE_MODE)) { + if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf && + (outbuf[0] & MTIP_SEC_ERASE_MODE)) { erasemode = 1; } @@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, * return value * None */ -static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, +static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, int nsect, int nents, int tag, void *callback, void *data, int dir) { @@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, struct mtip_port *port = dd->port; struct mtip_cmd 
*command = &port->commands[tag]; int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + u64 start = sector; /* Map the scatter list for DMA access */ nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); @@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, fis->opts = 1 << 7; fis->command = (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE); - *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF); - *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF); + fis->lba_low = start & 0xFF; + fis->lba_mid = (start >> 8) & 0xFF; + fis->lba_hi = (start >> 16) & 0xFF; + fis->lba_low_ex = (start >> 24) & 0xFF; + fis->lba_mid_ex = (start >> 32) & 0xFF; + fis->lba_hi_ex = (start >> 40) & 0xFF; fis->device = 1 << 6; fis->features = nsect & 0xFF; fis->features_ex = (nsect >> 8) & 0xFF; diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.h b/trunk/drivers/block/mtip32xx/mtip32xx.h index 5f4a917bd8bb..b1742640556a 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.h +++ b/trunk/drivers/block/mtip32xx/mtip32xx.h @@ -34,7 +34,7 @@ #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48 /* check for erase mode support during secure erase */ -#define MTIP_SEC_ERASE_MODE 0x3 +#define MTIP_SEC_ERASE_MODE 0x2 /* # of times to retry timed out/failed IOs */ #define MTIP_MAX_RETRIES 2 @@ -155,14 +155,14 @@ enum { MTIP_DDF_REBUILD_FAILED_BIT = 8, }; -__packed struct smart_attr{ +struct smart_attr { u8 attr_id; u16 flags; u8 cur; u8 worst; u32 data; u8 res[3]; -}; +} __packed; /* Register Frame Information Structure (FIS), host to device. */ struct host_to_dev_fis { diff --git a/trunk/drivers/bluetooth/ath3k.c b/trunk/drivers/bluetooth/ath3k.c index fc2de5528dcc..b00000e8aef6 100644 --- a/trunk/drivers/bluetooth/ath3k.c +++ b/trunk/drivers/bluetooth/ath3k.c @@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3304) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x0489, 0xE027) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c index debda27df9b0..ee82f2fb65f0 100644 --- a/trunk/drivers/bluetooth/btusb.c +++ b/trunk/drivers/bluetooth/btusb.c @@ -124,6 +124,7 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, diff --git a/trunk/drivers/bus/omap-ocp2scp.c b/trunk/drivers/bus/omap-ocp2scp.c index ff63560b8467..0c48b0e05ed6 100644 --- a/trunk/drivers/bus/omap-ocp2scp.c +++ b/trunk/drivers/bus/omap-ocp2scp.c @@ -22,6 +22,26 @@ #include #include #include +#include + +/** + * _count_resources - count for the number of resources + * @res: struct resource * + * + * Count and return the number of resources populated for the device that is + * connected to ocp2scp. 
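
For reference, the mtip32xx hunk above stops storing the LBA through unaligned 32-bit casts and instead writes the 48-bit sector number one byte at a time into the FIS. A minimal standalone sketch of that byte split follows; the struct and helper are illustrative only, not the driver's host_to_dev_fis layout.

#include <stdint.h>

/* Illustrative only: split a 48-bit LBA into the six one-byte FIS fields,
 * mirroring the masking done in the mtip_hw_submit_io() hunk above. */
struct fis_lba48 {
	uint8_t lba_low, lba_mid, lba_hi;
	uint8_t lba_low_ex, lba_mid_ex, lba_hi_ex;
};

static void fis_set_lba48(struct fis_lba48 *fis, uint64_t sector)
{
	fis->lba_low    = sector & 0xFF;          /* bits  7:0  */
	fis->lba_mid    = (sector >> 8) & 0xFF;   /* bits 15:8  */
	fis->lba_hi     = (sector >> 16) & 0xFF;  /* bits 23:16 */
	fis->lba_low_ex = (sector >> 24) & 0xFF;  /* bits 31:24 */
	fis->lba_mid_ex = (sector >> 32) & 0xFF;  /* bits 39:32 */
	fis->lba_hi_ex  = (sector >> 40) & 0xFF;  /* bits 47:40 */
}
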
+ */ +static unsigned _count_resources(struct resource *res) +{ + int cnt = 0; + + while (res->start != res->end) { + cnt++; + res++; + } + + return cnt; +} static int ocp2scp_remove_devices(struct device *dev, void *c) { @@ -34,20 +54,62 @@ static int ocp2scp_remove_devices(struct device *dev, void *c) static int __devinit omap_ocp2scp_probe(struct platform_device *pdev) { - int ret; - struct device_node *np = pdev->dev.of_node; + int ret; + unsigned res_cnt, i; + struct device_node *np = pdev->dev.of_node; + struct platform_device *pdev_child; + struct omap_ocp2scp_platform_data *pdata = pdev->dev.platform_data; + struct omap_ocp2scp_dev *dev; if (np) { ret = of_platform_populate(np, NULL, NULL, &pdev->dev); if (ret) { - dev_err(&pdev->dev, "failed to add resources for ocp2scp child\n"); + dev_err(&pdev->dev, + "failed to add resources for ocp2scp child\n"); goto err0; } + } else if (pdata) { + for (i = 0, dev = *pdata->devices; i < pdata->dev_cnt; i++, + dev++) { + res_cnt = _count_resources(dev->res); + + pdev_child = platform_device_alloc(dev->drv_name, + PLATFORM_DEVID_AUTO); + if (!pdev_child) { + dev_err(&pdev->dev, + "failed to allocate mem for ocp2scp child\n"); + goto err0; + } + + ret = platform_device_add_resources(pdev_child, + dev->res, res_cnt); + if (ret) { + dev_err(&pdev->dev, + "failed to add resources for ocp2scp child\n"); + goto err1; + } + + pdev_child->dev.parent = &pdev->dev; + + ret = platform_device_add(pdev_child); + if (ret) { + dev_err(&pdev->dev, + "failed to register ocp2scp child device\n"); + goto err1; + } + } + } else { + dev_err(&pdev->dev, "OCP2SCP initialized without plat data\n"); + return -EINVAL; } + pm_runtime_enable(&pdev->dev); return 0; +err1: + platform_device_put(pdev_child); + err0: device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices); diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h index 1042c1b90376..6ec0fff79bc2 100644 --- a/trunk/drivers/char/agp/intel-agp.h +++ b/trunk/drivers/char/agp/intel-agp.h @@ -62,6 +62,12 @@ #define I810_PTE_LOCAL 0x00000002 #define I810_PTE_VALID 0x00000001 #define I830_PTE_SYSTEM_CACHED 0x00000006 +/* GT PTE cache control fields */ +#define GEN6_PTE_UNCACHED 0x00000002 +#define HSW_PTE_UNCACHED 0x00000000 +#define GEN6_PTE_LLC 0x00000004 +#define GEN6_PTE_LLC_MLC 0x00000006 +#define GEN6_PTE_GFDT 0x00000008 #define I810_SMRAM_MISCC 0x70 #define I810_GFX_MEM_WIN_SIZE 0x00010000 @@ -91,6 +97,7 @@ #define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN) #define GFX_FLSH_CNTL 0x2170 /* 915+ */ +#define GFX_FLSH_CNTL_VLV 0x101008 #define I810_DRAM_CTL 0x3000 #define I810_DRAM_ROW_0 0x00000001 @@ -141,6 +148,29 @@ #define INTEL_I7505_AGPCTRL 0x70 #define INTEL_I7505_MCHCFG 0x50 +#define SNB_GMCH_CTRL 0x50 +#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 +#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) +#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) +#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) +#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) +#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) +#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) +#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) +#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) +#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) +#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) +#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) +#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) +#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) +#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) +#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) +#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) +#define SNB_GTT_SIZE_0M 
(0 << 8) +#define SNB_GTT_SIZE_1M (1 << 8) +#define SNB_GTT_SIZE_2M (2 << 8) +#define SNB_GTT_SIZE_MASK (3 << 8) + /* pci devices ids */ #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a @@ -189,5 +219,66 @@ #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152 +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162 +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156 +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166 +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */ +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A +#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ +#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 +#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a +#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22 +#define 
PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A #endif diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c index dbd901e94ea6..38390f7c6ab6 100644 --- a/trunk/drivers/char/agp/intel-gtt.c +++ b/trunk/drivers/char/agp/intel-gtt.c @@ -367,6 +367,62 @@ static unsigned int intel_gtt_stolen_size(void) stolen_size = 0; break; } + } else if (INTEL_GTT_GEN == 6) { + /* + * SandyBridge has new memory control reg at 0x50.w + */ + u16 snb_gmch_ctl; + pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); + switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { + case SNB_GMCH_GMS_STOLEN_32M: + stolen_size = MB(32); + break; + case SNB_GMCH_GMS_STOLEN_64M: + stolen_size = MB(64); + break; + case SNB_GMCH_GMS_STOLEN_96M: + stolen_size = MB(96); + break; + case SNB_GMCH_GMS_STOLEN_128M: + stolen_size = MB(128); + break; + case SNB_GMCH_GMS_STOLEN_160M: + stolen_size = MB(160); + break; + case SNB_GMCH_GMS_STOLEN_192M: + stolen_size = MB(192); + break; + case SNB_GMCH_GMS_STOLEN_224M: + stolen_size = MB(224); + break; + case SNB_GMCH_GMS_STOLEN_256M: + stolen_size = MB(256); + break; + case SNB_GMCH_GMS_STOLEN_288M: + stolen_size = MB(288); + break; + case SNB_GMCH_GMS_STOLEN_320M: + stolen_size = MB(320); + break; + case SNB_GMCH_GMS_STOLEN_352M: + stolen_size = MB(352); + break; + case SNB_GMCH_GMS_STOLEN_384M: + stolen_size = MB(384); + break; + case SNB_GMCH_GMS_STOLEN_416M: + stolen_size = MB(416); + break; + case SNB_GMCH_GMS_STOLEN_448M: + stolen_size = MB(448); + break; + case SNB_GMCH_GMS_STOLEN_480M: + stolen_size = MB(480); + break; + case SNB_GMCH_GMS_STOLEN_512M: + stolen_size = MB(512); + break; + } } else { switch (gmch_ctrl & I855_GMCH_GMS_MASK) { case I855_GMCH_GMS_STOLEN_1M: @@ -500,9 +556,29 @@ static unsigned int i965_gtt_total_entries(void) static unsigned int intel_gtt_total_entries(void) { + int size; + if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) return i965_gtt_total_entries(); - else { + else if (INTEL_GTT_GEN == 6) { + u16 snb_gmch_ctl; + + pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); + switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { + default: + case SNB_GTT_SIZE_0M: + printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); + size = MB(0); + break; + case SNB_GTT_SIZE_1M: + size = MB(1); + break; + case SNB_GTT_SIZE_2M: + size = MB(2); + break; + } + return size/4; + } else { /* On previous hardware, the GTT size was just what was * required to map the aperture. 
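
For reference, the SandyBridge stolen-memory switch above is linear: each step of the 5-bit GMS field in bits 7:3 of SNB_GMCH_CTRL adds 32 MB. A compact equivalent, assuming only the values handled by that switch, could look like the sketch below (not part of the patch).

#include <stdint.h>
#include <stddef.h>

/* Illustrative only: decode the SNB stolen-memory size from the GMCH
 * control word; a GMS field of 0 means no stolen memory. */
static size_t snb_stolen_bytes(uint16_t snb_gmch_ctl)
{
	unsigned int gms = (snb_gmch_ctl & 0xF8) >> 3;  /* SNB_GMCH_GMS_STOLEN_MASK */

	return (size_t)gms * 32 * 1024 * 1024;
}
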
*/ @@ -702,6 +778,9 @@ bool intel_enable_gtt(void) { u8 __iomem *reg; + if (INTEL_GTT_GEN >= 6) + return true; + if (INTEL_GTT_GEN == 2) { u16 gmch_ctrl; @@ -1070,6 +1149,85 @@ static void i965_write_entry(dma_addr_t addr, writel(addr | pte_flags, intel_private.gtt + entry); } +static bool gen6_check_flags(unsigned int flags) +{ + return true; +} + +static void haswell_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_MEMORY) + pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } else { /* set 'normal'/'cached' to LLC by default */ + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); +} + +static void gen6_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_MEMORY) + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } else { /* set 'normal'/'cached' to LLC by default */ + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); +} + +static void valleyview_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_MEMORY) + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; + else { + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); + + writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV); +} + +static void gen6_cleanup(void) +{ +} + /* Certain Gen5 chipsets require require idling the GPU before * unmapping anything from the GTT when VT-d is enabled. 
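
For reference, gen6_write_entry(), haswell_write_entry() and valleyview_write_entry() above all rely on the same trick to fit a 40-bit physical address into a 32-bit PTE: address bits 39:32 are folded into PTE bits 11:4 by (addr >> 28) & 0xff0. A minimal sketch of just that encoding step, separate from the driver code:

#include <stdint.h>

/* Illustrative only: pack a 40-bit physical address into a gen6-style
 * 32-bit PTE, leaving bits 3:0 free for the valid/cacheability flags. */
static uint32_t gen6_pte_pack_addr(uint64_t addr)
{
	uint32_t pte = (uint32_t)(addr & 0xFFFFF000);   /* bits 31:12 unchanged    */

	pte |= (uint32_t)((addr >> 28) & 0xFF0);        /* bits 39:32 -> bits 11:4 */
	return pte;
}
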
*/ @@ -1091,29 +1249,41 @@ static inline int needs_idle_maps(void) static int i9xx_setup(void) { - u32 reg_addr, gtt_addr; + u32 reg_addr; int size = KB(512); pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr); reg_addr &= 0xfff80000; + if (INTEL_GTT_GEN >= 7) + size = MB(2); + intel_private.registers = ioremap(reg_addr, size); if (!intel_private.registers) return -ENOMEM; - switch (INTEL_GTT_GEN) { - case 3: + if (INTEL_GTT_GEN == 3) { + u32 gtt_addr; + pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &gtt_addr); intel_private.gtt_bus_addr = gtt_addr; - break; - case 5: - intel_private.gtt_bus_addr = reg_addr + MB(2); - break; - default: - intel_private.gtt_bus_addr = reg_addr + KB(512); - break; + } else { + u32 gtt_offset; + + switch (INTEL_GTT_GEN) { + case 5: + case 6: + case 7: + gtt_offset = MB(2); + break; + case 4: + default: + gtt_offset = KB(512); + break; + } + intel_private.gtt_bus_addr = reg_addr + gtt_offset; } if (needs_idle_maps()) @@ -1225,6 +1395,32 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; +static const struct intel_gtt_driver sandybridge_gtt_driver = { + .gen = 6, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = gen6_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver haswell_gtt_driver = { + .gen = 6, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = haswell_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver valleyview_gtt_driver = { + .gen = 7, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = valleyview_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, +}; /* Table to describe Intel GMCH and AGP/PCIE GART drivers.
At least one of * driver and gmch_driver must be non-null, and find_gmch will determine @@ -1305,6 +1501,106 @@ static const struct intel_gtt_driver_description { "HD Graphics", &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, "HD Graphics", &ironlake_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, + "ValleyView", &valleyview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, 
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, { 0, NULL, NULL } }; @@ -1390,7 +1686,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, } EXPORT_SYMBOL(intel_gmch_probe); -struct intel_gtt *intel_gtt_get(void) +const struct intel_gtt *intel_gtt_get(void) { return &intel_private.base; } diff --git a/trunk/drivers/clk/ux500/u8500_clk.c b/trunk/drivers/clk/ux500/u8500_clk.c index ca4a25ed844c..e2c17d187d98 100644 --- a/trunk/drivers/clk/ux500/u8500_clk.c +++ b/trunk/drivers/clk/ux500/u8500_clk.c @@ -40,7 +40,7 @@ void u8500_clk_init(void) CLK_IS_ROOT|CLK_IGNORE_UNUSED, 32768); clk_register_clkdev(clk, "clk32k", NULL); - clk_register_clkdev(clk, NULL, "rtc-pl031"); + clk_register_clkdev(clk, "apb_pclk", "rtc-pl031"); /* PRCMU clocks */ fw_version = prcmu_get_fw_version(); @@ -228,10 +228,17 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE, BIT(2), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1"); + clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE, BIT(3), 0); + clk_register_clkdev(clk, "apb_pclk", "msp0"); + clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0"); + clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE, BIT(4), 0); + clk_register_clkdev(clk, "apb_pclk", "msp1"); + clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1"); clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE, BIT(5), 0); @@ -239,6 +246,7 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE, BIT(6), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2"); clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE, BIT(7), 0); @@ -246,6 +254,7 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE, BIT(8), 0); + clk_register_clkdev(clk, "apb_pclk", "slimbus0"); clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE, BIT(9), 0); @@ -255,11 +264,16 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE, BIT(10), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4"); + clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE, BIT(11), 0); + clk_register_clkdev(clk, "apb_pclk", "msp3"); + clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3"); clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE, BIT(0), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3"); clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE, BIT(1), 0); @@ -279,12 +293,13 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE, BIT(5), 0); + 
clk_register_clkdev(clk, "apb_pclk", "msp2"); + clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2"); clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE, BIT(6), 0); clk_register_clkdev(clk, "apb_pclk", "sdi1"); - clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE, BIT(7), 0); clk_register_clkdev(clk, "apb_pclk", "sdi3"); @@ -316,10 +331,15 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE, BIT(1), 0); + clk_register_clkdev(clk, "apb_pclk", "ssp0"); + clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE, BIT(2), 0); + clk_register_clkdev(clk, "apb_pclk", "ssp1"); + clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE, BIT(3), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0"); clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE, BIT(4), 0); @@ -401,10 +421,17 @@ void u8500_clk_init(void) clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk", U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.1"); + clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk", U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "msp0"); + clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0"); + clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk", U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "msp1"); + clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1"); clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk", U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE); @@ -412,17 +439,25 @@ void u8500_clk_init(void) clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk", U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.2"); + clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk", - U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE); - /* FIXME: Redefinition of BIT(3). 
*/ + U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "slimbus0"); + clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk", U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.4"); + clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk", U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "msp3"); + clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3"); /* Periph2 */ clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk", U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.3"); clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk", U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE); @@ -430,6 +465,8 @@ void u8500_clk_init(void) clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk", U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "msp2"); + clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2"); clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk", U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE); @@ -450,10 +487,15 @@ void u8500_clk_init(void) /* Periph3 */ clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk", U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "ssp0"); + clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk", U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "ssp1"); + clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk", U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.0"); clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk", U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE); diff --git a/trunk/drivers/edac/amd64_edac.h b/trunk/drivers/edac/amd64_edac.h index 8d4804732bac..8c4139647efc 100644 --- a/trunk/drivers/edac/amd64_edac.h +++ b/trunk/drivers/edac/amd64_edac.h @@ -33,7 +33,7 @@ * detection. The mods to Rev F required more family * information detection. * - * Changes/Fixes by Borislav Petkov : + * Changes/Fixes by Borislav Petkov : * - misc fixes and code cleanups * * This module is based on the following documents diff --git a/trunk/drivers/edac/edac_stub.c b/trunk/drivers/edac/edac_stub.c index 6c86f6e54558..351945fa2ecd 100644 --- a/trunk/drivers/edac/edac_stub.c +++ b/trunk/drivers/edac/edac_stub.c @@ -5,7 +5,7 @@ * * 2007 (c) MontaVista Software, Inc. * 2010 (c) Advanced Micro Devices Inc. - * Borislav Petkov + * Borislav Petkov * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/trunk/drivers/edac/mce_amd_inj.c b/trunk/drivers/edac/mce_amd_inj.c index 66b5151c1080..2ae78f20cc28 100644 --- a/trunk/drivers/edac/mce_amd_inj.c +++ b/trunk/drivers/edac/mce_amd_inj.c @@ -6,7 +6,7 @@ * This file may be distributed under the terms of the GNU General Public * License version 2. * - * Copyright (c) 2010: Borislav Petkov + * Copyright (c) 2010: Borislav Petkov * Advanced Micro Devices Inc. 
*/ @@ -168,6 +168,6 @@ module_init(edac_init_mce_inject); module_exit(edac_exit_mce_inject); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Borislav Petkov "); +MODULE_AUTHOR("Borislav Petkov "); MODULE_AUTHOR("AMD Inc."); MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); diff --git a/trunk/drivers/firewire/sbp2.c b/trunk/drivers/firewire/sbp2.c index 1162d6b3bf85..bb1b392f5cda 100644 --- a/trunk/drivers/firewire/sbp2.c +++ b/trunk/drivers/firewire/sbp2.c @@ -1546,6 +1546,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) struct sbp2_logical_unit *lu = sdev->hostdata; sdev->use_10_for_rw = 1; + sdev->no_report_opcodes = 1; + sdev->no_write_same = 1; if (sbp2_param_exclusive_login) sdev->manage_start_stop = 1; diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig index d055cee36942..47150f5ded04 100644 --- a/trunk/drivers/gpio/Kconfig +++ b/trunk/drivers/gpio/Kconfig @@ -47,7 +47,7 @@ if GPIOLIB config OF_GPIO def_bool y - depends on OF && !SPARC + depends on OF config DEBUG_GPIO bool "Debug GPIO calls" @@ -466,7 +466,7 @@ config GPIO_ADP5588_IRQ config GPIO_ADNP tristate "Avionic Design N-bit GPIO expander" - depends on I2C && OF + depends on I2C && OF_GPIO help This option enables support for N GPIOs found on Avionic Design I2C GPIO expanders. The register space will be extended by powers diff --git a/trunk/drivers/gpio/gpio-mcp23s08.c b/trunk/drivers/gpio/gpio-mcp23s08.c index 0f425189de11..ce1c84760076 100644 --- a/trunk/drivers/gpio/gpio-mcp23s08.c +++ b/trunk/drivers/gpio/gpio-mcp23s08.c @@ -77,7 +77,7 @@ struct mcp23s08_driver_data { /*----------------------------------------------------------------------*/ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg) { @@ -399,7 +399,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, break; #endif /* CONFIG_SPI_MASTER */ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) case MCP_TYPE_008: mcp->ops = &mcp23008_ops; mcp->chip.ngpio = 8; @@ -473,7 +473,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, /*----------------------------------------------------------------------*/ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) static int __devinit mcp230xx_probe(struct i2c_client *client, const struct i2c_device_id *id) diff --git a/trunk/drivers/gpio/gpio-mvebu.c b/trunk/drivers/gpio/gpio-mvebu.c index cf7afb9eb61a..be65c0451ad5 100644 --- a/trunk/drivers/gpio/gpio-mvebu.c +++ b/trunk/drivers/gpio/gpio-mvebu.c @@ -92,6 +92,11 @@ static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip) return mvchip->membase + GPIO_OUT_OFF; } +static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip) +{ + return mvchip->membase + GPIO_BLINK_EN_OFF; +} + static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip) { return mvchip->membase + GPIO_IO_CONF_OFF; @@ -206,6 +211,23 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin) return (u >> pin) & 1; } +static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value) +{ + struct mvebu_gpio_chip *mvchip = + container_of(chip, struct mvebu_gpio_chip, chip); + unsigned long flags; + u32 u; + + spin_lock_irqsave(&mvchip->lock, flags); + u = readl_relaxed(mvebu_gpioreg_blink(mvchip)); + if (value) + u |= 1 << pin; + else + u &= ~(1 << pin); + writel_relaxed(u, mvebu_gpioreg_blink(mvchip)); + spin_unlock_irqrestore(&mvchip->lock, flags); +} + static int mvebu_gpio_direction_input(struct 
gpio_chip *chip, unsigned pin) { struct mvebu_gpio_chip *mvchip = @@ -244,6 +266,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin, if (ret) return ret; + mvebu_gpio_blink(chip, pin, 0); mvebu_gpio_set(chip, pin, value); spin_lock_irqsave(&mvchip->lock, flags); diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig index 983201b450f1..18321b68b880 100644 --- a/trunk/drivers/gpu/drm/Kconfig +++ b/trunk/drivers/gpu/drm/Kconfig @@ -210,5 +210,3 @@ source "drivers/gpu/drm/mgag200/Kconfig" source "drivers/gpu/drm/cirrus/Kconfig" source "drivers/gpu/drm/shmobile/Kconfig" - -source "drivers/gpu/drm/tegra/Kconfig" diff --git a/trunk/drivers/gpu/drm/Makefile b/trunk/drivers/gpu/drm/Makefile index 6f58c81cfcbc..2ff5cefe9ead 100644 --- a/trunk/drivers/gpu/drm/Makefile +++ b/trunk/drivers/gpu/drm/Makefile @@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_context.o drm_dma.o \ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ - drm_agpsupport.o drm_scatter.o drm_pci.o \ + drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ drm_crtc.o drm_modes.o drm_edid.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ @@ -16,11 +16,10 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o -drm-$(CONFIG_PCI) += ati_pcigart.o drm-usb-y := drm_usb.o -drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o +drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o @@ -49,5 +48,4 @@ obj-$(CONFIG_DRM_GMA500) += gma500/ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ -obj-$(CONFIG_DRM_TEGRA) += tegra/ obj-y += i2c/ diff --git a/trunk/drivers/gpu/drm/ast/ast_ttm.c b/trunk/drivers/gpu/drm/ast/ast_ttm.c index 0a54f65a8ebb..1a026ac2dfb4 100644 --- a/trunk/drivers/gpu/drm/ast/ast_ttm.c +++ b/trunk/drivers/gpu/drm/ast/ast_ttm.c @@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align, ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, ttm_bo_type_device, &astbo->placement, - align >> PAGE_SHIFT, false, NULL, acc_size, + align >> PAGE_SHIFT, 0, false, NULL, acc_size, NULL, ast_bo_ttm_destroy); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c index dcd1a8c029eb..101e423c8991 100644 --- a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -35,15 +35,12 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { }; -static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev) +static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev) { struct apertures_struct *ap; bool primary = false; ap = alloc_apertures(1); - if (!ap) - return -ENOMEM; - ap->ranges[0].base = pci_resource_start(pdev, 0); ap->ranges[0].size = pci_resource_len(pdev, 0); @@ -52,18 +49,12 @@ static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev) #endif remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary); kfree(ap); - - return 0; } static int __devinit cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - int ret; - - ret = cirrus_kick_out_firmware_fb(pdev); - if (ret) - return ret; + 
cirrus_kick_out_firmware_fb(pdev); return drm_get_pci_dev(pdev, ent, &driver); } diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c b/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c index 90d770143cc2..bc83f835c830 100644 --- a/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align, ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size, ttm_bo_type_device, &cirrusbo->placement, - align >> PAGE_SHIFT, false, NULL, acc_size, + align >> PAGE_SHIFT, 0, false, NULL, acc_size, NULL, cirrus_bo_ttm_destroy); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index f2d667b8bee2..ef1b22144d37 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -470,8 +470,10 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - kfree(crtc->gamma_store); - crtc->gamma_store = NULL; + if (crtc->gamma_store) { + kfree(crtc->gamma_store); + crtc->gamma_store = NULL; + } drm_mode_object_put(dev, &crtc->base); list_del(&crtc->head); @@ -553,17 +555,16 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); connector->edid_blob_ptr = NULL; - connector->status = connector_status_unknown; list_add_tail(&connector->head, &dev->mode_config.connector_list); dev->mode_config.num_connector++; if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.edid_property, 0); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.dpms_property, 0); out: @@ -2279,21 +2280,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) for (i = 0; i < num_planes; i++) { unsigned int width = r->width / (i != 0 ? hsub : 1); - unsigned int height = r->height / (i != 0 ? 
vsub : 1); - unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i); if (!r->handles[i]) { DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); return -EINVAL; } - if ((uint64_t) width * cpp > UINT_MAX) - return -ERANGE; - - if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX) - return -ERANGE; - - if (r->pitches[i] < width * cpp) { + if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) { DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); return -EINVAL; } @@ -2330,11 +2323,6 @@ int drm_mode_addfb2(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - if (r->flags & ~DRM_MODE_FB_INTERLACED) { - DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); - return -EINVAL; - } - if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", r->width, config->min_width, config->max_width); @@ -2928,6 +2916,27 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property) } EXPORT_SYMBOL(drm_property_destroy); +void drm_connector_attach_property(struct drm_connector *connector, + struct drm_property *property, uint64_t init_val) +{ + drm_object_attach_property(&connector->base, property, init_val); +} +EXPORT_SYMBOL(drm_connector_attach_property); + +int drm_connector_property_set_value(struct drm_connector *connector, + struct drm_property *property, uint64_t value) +{ + return drm_object_property_set_value(&connector->base, property, value); +} +EXPORT_SYMBOL(drm_connector_property_set_value); + +int drm_connector_property_get_value(struct drm_connector *connector, + struct drm_property *property, uint64_t *val) +{ + return drm_object_property_get_value(&connector->base, property, val); +} +EXPORT_SYMBOL(drm_connector_property_get_value); + void drm_object_attach_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t init_val) @@ -3164,17 +3173,15 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, /* Delete edid, when there is none. 
*/ if (!edid) { connector->edid_blob_ptr = NULL; - ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0); + ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0); return ret; } size = EDID_LENGTH * (1 + edid->extensions); connector->edid_blob_ptr = drm_property_create_blob(connector->dev, size, edid); - if (!connector->edid_blob_ptr) - return -EINVAL; - ret = drm_object_property_set_value(&connector->base, + ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, connector->edid_blob_ptr->base.id); @@ -3197,9 +3204,6 @@ static bool drm_property_change_is_valid(struct drm_property *property, for (i = 0; i < property->num_values; i++) valid_mask |= (1ULL << property->values[i]); return !(value & ~valid_mask); - } else if (property->flags & DRM_MODE_PROP_BLOB) { - /* Only the driver knows */ - return true; } else { int i; for (i = 0; i < property->num_values; i++) @@ -3241,7 +3245,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, /* store the property value if successful */ if (!ret) - drm_object_property_set_value(&connector->base, property, value); + drm_connector_property_set_value(connector, property, value); return ret; } @@ -3652,12 +3656,9 @@ void drm_mode_config_reset(struct drm_device *dev) if (encoder->funcs->reset) encoder->funcs->reset(encoder); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - connector->status = connector_status_unknown; - + list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->funcs->reset) connector->funcs->reset(connector); - } } EXPORT_SYMBOL(drm_mode_config_reset); diff --git a/trunk/drivers/gpu/drm/drm_crtc_helper.c b/trunk/drivers/gpu/drm/drm_crtc_helper.c index 7b2d378b2576..1227adf74dbc 100644 --- a/trunk/drivers/gpu/drm/drm_crtc_helper.c +++ b/trunk/drivers/gpu/drm/drm_crtc_helper.c @@ -39,35 +39,6 @@ #include #include -/** - * drm_helper_move_panel_connectors_to_head() - move panels to the front in the - * connector list - * @dev: drm device to operate on - * - * Some userspace presumes that the first connected connector is the main - * display, where it's supposed to display e.g. the login screen. For - * laptops, this should be the main panel. Use this function to sort all - * (eDP/LVDS) panels to the front of the connector list, instead of - * painstakingly trying to initialize them in the right order. - */ -void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) -{ - struct drm_connector *connector, *tmp; - struct list_head panel_list; - - INIT_LIST_HEAD(&panel_list); - - list_for_each_entry_safe(connector, tmp, - &dev->mode_config.connector_list, head) { - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || - connector->connector_type == DRM_MODE_CONNECTOR_eDP) - list_move_tail(&connector->head, &panel_list); - } - - list_splice(&panel_list, &dev->mode_config.connector_list); -} -EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); - static bool drm_kms_helper_poll = true; module_param_named(poll, drm_kms_helper_poll, bool, 0600); @@ -93,21 +64,22 @@ static void drm_mode_validate_flag(struct drm_connector *connector, /** * drm_helper_probe_single_connector_modes - get complete set of display modes - * @connector: connector to probe + * @dev: DRM device * @maxX: max width for modes * @maxY: max height for modes * * LOCKING: * Caller must hold mode config lock. 
* - * Based on the helper callbacks implemented by @connector try to detect all - * valid modes. Modes will first be added to the connector's probed_modes list, - * then culled (based on validity and the @maxX, @maxY parameters) and put into - * the normal modes list. + * Based on @dev's mode_config layout, scan all the connectors and try to detect + * modes on them. Modes will first be added to the connector's probed_modes + * list, then culled (based on validity and the @maxX, @maxY parameters) and + * put into the normal modes list. * - * Intended to be use as a generic implementation of the ->probe() @connector - * callback for drivers that use the crtc helpers for output mode filtering and - * detection. + * Intended to be used either at bootup time or when major configuration + * changes have occurred. + * + * FIXME: take into account monitor limits * * RETURNS: * Number of modes found on @connector. @@ -137,13 +109,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, connector->funcs->force(connector); } else { connector->status = connector->funcs->detect(connector, true); - } - - /* Re-enable polling in case the global poll config changed. */ - if (drm_kms_helper_poll != dev->mode_config.poll_running) drm_kms_helper_poll_enable(dev); - - dev->mode_config.poll_running = drm_kms_helper_poll; + } if (connector->status == connector_status_disconnected) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", @@ -358,24 +325,17 @@ drm_crtc_prepare_encoders(struct drm_device *dev) } /** - * drm_crtc_helper_set_mode - internal helper to set a mode + * drm_crtc_set_mode - set a mode * @crtc: CRTC to program * @mode: mode to use - * @x: horizontal offset into the surface - * @y: vertical offset into the surface - * @old_fb: old framebuffer, for cleanup + * @x: width of mode + * @y: height of mode * * LOCKING: * Caller must hold mode config lock. * * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance - * to fixup or reject the mode prior to trying to set it. This is an internal - * helper that drivers could e.g. use to update properties that require the - * entire output pipe to be disabled and re-enabled in a new configuration. For - * example for changing whether audio is enabled on a hdmi link or for changing - * panel fitter or dither attributes. It is also called by the - * drm_crtc_helper_set_config() helper function to drive the mode setting - * sequence. + * to fixup or reject the mode prior to trying to set it. * * RETURNS: * True if the mode was set successfully, or false otherwise. @@ -531,19 +491,20 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) /** * drm_crtc_helper_set_config - set a new config from userspace - * @set: mode set configuration + * @crtc: CRTC to setup + * @crtc_info: user provided configuration + * @new_mode: new mode to set + * @connector_set: set of connectors for the new config + * @fb: new framebuffer * * LOCKING: * Caller must hold mode config lock. * - * Setup a new configuration, provided by the upper layers (either an ioctl call - * from userspace or internally e.g. from the fbdev suppport code) in @set, and - * enable it. This is the main helper functions for drivers that implement - * kernel mode setting with the crtc helper functions and the assorted - * ->prepare(), ->modeset() and ->commit() helper callbacks. + * Setup a new configuration, provided by the user in @crtc_info, and enable + * it. * * RETURNS: - * Returns 0 on success, -ERRNO on failure. + * Zero. 
(FIXME) */ int drm_crtc_helper_set_config(struct drm_mode_set *set) { @@ -839,14 +800,12 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) } /** - * drm_helper_connector_dpms() - connector dpms helper implementation - * @connector: affected connector - * @mode: DPMS mode + * drm_helper_connector_dpms + * @connector affected connector + * @mode DPMS mode * - * This is the main helper function provided by the crtc helper framework for - * implementing the DPMS connector attribute. It computes the new desired DPMS - * state for all encoders and crtcs in the output mesh and calls the ->dpms() - * callback provided by the driver appropriately. + * Calls the low-level connector DPMS function, then + * calls appropriate encoder and crtc DPMS functions as well */ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) { @@ -959,15 +918,6 @@ int drm_helper_resume_force_mode(struct drm_device *dev) } EXPORT_SYMBOL(drm_helper_resume_force_mode); -void drm_kms_helper_hotplug_event(struct drm_device *dev) -{ - /* send a uevent + call fbdev */ - drm_sysfs_hotplug_event(dev); - if (dev->mode_config.funcs->output_poll_changed) - dev->mode_config.funcs->output_poll_changed(dev); -} -EXPORT_SYMBOL(drm_kms_helper_hotplug_event); - #define DRM_OUTPUT_POLL_PERIOD (10*HZ) static void output_poll_execute(struct work_struct *work) { @@ -983,22 +933,20 @@ static void output_poll_execute(struct work_struct *work) mutex_lock(&dev->mode_config.mutex); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - /* Ignore forced connectors. */ - if (connector->force) + /* if this is HPD or polled don't check it - + TV out for instance */ + if (!connector->polled) continue; - /* Ignore HPD capable connectors and connectors where we don't - * want any hotplug detection at all for polling. 
*/ - if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD) - continue; - - repoll = true; + else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) + repoll = true; old_status = connector->status; /* if we are connected and don't want to poll for disconnect skip it */ if (old_status == connector_status_connected && - !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT)) + !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && + !(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; connector->status = connector->funcs->detect(connector, false); @@ -1012,8 +960,12 @@ static void output_poll_execute(struct work_struct *work) mutex_unlock(&dev->mode_config.mutex); - if (changed) - drm_kms_helper_hotplug_event(dev); + if (changed) { + /* send a uevent + call fbdev */ + drm_sysfs_hotplug_event(dev); + if (dev->mode_config.funcs->output_poll_changed) + dev->mode_config.funcs->output_poll_changed(dev); + } if (repoll) schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); @@ -1036,8 +988,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) return; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT)) + if (connector->polled) poll = true; } @@ -1063,34 +1014,12 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini); void drm_helper_hpd_irq_event(struct drm_device *dev) { - struct drm_connector *connector; - enum drm_connector_status old_status; - bool changed = false; - if (!dev->mode_config.poll_enabled) return; - mutex_lock(&dev->mode_config.mutex); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - - /* Only handle HPD capable connectors. */ - if (!(connector->polled & DRM_CONNECTOR_POLL_HPD)) - continue; - - old_status = connector->status; - - connector->status = connector->funcs->detect(connector, false); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", - connector->base.id, - drm_get_connector_name(connector), - old_status, connector->status); - if (old_status != connector->status) - changed = true; - } - - mutex_unlock(&dev->mode_config.mutex); - - if (changed) - drm_kms_helper_hotplug_event(dev); + /* kill timer and schedule immediate execution, this doesn't block */ + cancel_delayed_work(&dev->mode_config.output_poll_work); + if (drm_kms_helper_poll) + schedule_delayed_work(&dev->mode_config.output_poll_work, 0); } EXPORT_SYMBOL(drm_helper_hpd_irq_event); diff --git a/trunk/drivers/gpu/drm/drm_dp_helper.c b/trunk/drivers/gpu/drm/drm_dp_i2c_helper.c similarity index 58% rename from trunk/drivers/gpu/drm/drm_dp_helper.c rename to trunk/drivers/gpu/drm/drm_dp_i2c_helper.c index 89e196627160..7f246f212457 100644 --- a/trunk/drivers/gpu/drm/drm_dp_helper.c +++ b/trunk/drivers/gpu/drm/drm_dp_i2c_helper.c @@ -30,15 +30,6 @@ #include #include -/** - * DOC: dp helpers - * - * These functions contain some common logic and helpers at various abstraction - * levels to deal with Display Port sink devices and related things like DP aux - * channel transfers, EDID reading over DP aux channels, decoding certain DPCD - * blocks, ... 
- */ - /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ static int i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, @@ -46,7 +37,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, { struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; int ret; - + ret = (*algo_data->aux_ch)(adapter, mode, write_byte, read_byte); return ret; @@ -191,6 +182,7 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) { (void) i2c_algo_dp_aux_address(adapter, 0, false); (void) i2c_algo_dp_aux_stop(adapter, false); + } static int @@ -202,23 +194,11 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) return 0; } -/** - * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper - * @adapter: i2c adapter to register - * - * This registers an i2c adapater that uses dp aux channel as it's underlaying - * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure - * and store it in the algo_data member of the @adapter argument. This will be - * used by the i2c over dp aux algorithm to drive the hardware. - * - * RETURNS: - * 0 on success, -ERRNO on failure. - */ int i2c_dp_aux_add_bus(struct i2c_adapter *adapter) { int error; - + error = i2c_dp_aux_prepare_bus(adapter); if (error) return error; @@ -226,123 +206,3 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter) return error; } EXPORT_SYMBOL(i2c_dp_aux_add_bus); - -/* Helpers for DP link training */ -static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) -{ - return link_status[r - DP_LANE0_1_STATUS]; -} - -static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_LANE0_1_STATUS + (lane >> 1); - int s = (lane & 1) * 4; - u8 l = dp_link_status(link_status, i); - return (l >> s) & 0xf; -} - -bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count) -{ - u8 lane_align; - u8 lane_status; - int lane; - - lane_align = dp_link_status(link_status, - DP_LANE_ALIGN_STATUS_UPDATED); - if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) - return false; - for (lane = 0; lane < lane_count; lane++) { - lane_status = dp_get_lane_status(link_status, lane); - if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) - return false; - } - return true; -} -EXPORT_SYMBOL(drm_dp_channel_eq_ok); - -bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count) -{ - int lane; - u8 lane_status; - - for (lane = 0; lane < lane_count; lane++) { - lane_status = dp_get_lane_status(link_status, lane); - if ((lane_status & DP_LANE_CR_DONE) == 0) - return false; - } - return true; -} -EXPORT_SYMBOL(drm_dp_clock_recovery_ok); - -u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); - int s = ((lane & 1) ? - DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : - DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); - u8 l = dp_link_status(link_status, i); - - return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; -} -EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage); - -u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); - int s = ((lane & 1) ? 
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : - DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); - u8 l = dp_link_status(link_status, i); - - return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; -} -EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); - -void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) - udelay(100); - else - mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); -} -EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); - -void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) - udelay(400); - else - mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); -} -EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay); - -u8 drm_dp_link_rate_to_bw_code(int link_rate) -{ - switch (link_rate) { - case 162000: - default: - return DP_LINK_BW_1_62; - case 270000: - return DP_LINK_BW_2_7; - case 540000: - return DP_LINK_BW_5_4; - } -} -EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code); - -int drm_dp_bw_code_to_link_rate(u8 link_bw) -{ - switch (link_bw) { - case DP_LINK_BW_1_62: - default: - return 162000; - case DP_LINK_BW_2_7: - return 270000; - case DP_LINK_BW_5_4: - return 540000; - } -} -EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index 484c36a4b7a5..fadcd44ff196 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -307,9 +307,12 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, static bool drm_edid_is_zero(u8 *in_edid, int length) { - if (memchr_inv(in_edid, 0, length)) - return false; + int i; + u32 *raw_edid = (u32 *)in_edid; + for (i = 0; i < length / 4; i++) + if (*(raw_edid + i) != 0) + return false; return true; } @@ -1513,26 +1516,6 @@ u8 *drm_find_cea_extension(struct edid *edid) } EXPORT_SYMBOL(drm_find_cea_extension); -/* - * Looks for a CEA mode matching given drm_display_mode. - * Returns its CEA Video ID code, or 0 if not found. - */ -u8 drm_match_cea_mode(struct drm_display_mode *to_match) -{ - struct drm_display_mode *cea_mode; - u8 mode; - - for (mode = 0; mode < drm_num_cea_modes; mode++) { - cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode]; - - if (drm_mode_equal(to_match, cea_mode)) - return mode + 1; - } - return 0; -} -EXPORT_SYMBOL(drm_match_cea_mode); - - static int do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) { @@ -1639,7 +1622,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db) if (len >= 12) connector->audio_latency[1] = db[12]; - DRM_DEBUG_KMS("HDMI: DVI dual %d, " + DRM_LOG_KMS("HDMI: DVI dual %d, " "max TMDS clock %d, " "latency present %d %d, " "video latency %d %d, " diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c index 954d175bd7fa..4d58d7e6af3f 100644 --- a/trunk/drivers/gpu/drm/drm_fb_helper.c +++ b/trunk/drivers/gpu/drm/drm_fb_helper.c @@ -27,8 +27,6 @@ * Dave Airlie * Jesse Barnes */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -45,15 +43,6 @@ MODULE_LICENSE("GPL and additional rights"); static LIST_HEAD(kernel_fb_helper_list); -/** - * DOC: fbdev helpers - * - * The fb helper functions are useful to provide an fbdev on top of a drm kernel - * mode setting driver. They can be used mostly independantely from the crtc - * helper functions used by many drivers to implement the kernel mode setting - * interfaces. 
- */ - /* simple single crtc case helper function */ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { @@ -106,16 +95,10 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) if (mode->force) { const char *s; switch (mode->force) { - case DRM_FORCE_OFF: - s = "OFF"; - break; - case DRM_FORCE_ON_DIGITAL: - s = "ON - dig"; - break; + case DRM_FORCE_OFF: s = "OFF"; break; + case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; default: - case DRM_FORCE_ON: - s = "ON"; - break; + case DRM_FORCE_ON: s = "ON"; break; } DRM_INFO("forcing %s connector %s\n", @@ -282,7 +265,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, if (panic_timeout < 0) return 0; - pr_err("panic occurred, switching back to text console\n"); + printk(KERN_ERR "panic occurred, switching back to text console\n"); return drm_fb_helper_force_kernel_mode(); } EXPORT_SYMBOL(drm_fb_helper_panic); @@ -348,7 +331,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) for (j = 0; j < fb_helper->connector_count; j++) { connector = fb_helper->connector_info[j]->connector; connector->funcs->dpms(connector, dpms_mode); - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, dev->mode_config.dpms_property, dpms_mode); } } @@ -450,7 +433,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) if (!list_empty(&fb_helper->kernel_fb_list)) { list_del(&fb_helper->kernel_fb_list); if (list_empty(&kernel_fb_helper_list)) { - pr_info("drm: unregistered panic notifier\n"); + printk(KERN_INFO "drm: unregistered panic notifier\n"); atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); @@ -741,9 +724,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, /* if driver picks 8 or 16 by default use that for both depth/bpp */ - if (preferred_bpp != sizes.surface_bpp) + if (preferred_bpp != sizes.surface_bpp) { sizes.surface_depth = sizes.surface_bpp = preferred_bpp; - + } /* first up get a count of crtcs now in use and new min/maxes width/heights */ for (i = 0; i < fb_helper->connector_count; i++) { struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; @@ -811,16 +794,18 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, info = fb_helper->fbdev; /* set the fb pointer */ - for (i = 0; i < fb_helper->crtc_count; i++) + for (i = 0; i < fb_helper->crtc_count; i++) { fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; + } if (new_fb) { info->var.pixclock = 0; - if (register_framebuffer(info) < 0) + if (register_framebuffer(info) < 0) { return -EINVAL; + } - dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n", - info->node, info->fix.id); + printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + info->fix.id); } else { drm_fb_helper_set_par(info); @@ -829,7 +814,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, /* Switch back to kernel console on panic */ /* multi card linked list maybe */ if (list_empty(&kernel_fb_helper_list)) { - dev_info(fb_helper->dev->dev, "registered panic notifier\n"); + printk(KERN_INFO "drm: registered panic notifier\n"); atomic_notifier_chain_register(&panic_notifier_list, &paniced); register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); @@ -1017,11 +1002,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict) { bool enable; - if (strict) + if (strict) { enable = connector->status == 
connector_status_connected; - else + } else { enable = connector->status != connector_status_disconnected; - + } return enable; } @@ -1206,8 +1191,9 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, for (c = 0; c < fb_helper->crtc_count; c++) { crtc = &fb_helper->crtc_info[c]; - if ((encoder->possible_crtcs & (1 << c)) == 0) + if ((encoder->possible_crtcs & (1 << c)) == 0) { continue; + } for (o = 0; o < n; o++) if (best_crtcs[o] == crtc) @@ -1260,11 +1246,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) sizeof(struct drm_display_mode *), GFP_KERNEL); enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), GFP_KERNEL); - if (!crtcs || !modes || !enabled) { - DRM_ERROR("Memory allocation failed\n"); - goto out; - } - drm_enable_connectors(fb_helper, enabled); @@ -1303,7 +1284,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) } } -out: kfree(crtcs); kfree(modes); kfree(enabled); @@ -1311,14 +1291,12 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) /** * drm_helper_initial_config - setup a sane initial connector configuration - * @fb_helper: fb_helper device struct - * @bpp_sel: bpp value to use for the framebuffer configuration + * @dev: DRM device * * LOCKING: - * Called at init time by the driver to set up the @fb_helper initial - * configuration, must take the mode config lock. + * Called at init time, must take mode config lock. * - * Scans the CRTCs and connectors and tries to put together an initial setup. + * Scan the CRTCs and connectors and try to put together an initial setup. * At the moment, this is a cloned configuration across all heads with * a new framebuffer object as the backing store. * @@ -1341,9 +1319,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) /* * we shouldn't end up with no modes here. */ - if (count == 0) - dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n"); - + if (count == 0) { + printk(KERN_INFO "No connectors reported connected with modes\n"); + } drm_setup_crtcs(fb_helper); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); @@ -1352,7 +1330,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); /** * drm_fb_helper_hotplug_event - respond to a hotplug notification by - * probing all the outputs attached to the fb + * probing all the outputs attached to the fb. 
* @fb_helper: the drm_fb_helper * * LOCKING: diff --git a/trunk/drivers/gpu/drm/drm_fops.c b/trunk/drivers/gpu/drm/drm_fops.c index 7ef1b673e1be..133b4132983e 100644 --- a/trunk/drivers/gpu/drm/drm_fops.c +++ b/trunk/drivers/gpu/drm/drm_fops.c @@ -121,6 +121,8 @@ int drm_open(struct inode *inode, struct file *filp) int minor_id = iminor(inode); struct drm_minor *minor; int retcode = 0; + int need_setup = 0; + struct address_space *old_mapping; minor = idr_find(&drm_minors_idr, minor_id); if (!minor) @@ -132,23 +134,37 @@ int drm_open(struct inode *inode, struct file *filp) if (drm_device_is_unplugged(dev)) return -ENODEV; + if (!dev->open_count++) + need_setup = 1; + mutex_lock(&dev->struct_mutex); + old_mapping = dev->dev_mapping; + if (old_mapping == NULL) + dev->dev_mapping = &inode->i_data; + /* ihold ensures nobody can remove inode with our i_data */ + ihold(container_of(dev->dev_mapping, struct inode, i_data)); + inode->i_mapping = dev->dev_mapping; + filp->f_mapping = dev->dev_mapping; + mutex_unlock(&dev->struct_mutex); + retcode = drm_open_helper(inode, filp, dev); - if (!retcode) { - atomic_inc(&dev->counts[_DRM_STAT_OPENS]); - if (!dev->open_count++) - retcode = drm_setup(dev); - } - if (!retcode) { - mutex_lock(&dev->struct_mutex); - if (dev->dev_mapping == NULL) - dev->dev_mapping = &inode->i_data; - /* ihold ensures nobody can remove inode with our i_data */ - ihold(container_of(dev->dev_mapping, struct inode, i_data)); - inode->i_mapping = dev->dev_mapping; - filp->f_mapping = dev->dev_mapping; - mutex_unlock(&dev->struct_mutex); + if (retcode) + goto err_undo; + atomic_inc(&dev->counts[_DRM_STAT_OPENS]); + if (need_setup) { + retcode = drm_setup(dev); + if (retcode) + goto err_undo; } + return 0; +err_undo: + mutex_lock(&dev->struct_mutex); + filp->f_mapping = old_mapping; + inode->i_mapping = old_mapping; + iput(container_of(dev->dev_mapping, struct inode, i_data)); + dev->dev_mapping = old_mapping; + mutex_unlock(&dev->struct_mutex); + dev->open_count--; return retcode; } EXPORT_SYMBOL(drm_open); diff --git a/trunk/drivers/gpu/drm/drm_hashtab.c b/trunk/drivers/gpu/drm/drm_hashtab.c index 80254547a3f8..c3745c4d46d8 100644 --- a/trunk/drivers/gpu/drm/drm_hashtab.c +++ b/trunk/drivers/gpu/drm/drm_hashtab.c @@ -67,8 +67,10 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) hashed_key = hash_long(key, ht->order); DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, list, h_list, head) + hlist_for_each(list, h_list) { + entry = hlist_entry(list, struct drm_hash_item, head); DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); + } } static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, @@ -81,7 +83,8 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, hashed_key = hash_long(key, ht->order); h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, list, h_list, head) { + hlist_for_each(list, h_list) { + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return list; if (entry->key > key) @@ -90,24 +93,6 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, return NULL; } -static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht, - unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - struct hlist_node *list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry_rcu(entry, 
list, h_list, head) { - if (entry->key == key) - return list; - if (entry->key > key) - break; - } - return NULL; -} int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) { @@ -120,7 +105,8 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) hashed_key = hash_long(key, ht->order); h_list = &ht->table[hashed_key]; parent = NULL; - hlist_for_each_entry(entry, list, h_list, head) { + hlist_for_each(list, h_list) { + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return -EINVAL; if (entry->key > key) @@ -128,9 +114,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) parent = list; } if (parent) { - hlist_add_after_rcu(parent, &item->head); + hlist_add_after(parent, &item->head); } else { - hlist_add_head_rcu(&item->head, h_list); + hlist_add_head(&item->head, h_list); } return 0; } @@ -170,7 +156,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, { struct hlist_node *list; - list = drm_ht_find_key_rcu(ht, key); + list = drm_ht_find_key(ht, key); if (!list) return -EINVAL; @@ -185,7 +171,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) list = drm_ht_find_key(ht, key); if (list) { - hlist_del_init_rcu(list); + hlist_del_init(list); return 0; } return -EINVAL; @@ -193,7 +179,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) { - hlist_del_init_rcu(&item->head); + hlist_del_init(&item->head); return 0; } EXPORT_SYMBOL(drm_ht_remove_item); diff --git a/trunk/drivers/gpu/drm/drm_ioctl.c b/trunk/drivers/gpu/drm/drm_ioctl.c index e77bd8b57df2..23dd97506f28 100644 --- a/trunk/drivers/gpu/drm/drm_ioctl.c +++ b/trunk/drivers/gpu/drm/drm_ioctl.c @@ -287,9 +287,6 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0; req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0; break; - case DRM_CAP_TIMESTAMP_MONOTONIC: - req->value = drm_timestamp_monotonic; - break; default: return -EINVAL; } diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c index 19c01ca3cc76..3a3d0ce891b9 100644 --- a/trunk/drivers/gpu/drm/drm_irq.c +++ b/trunk/drivers/gpu/drm/drm_irq.c @@ -106,7 +106,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) s64 diff_ns; int vblrc; struct timeval tvblank; - int count = DRM_TIMESTAMP_MAXRETRIES; /* Prevent vblank irq processing while disabling vblank irqs, * so no updates of timestamps or count can happen after we've @@ -132,10 +131,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) do { dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); - } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); - - if (!count) - vblrc = 0; + } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc)); /* Compute time difference to stored timestamp of last vblank * as updated by last invocation of drm_handle_vblank() in vblank irq. 
@@ -580,8 +576,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, unsigned flags, struct drm_crtc *refcrtc) { - ktime_t stime, etime, mono_time_offset; - struct timeval tv_etime; + struct timeval stime, raw_time; struct drm_display_mode *mode; int vbl_status, vtotal, vdisplay; int vpos, hpos, i; @@ -630,15 +625,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, preempt_disable(); /* Get system timestamp before query. */ - stime = ktime_get(); + do_gettimeofday(&stime); /* Get vertical and horizontal scanout pos. vpos, hpos. */ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos); /* Get system timestamp after query. */ - etime = ktime_get(); - if (!drm_timestamp_monotonic) - mono_time_offset = ktime_get_monotonic_offset(); + do_gettimeofday(&raw_time); preempt_enable(); @@ -649,7 +642,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, return -EIO; } - duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime); + duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime); /* Accept result with < max_error nsecs timing uncertainty. */ if (duration_ns <= (s64) *max_error) @@ -696,20 +689,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, vbl_status |= 0x8; } - if (!drm_timestamp_monotonic) - etime = ktime_sub(etime, mono_time_offset); - - /* save this only for debugging purposes */ - tv_etime = ktime_to_timeval(etime); /* Subtract time delta from raw timestamp to get final * vblank_time timestamp for end of vblank. */ - etime = ktime_sub_ns(etime, delta_ns); - *vblank_time = ktime_to_timeval(etime); + *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", crtc, (int)vbl_status, hpos, vpos, - (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, + (long)raw_time.tv_sec, (long)raw_time.tv_usec, (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, (int)duration_ns/1000, i); @@ -721,17 +708,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, } EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); -static struct timeval get_drm_timestamp(void) -{ - ktime_t now; - - now = ktime_get(); - if (!drm_timestamp_monotonic) - now = ktime_sub(now, ktime_get_monotonic_offset()); - - return ktime_to_timeval(now); -} - /** * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent * vblank interval. @@ -769,9 +745,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, } /* GPU high precision timestamp query unsupported or failed. - * Return current monotonic/gettimeofday timestamp as best estimate. + * Return gettimeofday timestamp as best estimate. 
*/ - *tvblank = get_drm_timestamp(); + do_gettimeofday(tvblank); return 0; } @@ -826,47 +802,6 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, } EXPORT_SYMBOL(drm_vblank_count_and_time); -static void send_vblank_event(struct drm_device *dev, - struct drm_pending_vblank_event *e, - unsigned long seq, struct timeval *now) -{ - WARN_ON_SMP(!spin_is_locked(&dev->event_lock)); - e->event.sequence = seq; - e->event.tv_sec = now->tv_sec; - e->event.tv_usec = now->tv_usec; - - list_add_tail(&e->base.link, - &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); - trace_drm_vblank_event_delivered(e->base.pid, e->pipe, - e->event.sequence); -} - -/** - * drm_send_vblank_event - helper to send vblank event after pageflip - * @dev: DRM device - * @crtc: CRTC in question - * @e: the event to send - * - * Updates sequence # and timestamp on event, and sends it to userspace. - * Caller must hold event lock. - */ -void drm_send_vblank_event(struct drm_device *dev, int crtc, - struct drm_pending_vblank_event *e) -{ - struct timeval now; - unsigned int seq; - if (crtc >= 0) { - seq = drm_vblank_count_and_time(dev, crtc, &now); - } else { - seq = 0; - - now = get_drm_timestamp(); - } - send_vblank_event(dev, e, seq, &now); -} -EXPORT_SYMBOL(drm_send_vblank_event); - /** * drm_update_vblank_count - update the master vblank counter * @dev: DRM device @@ -1001,13 +936,6 @@ void drm_vblank_put(struct drm_device *dev, int crtc) } EXPORT_SYMBOL(drm_vblank_put); -/** - * drm_vblank_off - disable vblank events on a CRTC - * @dev: DRM device - * @crtc: CRTC in question - * - * Caller must hold event lock. - */ void drm_vblank_off(struct drm_device *dev, int crtc) { struct drm_pending_vblank_event *e, *t; @@ -1021,19 +949,22 @@ void drm_vblank_off(struct drm_device *dev, int crtc) /* Send any queued vblank events, lest the natives grow disquiet */ seq = drm_vblank_count_and_time(dev, crtc, &now); - - spin_lock(&dev->event_lock); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != crtc) continue; DRM_DEBUG("Sending premature vblank event on disable: \ wanted %d, current %d\n", e->event.sequence, seq); - list_del(&e->base.link); + + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, e->pipe); - send_vblank_event(dev, e, seq, &now); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, + e->event.sequence); } - spin_unlock(&dev->event_lock); spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } @@ -1176,9 +1107,15 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, e->event.sequence = vblwait->request.sequence; if ((seq - vblwait->request.sequence) <= (1 << 23)) { + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, pipe); - send_vblank_event(dev, e, seq, &now); + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); vblwait->reply.sequence = seq; + trace_drm_vblank_event_delivered(current->pid, pipe, + vblwait->request.sequence); } else { /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); @@ -1319,9 +1256,14 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc) DRM_DEBUG("vblank event on %d, current %d\n", e->event.sequence, seq); - 
list_del(&e->base.link); + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, e->pipe); - send_vblank_event(dev, e, seq, &now); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, + e->event.sequence); } spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c index d8da30e90db5..59450f39bf96 100644 --- a/trunk/drivers/gpu/drm/drm_modes.c +++ b/trunk/drivers/gpu/drm/drm_modes.c @@ -46,7 +46,7 @@ * * Describe @mode using DRM_DEBUG. */ -void drm_mode_debug_printmodeline(const struct drm_display_mode *mode) +void drm_mode_debug_printmodeline(struct drm_display_mode *mode) { DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " "0x%x 0x%x\n", @@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat); * RETURNS: * @mode->hdisplay */ -int drm_mode_width(const struct drm_display_mode *mode) +int drm_mode_width(struct drm_display_mode *mode) { return mode->hdisplay; @@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width); * RETURNS: * @mode->vdisplay */ -int drm_mode_height(const struct drm_display_mode *mode) +int drm_mode_height(struct drm_display_mode *mode) { return mode->vdisplay; } @@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate); * RETURNS: * True if the modes are equal, false otherwise. */ -bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) +bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2) { /* do clock check convert to PICOS so fb modes get matched * the same */ diff --git a/trunk/drivers/gpu/drm/drm_pci.c b/trunk/drivers/gpu/drm/drm_pci.c index 754bc96e10c7..ba33144257e5 100644 --- a/trunk/drivers/gpu/drm/drm_pci.c +++ b/trunk/drivers/gpu/drm/drm_pci.c @@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) { struct pci_dev *root; int pos; - u32 lnkcap = 0, lnkcap2 = 0; + u32 lnkcap, lnkcap2; *mask = 0; if (!dev->pdev) diff --git a/trunk/drivers/gpu/drm/drm_stub.c b/trunk/drivers/gpu/drm/drm_stub.c index 200e104f1fa0..c236fd27eba6 100644 --- a/trunk/drivers/gpu/drm/drm_stub.c +++ b/trunk/drivers/gpu/drm/drm_stub.c @@ -46,24 +46,16 @@ EXPORT_SYMBOL(drm_vblank_offdelay); unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ EXPORT_SYMBOL(drm_timestamp_precision); -/* - * Default to use monotonic timestamps for wait-for-vblank and page-flip - * complete events. - */ -unsigned int drm_timestamp_monotonic = 1; - MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. 
error on timestamps [usecs]"); -MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); module_param_named(debug, drm_debug, int, 0600); module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); -module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); struct idr drm_minors_idr; @@ -229,20 +221,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, if (!file_priv->master) return -EINVAL; - if (file_priv->minor->master) - return -EINVAL; - - mutex_lock(&dev->struct_mutex); - file_priv->minor->master = drm_master_get(file_priv->master); - file_priv->is_master = 1; - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, file_priv, false); - if (unlikely(ret != 0)) { - file_priv->is_master = 0; - drm_master_put(&file_priv->minor->master); + if (!file_priv->minor->master && + file_priv->minor->master != file_priv->master) { + mutex_lock(&dev->struct_mutex); + file_priv->minor->master = drm_master_get(file_priv->master); + file_priv->is_master = 1; + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, file_priv, false); + if (unlikely(ret != 0)) { + file_priv->is_master = 0; + drm_master_put(&file_priv->minor->master); + } } + mutex_unlock(&dev->struct_mutex); } - mutex_unlock(&dev->struct_mutex); return 0; } @@ -500,7 +492,10 @@ void drm_put_dev(struct drm_device *dev) drm_put_minor(&dev->primary); list_del(&dev->driver_item); - kfree(dev->devname); + if (dev->devname) { + kfree(dev->devname); + dev->devname = NULL; + } kfree(dev); } EXPORT_SYMBOL(drm_put_dev); diff --git a/trunk/drivers/gpu/drm/drm_sysfs.c b/trunk/drivers/gpu/drm/drm_sysfs.c index 02296653a058..05cd8fe062af 100644 --- a/trunk/drivers/gpu/drm/drm_sysfs.c +++ b/trunk/drivers/gpu/drm/drm_sysfs.c @@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device, uint64_t dpms_status; int ret; - ret = drm_object_property_get_value(&connector->base, + ret = drm_connector_property_get_value(connector, dev->mode_config.dpms_property, &dpms_status); if (ret) @@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device, return 0; } - ret = drm_object_property_get_value(&connector->base, prop, &subconnector); + ret = drm_connector_property_get_value(connector, prop, &subconnector); if (ret) return 0; @@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device, return 0; } - ret = drm_object_property_get_value(&connector->base, prop, &subconnector); + ret = drm_connector_property_get_value(connector, prop, &subconnector); if (ret) return 0; diff --git a/trunk/drivers/gpu/drm/exynos/Kconfig b/trunk/drivers/gpu/drm/exynos/Kconfig index 86fb75d3fcad..fc345d4ebb03 100644 --- a/trunk/drivers/gpu/drm/exynos/Kconfig +++ b/trunk/drivers/gpu/drm/exynos/Kconfig @@ -10,12 +10,6 @@ config DRM_EXYNOS Choose this option if you have a Samsung SoC EXYNOS chipset. If M is selected the module will be called exynosdrm. -config DRM_EXYNOS_IOMMU - bool "EXYNOS DRM IOMMU Support" - depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU - help - Choose this option if you want to use IOMMU feature for DRM. 
- config DRM_EXYNOS_DMABUF bool "EXYNOS DRM DMABUF" depends on DRM_EXYNOS diff --git a/trunk/drivers/gpu/drm/exynos/Makefile b/trunk/drivers/gpu/drm/exynos/Makefile index 26813b8a5056..eb651ca8e2a8 100644 --- a/trunk/drivers/gpu/drm/exynos/Makefile +++ b/trunk/drivers/gpu/drm/exynos/Makefile @@ -8,7 +8,6 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \ exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ exynos_drm_plane.o -exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \ diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c index 72bf97b96ba0..118c117b3226 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -33,42 +33,73 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, unsigned int flags, struct exynos_drm_gem_buf *buf) { + dma_addr_t start_addr; + unsigned int npages, i = 0; + struct scatterlist *sgl; int ret = 0; - enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS; DRM_DEBUG_KMS("%s\n", __FILE__); + if (IS_NONCONTIG_BUFFER(flags)) { + DRM_DEBUG_KMS("not support allocation type.\n"); + return -EINVAL; + } + if (buf->dma_addr) { DRM_DEBUG_KMS("already allocated.\n"); return 0; } - init_dma_attrs(&buf->dma_attrs); + if (buf->size >= SZ_1M) { + npages = buf->size >> SECTION_SHIFT; + buf->page_size = SECTION_SIZE; + } else if (buf->size >= SZ_64K) { + npages = buf->size >> 16; + buf->page_size = SZ_64K; + } else { + npages = buf->size >> PAGE_SHIFT; + buf->page_size = PAGE_SIZE; + } - if (flags & EXYNOS_BO_NONCONTIG) - attr = DMA_ATTR_WRITE_COMBINE; + buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!buf->sgt) { + DRM_ERROR("failed to allocate sg table.\n"); + return -ENOMEM; + } - dma_set_attr(attr, &buf->dma_attrs); + ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL); + if (ret < 0) { + DRM_ERROR("failed to initialize sg table.\n"); + kfree(buf->sgt); + buf->sgt = NULL; + return -ENOMEM; + } - buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size, - &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs); + buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size, + &buf->dma_addr, GFP_KERNEL); if (!buf->kvaddr) { DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err1; } - buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!buf->sgt) { - DRM_ERROR("failed to allocate sg table.\n"); + buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL); + if (!buf->pages) { + DRM_ERROR("failed to allocate pages.\n"); ret = -ENOMEM; - goto err_free_attrs; + goto err2; } - ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr, - buf->size); - if (ret < 0) { - DRM_ERROR("failed to get sgtable.\n"); - goto err_free_sgt; + sgl = buf->sgt->sgl; + start_addr = buf->dma_addr; + + while (i < npages) { + buf->pages[i] = phys_to_page(start_addr); + sg_set_page(sgl, buf->pages[i], buf->page_size, 0); + sg_dma_address(sgl) = start_addr; + start_addr += buf->page_size; + sgl = sg_next(sgl); + i++; } DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", @@ -77,14 +108,14 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, buf->size); return ret; - -err_free_sgt: +err2: + dma_free_writecombine(dev->dev, buf->size, buf->kvaddr, + (dma_addr_t)buf->dma_addr); + buf->dma_addr = 
(dma_addr_t)NULL; +err1: + sg_free_table(buf->sgt); kfree(buf->sgt); buf->sgt = NULL; -err_free_attrs: - dma_free_attrs(dev->dev, buf->size, buf->kvaddr, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); - buf->dma_addr = (dma_addr_t)NULL; return ret; } @@ -94,6 +125,16 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev, { DRM_DEBUG_KMS("%s.\n", __FILE__); + /* + * release only physically continuous memory and + * non-continuous memory would be released by exynos + * gem framework. + */ + if (IS_NONCONTIG_BUFFER(flags)) { + DRM_DEBUG_KMS("not support allocation type.\n"); + return; + } + if (!buf->dma_addr) { DRM_DEBUG_KMS("dma_addr is invalid.\n"); return; @@ -109,8 +150,11 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev, kfree(buf->sgt); buf->sgt = NULL; - dma_free_attrs(dev->dev, buf->size, buf->kvaddr, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); + kfree(buf->pages); + buf->pages = NULL; + + dma_free_writecombine(dev->dev, buf->size, buf->kvaddr, + (dma_addr_t)buf->dma_addr); buf->dma_addr = (dma_addr_t)NULL; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h index 25cf16285033..3388e4eb4ba2 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h @@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, void exynos_drm_fini_buf(struct drm_device *dev, struct exynos_drm_gem_buf *buffer); -/* allocate physical memory region and setup sgt. */ +/* allocate physical memory region and setup sgt and pages. */ int exynos_drm_alloc_buf(struct drm_device *dev, struct exynos_drm_gem_buf *buf, unsigned int flags); -/* release physical memory region, and sgt. */ +/* release physical memory region, sgt and pages. 
*/ void exynos_drm_free_buf(struct drm_device *dev, unsigned int flags, struct exynos_drm_gem_buf *buffer); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 2efa4b031d73..fce245f64c4f 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -236,21 +236,16 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, goto out; } - spin_lock_irq(&dev->event_lock); list_add_tail(&event->base.link, &dev_priv->pageflip_event_list); - spin_unlock_irq(&dev->event_lock); crtc->fb = fb; ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL); if (ret) { crtc->fb = old_fb; - - spin_lock_irq(&dev->event_lock); drm_vblank_put(dev, exynos_crtc->pipe); list_del(&event->base.link); - spin_unlock_irq(&dev->event_lock); goto out; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 539da9f4eb97..fae1f2ec886c 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -30,22 +30,26 @@ #include -static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev, - struct exynos_drm_gem_buf *buf) +static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages, + unsigned int page_size) { struct sg_table *sgt = NULL; - int ret; + struct scatterlist *sgl; + int i, ret; sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) goto out; - ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr, - buf->dma_addr, buf->size); - if (ret < 0) { - DRM_ERROR("failed to get sgtable.\n"); + ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL); + if (ret) goto err_free_sgt; - } + + if (page_size < PAGE_SIZE) + page_size = PAGE_SIZE; + + for_each_sg(sgt->sgl, sgl, nr_pages, i) + sg_set_page(sgl, pages[i], page_size, 0); return sgt; @@ -64,30 +68,32 @@ static struct sg_table * struct drm_device *dev = gem_obj->base.dev; struct exynos_drm_gem_buf *buf; struct sg_table *sgt = NULL; + unsigned int npages; int nents; DRM_DEBUG_PRIME("%s\n", __FILE__); - buf = gem_obj->buffer; - if (!buf) { - DRM_ERROR("buffer is null.\n"); - return sgt; - } - mutex_lock(&dev->struct_mutex); - sgt = exynos_get_sgt(dev, buf); - if (!sgt) + buf = gem_obj->buffer; + + /* there should always be pages allocated. 
*/ + if (!buf->pages) { + DRM_ERROR("pages is null.\n"); goto err_unlock; + } - nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); - if (!nents) { - DRM_ERROR("failed to map sgl with iommu.\n"); - sgt = NULL; + npages = buf->size / buf->page_size; + + sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size); + if (!sgt) { + DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n"); goto err_unlock; } + nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); - DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); + DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n", + npages, buf->size, buf->page_size); err_unlock: mutex_unlock(&dev->struct_mutex); @@ -99,7 +105,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir) { dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); - sg_free_table(sgt); kfree(sgt); sgt = NULL; @@ -191,6 +196,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, struct scatterlist *sgl; struct exynos_drm_gem_obj *exynos_gem_obj; struct exynos_drm_gem_buf *buffer; + struct page *page; int ret; DRM_DEBUG_PRIME("%s\n", __FILE__); @@ -227,27 +233,38 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, goto err_unmap_attach; } + buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL); + if (!buffer->pages) { + DRM_ERROR("failed to allocate pages.\n"); + ret = -ENOMEM; + goto err_free_buffer; + } + exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); if (!exynos_gem_obj) { ret = -ENOMEM; - goto err_free_buffer; + goto err_free_pages; } sgl = sgt->sgl; - buffer->size = dma_buf->size; - buffer->dma_addr = sg_dma_address(sgl); - if (sgt->nents == 1) { + buffer->dma_addr = sg_dma_address(sgt->sgl); + buffer->size = sg_dma_len(sgt->sgl); + /* always physically continuous memory if sgt->nents is 1. */ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; } else { - /* - * this case could be CONTIG or NONCONTIG type but for now - * sets NONCONTIG. - * TODO. we have to find a way that exporter can notify - * the type of its own buffer to importer. - */ + unsigned int i = 0; + + buffer->dma_addr = sg_dma_address(sgl); + while (i < sgt->nents) { + buffer->pages[i] = sg_page(sgl); + buffer->size += sg_dma_len(sgl); + sgl = sg_next(sgl); + i++; + } + exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; } @@ -260,6 +277,9 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, return &exynos_gem_obj->base; +err_free_pages: + kfree(buffer->pages); + buffer->pages = NULL; err_free_buffer: kfree(buffer); buffer = NULL; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c index 2b287d2fc92e..1de7baafddd0 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -40,7 +40,6 @@ #include "exynos_drm_vidi.h" #include "exynos_drm_dmabuf.h" #include "exynos_drm_g2d.h" -#include "exynos_drm_iommu.h" #define DRIVER_NAME "exynos" #define DRIVER_DESC "Samsung SoC DRM" @@ -67,18 +66,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) INIT_LIST_HEAD(&private->pageflip_event_list); dev->dev_private = (void *)private; - /* - * create mapping to manage iommu table and set a pointer to iommu - * mapping structure to iommu_mapping of private data. - * also this iommu_mapping can be used to check if iommu is supported - * or not. 
- */ - ret = drm_create_iommu_mapping(dev); - if (ret < 0) { - DRM_ERROR("failed to create iommu mapping.\n"); - goto err_crtc; - } - drm_mode_config_init(dev); /* init kms poll for handling hpd */ @@ -93,7 +80,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) for (nr = 0; nr < MAX_CRTC; nr++) { ret = exynos_drm_crtc_create(dev, nr); if (ret) - goto err_release_iommu_mapping; + goto err_crtc; } for (nr = 0; nr < MAX_PLANE; nr++) { @@ -102,12 +89,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) plane = exynos_plane_init(dev, possible_crtcs, false); if (!plane) - goto err_release_iommu_mapping; + goto err_crtc; } ret = drm_vblank_init(dev, MAX_CRTC); if (ret) - goto err_release_iommu_mapping; + goto err_crtc; /* * probe sub drivers such as display controller and hdmi driver, @@ -139,8 +126,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) exynos_drm_device_unregister(dev); err_vblank: drm_vblank_cleanup(dev); -err_release_iommu_mapping: - drm_release_iommu_mapping(dev); err_crtc: drm_mode_config_cleanup(dev); kfree(private); @@ -157,8 +142,6 @@ static int exynos_drm_unload(struct drm_device *dev) drm_vblank_cleanup(dev); drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); - - drm_release_iommu_mapping(dev); kfree(dev->dev_private); dev->dev_private = NULL; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h index 9c9c2dc75828..a34231036496 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -231,7 +231,8 @@ struct exynos_drm_g2d_private { struct device *dev; struct list_head inuse_cmdlist; struct list_head event_list; - struct list_head userptr_list; + struct list_head gem_list; + unsigned int gem_nr; }; struct drm_exynos_file_private { @@ -240,13 +241,6 @@ struct drm_exynos_file_private { /* * Exynos drm private structure. - * - * @da_start: start address to device address space. - * with iommu, device address space starts from this address - * otherwise default one. - * @da_space_size: size of device address space. - * if 0 then default value is used for it. - * @da_space_order: order to device address space. */ struct exynos_drm_private { struct drm_fb_helper *fb_helper; @@ -261,10 +255,6 @@ struct exynos_drm_private { struct drm_crtc *crtc[MAX_CRTC]; struct drm_property *plane_zpos_property; struct drm_property *crtc_mode_property; - - unsigned long da_start; - unsigned long da_space_size; - unsigned long da_space_order; }; /* diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c index d9afb11aac76..f2df06c603f7 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -234,39 +234,6 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) exynos_encoder->dpms = DRM_MODE_DPMS_ON; } -void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb) -{ - struct exynos_drm_encoder *exynos_encoder; - struct exynos_drm_overlay_ops *overlay_ops; - struct exynos_drm_manager *manager; - struct drm_device *dev = fb->dev; - struct drm_encoder *encoder; - - /* - * make sure that overlay data are updated to real hardware - * for all encoders. - */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - exynos_encoder = to_exynos_encoder(encoder); - - /* if exynos was disabled, just ignor it. 
*/ - if (exynos_encoder->dpms > DRM_MODE_DPMS_ON) - continue; - - manager = exynos_encoder->manager; - overlay_ops = manager->overlay_ops; - - /* - * wait for vblank interrupt - * - this makes sure that overlay data are updated to - * real hardware. - */ - if (overlay_ops->wait_for_vblank) - overlay_ops->wait_for_vblank(manager->dev); - } -} - - static void exynos_drm_encoder_disable(struct drm_encoder *encoder) { struct drm_plane *plane; @@ -538,4 +505,14 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data) if (overlay_ops && overlay_ops->disable) overlay_ops->disable(manager->dev, zpos); + + /* + * wait for vblank interrupt + * - this makes sure that hardware overlay is disabled to avoid + * for the dma accesses to memory after gem buffer was released + * because the setting for disabling the overlay will be updated + * at vsync. + */ + if (overlay_ops && overlay_ops->wait_for_vblank) + overlay_ops->wait_for_vblank(manager->dev); } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h index 88bb25a2a917..6470d9ddf5a1 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h @@ -46,6 +46,5 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data); -void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb); #endif diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c index 7413f4b729b0..4ef4cd3f9936 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -30,13 +30,10 @@ #include #include #include -#include #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_gem.h" -#include "exynos_drm_iommu.h" -#include "exynos_drm_encoder.h" #define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) @@ -53,32 +50,6 @@ struct exynos_drm_fb { struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER]; }; -static int check_fb_gem_memory_type(struct drm_device *drm_dev, - struct exynos_drm_gem_obj *exynos_gem_obj) -{ - unsigned int flags; - - /* - * if exynos drm driver supports iommu then framebuffer can use - * all the buffer types. - */ - if (is_drm_iommu_supported(drm_dev)) - return 0; - - flags = exynos_gem_obj->flags; - - /* - * without iommu support, not support physically non-continuous memory - * for framebuffer. - */ - if (IS_NONCONTIG_BUFFER(flags)) { - DRM_ERROR("cannot use this gem memory type for fb.\n"); - return -EINVAL; - } - - return 0; -} - static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); @@ -86,9 +57,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) DRM_DEBUG_KMS("%s\n", __FILE__); - /* make sure that overlay data are updated before relesing fb. 
*/ - exynos_drm_encoder_complete_scanout(fb); - drm_framebuffer_cleanup(fb); for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { @@ -160,25 +128,14 @@ exynos_drm_framebuffer_init(struct drm_device *dev, struct drm_gem_object *obj) { struct exynos_drm_fb *exynos_fb; - struct exynos_drm_gem_obj *exynos_gem_obj; int ret; - exynos_gem_obj = to_exynos_gem_obj(obj); - - ret = check_fb_gem_memory_type(dev, exynos_gem_obj); - if (ret < 0) { - DRM_ERROR("cannot use this gem memory type for fb.\n"); - return ERR_PTR(-EINVAL); - } - exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); if (!exynos_fb) { DRM_ERROR("failed to allocate exynos drm framebuffer\n"); return ERR_PTR(-ENOMEM); } - exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; - ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret) { DRM_ERROR("failed to initialize framebuffer\n"); @@ -186,6 +143,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev, } drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); + exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); return &exynos_fb->fb; } @@ -256,9 +214,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); for (i = 1; i < exynos_fb->buf_cnt; i++) { - struct exynos_drm_gem_obj *exynos_gem_obj; - int ret; - obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]); if (!obj) { @@ -267,15 +222,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-ENOENT); } - exynos_gem_obj = to_exynos_gem_obj(obj); - - ret = check_fb_gem_memory_type(dev, exynos_gem_obj); - if (ret < 0) { - DRM_ERROR("cannot use this gem memory type for fb.\n"); - exynos_drm_fb_destroy(fb); - return ERR_PTR(ret); - } - exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index a2232792e0c0..e7466c4414cb 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -46,38 +46,8 @@ struct exynos_drm_fbdev { struct exynos_drm_gem_obj *exynos_gem_obj; }; -static int exynos_drm_fb_mmap(struct fb_info *info, - struct vm_area_struct *vma) -{ - struct drm_fb_helper *helper = info->par; - struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); - struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; - struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer; - unsigned long vm_size; - int ret; - - DRM_DEBUG_KMS("%s\n", __func__); - - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - - vm_size = vma->vm_end - vma->vm_start; - - if (vm_size > buffer->size) - return -EINVAL; - - ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->kvaddr, - buffer->dma_addr, buffer->size, &buffer->dma_attrs); - if (ret < 0) { - DRM_ERROR("failed to mmap.\n"); - return ret; - } - - return 0; -} - static struct fb_ops exynos_drm_fb_ops = { .owner = THIS_MODULE, - .fb_mmap = exynos_drm_fb_mmap, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, @@ -117,8 +87,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; fbi->screen_base = buffer->kvaddr + offset; - fbi->fix.smem_start = (unsigned long) - (page_to_phys(sg_page(buffer->sgt->sgl)) + offset); + fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) + + offset); fbi->screen_size = size; fbi->fix.smem_len = size; diff 
--git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 00bd266a31bb..e08478f19f1a 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -25,7 +25,6 @@ #include "exynos_drm_drv.h" #include "exynos_drm_fbdev.h" #include "exynos_drm_crtc.h" -#include "exynos_drm_iommu.h" /* * FIMD is stand for Fully Interactive Mobile Display and @@ -624,6 +623,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned long flags; + bool is_checked = false; spin_lock_irqsave(&drm_dev->event_lock, flags); @@ -633,6 +633,8 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) if (crtc != e->pipe) continue; + is_checked = true; + do_gettimeofday(&now); e->event.sequence = 0; e->event.tv_sec = now.tv_sec; @@ -640,7 +642,22 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); - drm_vblank_put(drm_dev, crtc); + } + + if (is_checked) { + /* + * call drm_vblank_put only in case that drm_vblank_get was + * called. + */ + if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0) + drm_vblank_put(drm_dev, crtc); + + /* + * don't off vblank if vblank_disable_allowed is 1, + * because vblank would be off by timer handler. + */ + if (!drm_dev->vblank_disable_allowed) + drm_vblank_off(drm_dev, crtc); } spin_unlock_irqrestore(&drm_dev->event_lock, flags); @@ -692,10 +709,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev) */ drm_dev->vblank_disable_allowed = 1; - /* attach this sub driver to iommu mapping if supported. */ - if (is_drm_iommu_supported(drm_dev)) - drm_iommu_attach_device(drm_dev, dev); - return 0; } @@ -703,9 +716,7 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev) { DRM_DEBUG_KMS("%s\n", __FILE__); - /* detach this sub driver from iommu mapping if supported. */ - if (is_drm_iommu_supported(drm_dev)) - drm_iommu_detach_device(drm_dev, dev); + /* TODO. 
*/ } static int fimd_calc_clkdiv(struct fimd_context *ctx, @@ -846,16 +857,18 @@ static int __devinit fimd_probe(struct platform_device *pdev) if (!ctx) return -ENOMEM; - ctx->bus_clk = devm_clk_get(dev, "fimd"); + ctx->bus_clk = clk_get(dev, "fimd"); if (IS_ERR(ctx->bus_clk)) { dev_err(dev, "failed to get bus clock\n"); - return PTR_ERR(ctx->bus_clk); + ret = PTR_ERR(ctx->bus_clk); + goto err_clk_get; } - ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); + ctx->lcd_clk = clk_get(dev, "sclk_fimd"); if (IS_ERR(ctx->lcd_clk)) { dev_err(dev, "failed to get lcd clock\n"); - return PTR_ERR(ctx->lcd_clk); + ret = PTR_ERR(ctx->lcd_clk); + goto err_bus_clk; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -863,13 +876,14 @@ static int __devinit fimd_probe(struct platform_device *pdev) ctx->regs = devm_request_and_ioremap(&pdev->dev, res); if (!ctx->regs) { dev_err(dev, "failed to map registers\n"); - return -ENXIO; + ret = -ENXIO; + goto err_clk; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "irq request failed.\n"); - return -ENXIO; + goto err_clk; } ctx->irq = res->start; @@ -878,7 +892,7 @@ static int __devinit fimd_probe(struct platform_device *pdev) 0, "drm_fimd", ctx); if (ret) { dev_err(dev, "irq request failed.\n"); - return ret; + goto err_clk; } ctx->vidcon0 = pdata->vidcon0; @@ -912,6 +926,17 @@ static int __devinit fimd_probe(struct platform_device *pdev) exynos_drm_subdrv_register(subdrv); return 0; + +err_clk: + clk_disable(ctx->lcd_clk); + clk_put(ctx->lcd_clk); + +err_bus_clk: + clk_disable(ctx->bus_clk); + clk_put(ctx->bus_clk); + +err_clk_get: + return ret; } static int __devexit fimd_remove(struct platform_device *pdev) @@ -935,6 +960,9 @@ static int __devexit fimd_remove(struct platform_device *pdev) out: pm_runtime_disable(dev); + clk_put(ctx->lcd_clk); + clk_put(ctx->bus_clk); + return 0; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 6ffa0763c078..f7aab24ea46c 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -17,14 +17,11 @@ #include #include #include -#include -#include #include #include #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" -#include "exynos_drm_iommu.h" #define G2D_HW_MAJOR_VER 4 #define G2D_HW_MINOR_VER 1 @@ -95,21 +92,11 @@ #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) -#define MAX_BUF_ADDR_NR 6 - -/* maximum buffer pool size of userptr is 64MB as default */ -#define MAX_POOL (64 * 1024 * 1024) - -enum { - BUF_TYPE_GEM = 1, - BUF_TYPE_USERPTR, -}; - /* cmdlist data structure */ struct g2d_cmdlist { - u32 head; - unsigned long data[G2D_CMDLIST_DATA_NUM]; - u32 last; /* last data offset */ + u32 head; + u32 data[G2D_CMDLIST_DATA_NUM]; + u32 last; /* last data offset */ }; struct drm_exynos_pending_g2d_event { @@ -117,26 +104,15 @@ struct drm_exynos_pending_g2d_event { struct drm_exynos_g2d_event event; }; -struct g2d_cmdlist_userptr { +struct g2d_gem_node { struct list_head list; - dma_addr_t dma_addr; - unsigned long userptr; - unsigned long size; - struct page **pages; - unsigned int npages; - struct sg_table *sgt; - struct vm_area_struct *vma; - atomic_t refcount; - bool in_pool; - bool out_of_list; + unsigned int handle; }; struct g2d_cmdlist_node { struct list_head list; struct g2d_cmdlist *cmdlist; - unsigned int map_nr; - unsigned long handles[MAX_BUF_ADDR_NR]; - unsigned int 
obj_type[MAX_BUF_ADDR_NR]; + unsigned int gem_nr; dma_addr_t dma_addr; struct drm_exynos_pending_g2d_event *event; @@ -146,7 +122,6 @@ struct g2d_runqueue_node { struct list_head list; struct list_head run_cmdlist; struct list_head event_list; - struct drm_file *filp; pid_t pid; struct completion complete; int async; @@ -168,33 +143,23 @@ struct g2d_data { struct mutex cmdlist_mutex; dma_addr_t cmdlist_pool; void *cmdlist_pool_virt; - struct dma_attrs cmdlist_dma_attrs; /* runqueue*/ struct g2d_runqueue_node *runqueue_node; struct list_head runqueue; struct mutex runqueue_mutex; struct kmem_cache *runqueue_slab; - - unsigned long current_pool; - unsigned long max_pool; }; static int g2d_init_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; struct g2d_cmdlist_node *node = g2d->cmdlist_node; - struct exynos_drm_subdrv *subdrv = &g2d->subdrv; int nr; int ret; - init_dma_attrs(&g2d->cmdlist_dma_attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); - - g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev, - G2D_CMDLIST_POOL_SIZE, - &g2d->cmdlist_pool, GFP_KERNEL, - &g2d->cmdlist_dma_attrs); + g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE, + &g2d->cmdlist_pool, GFP_KERNEL); if (!g2d->cmdlist_pool_virt) { dev_err(dev, "failed to allocate dma memory\n"); return -ENOMEM; @@ -219,20 +184,18 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) return 0; err: - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, - g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, + g2d->cmdlist_pool); return ret; } static void g2d_fini_cmdlist(struct g2d_data *g2d) { - struct exynos_drm_subdrv *subdrv = &g2d->subdrv; + struct device *dev = g2d->dev; kfree(g2d->cmdlist_node); - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, - g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, + g2d->cmdlist_pool); } static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) @@ -282,300 +245,62 @@ static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv, list_add_tail(&node->event->base.link, &g2d_priv->event_list); } -static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev, - unsigned long obj, - bool force) -{ - struct g2d_cmdlist_userptr *g2d_userptr = - (struct g2d_cmdlist_userptr *)obj; - - if (!obj) - return; - - if (force) - goto out; - - atomic_dec(&g2d_userptr->refcount); - - if (atomic_read(&g2d_userptr->refcount) > 0) - return; - - if (g2d_userptr->in_pool) - return; - -out: - exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt, - DMA_BIDIRECTIONAL); - - exynos_gem_put_pages_to_userptr(g2d_userptr->pages, - g2d_userptr->npages, - g2d_userptr->vma); - - if (!g2d_userptr->out_of_list) - list_del_init(&g2d_userptr->list); - - sg_free_table(g2d_userptr->sgt); - kfree(g2d_userptr->sgt); - g2d_userptr->sgt = NULL; - - kfree(g2d_userptr->pages); - g2d_userptr->pages = NULL; - kfree(g2d_userptr); - g2d_userptr = NULL; -} - -dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, - unsigned long userptr, - unsigned long size, - struct drm_file *filp, - unsigned long *obj) -{ - struct drm_exynos_file_private *file_priv = filp->driver_priv; - struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; - struct g2d_cmdlist_userptr *g2d_userptr; - struct g2d_data *g2d; - struct page **pages; - struct sg_table *sgt; - 
struct vm_area_struct *vma; - unsigned long start, end; - unsigned int npages, offset; - int ret; - - if (!size) { - DRM_ERROR("invalid userptr size.\n"); - return ERR_PTR(-EINVAL); - } - - g2d = dev_get_drvdata(g2d_priv->dev); - - /* check if userptr already exists in userptr_list. */ - list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) { - if (g2d_userptr->userptr == userptr) { - /* - * also check size because there could be same address - * and different size. - */ - if (g2d_userptr->size == size) { - atomic_inc(&g2d_userptr->refcount); - *obj = (unsigned long)g2d_userptr; - - return &g2d_userptr->dma_addr; - } - - /* - * at this moment, maybe g2d dma is accessing this - * g2d_userptr memory region so just remove this - * g2d_userptr object from userptr_list not to be - * referred again and also except it the userptr - * pool to be released after the dma access completion. - */ - g2d_userptr->out_of_list = true; - g2d_userptr->in_pool = false; - list_del_init(&g2d_userptr->list); - - break; - } - } - - g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL); - if (!g2d_userptr) { - DRM_ERROR("failed to allocate g2d_userptr.\n"); - return ERR_PTR(-ENOMEM); - } - - atomic_set(&g2d_userptr->refcount, 1); - - start = userptr & PAGE_MASK; - offset = userptr & ~PAGE_MASK; - end = PAGE_ALIGN(userptr + size); - npages = (end - start) >> PAGE_SHIFT; - g2d_userptr->npages = npages; - - pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL); - if (!pages) { - DRM_ERROR("failed to allocate pages.\n"); - kfree(g2d_userptr); - return ERR_PTR(-ENOMEM); - } - - vma = find_vma(current->mm, userptr); - if (!vma) { - DRM_ERROR("failed to get vm region.\n"); - ret = -EFAULT; - goto err_free_pages; - } - - if (vma->vm_end < userptr + size) { - DRM_ERROR("vma is too small.\n"); - ret = -EFAULT; - goto err_free_pages; - } - - g2d_userptr->vma = exynos_gem_get_vma(vma); - if (!g2d_userptr->vma) { - DRM_ERROR("failed to copy vma.\n"); - ret = -ENOMEM; - goto err_free_pages; - } - - g2d_userptr->size = size; - - ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK, - npages, pages, vma); - if (ret < 0) { - DRM_ERROR("failed to get user pages from userptr.\n"); - goto err_put_vma; - } - - g2d_userptr->pages = pages; - - sgt = kzalloc(sizeof *sgt, GFP_KERNEL); - if (!sgt) { - DRM_ERROR("failed to allocate sg table.\n"); - ret = -ENOMEM; - goto err_free_userptr; - } - - ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, - size, GFP_KERNEL); - if (ret < 0) { - DRM_ERROR("failed to get sgt from pages.\n"); - goto err_free_sgt; - } - - g2d_userptr->sgt = sgt; - - ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt, - DMA_BIDIRECTIONAL); - if (ret < 0) { - DRM_ERROR("failed to map sgt with dma region.\n"); - goto err_free_sgt; - } - - g2d_userptr->dma_addr = sgt->sgl[0].dma_address; - g2d_userptr->userptr = userptr; - - list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list); - - if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) { - g2d->current_pool += npages << PAGE_SHIFT; - g2d_userptr->in_pool = true; - } - - *obj = (unsigned long)g2d_userptr; - - return &g2d_userptr->dma_addr; - -err_free_sgt: - sg_free_table(sgt); - kfree(sgt); - sgt = NULL; - -err_free_userptr: - exynos_gem_put_pages_to_userptr(g2d_userptr->pages, - g2d_userptr->npages, - g2d_userptr->vma); - -err_put_vma: - exynos_gem_put_vma(g2d_userptr->vma); - -err_free_pages: - kfree(pages); - kfree(g2d_userptr); - pages = NULL; - g2d_userptr = NULL; - - return ERR_PTR(ret); -} - -static void 
g2d_userptr_free_all(struct drm_device *drm_dev, - struct g2d_data *g2d, - struct drm_file *filp) +static int g2d_get_cmdlist_gem(struct drm_device *drm_dev, + struct drm_file *file, + struct g2d_cmdlist_node *node) { - struct drm_exynos_file_private *file_priv = filp->driver_priv; + struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; - struct g2d_cmdlist_userptr *g2d_userptr, *n; - - list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list) - if (g2d_userptr->in_pool) - g2d_userptr_put_dma_addr(drm_dev, - (unsigned long)g2d_userptr, - true); - - g2d->current_pool = 0; -} - -static int g2d_map_cmdlist_gem(struct g2d_data *g2d, - struct g2d_cmdlist_node *node, - struct drm_device *drm_dev, - struct drm_file *file) -{ struct g2d_cmdlist *cmdlist = node->cmdlist; + dma_addr_t *addr; int offset; int i; - for (i = 0; i < node->map_nr; i++) { - unsigned long handle; - dma_addr_t *addr; + for (i = 0; i < node->gem_nr; i++) { + struct g2d_gem_node *gem_node; + + gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL); + if (!gem_node) { + dev_err(g2d_priv->dev, "failed to allocate gem node\n"); + return -ENOMEM; + } offset = cmdlist->last - (i * 2 + 1); - handle = cmdlist->data[offset]; - - if (node->obj_type[i] == BUF_TYPE_GEM) { - addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, - file); - if (IS_ERR(addr)) { - node->map_nr = i; - return -EFAULT; - } - } else { - struct drm_exynos_g2d_userptr g2d_userptr; - - if (copy_from_user(&g2d_userptr, (void __user *)handle, - sizeof(struct drm_exynos_g2d_userptr))) { - node->map_nr = i; - return -EFAULT; - } - - addr = g2d_userptr_get_dma_addr(drm_dev, - g2d_userptr.userptr, - g2d_userptr.size, - file, - &handle); - if (IS_ERR(addr)) { - node->map_nr = i; - return -EFAULT; - } + gem_node->handle = cmdlist->data[offset]; + + addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle, + file); + if (IS_ERR(addr)) { + node->gem_nr = i; + kfree(gem_node); + return PTR_ERR(addr); } cmdlist->data[offset] = *addr; - node->handles[i] = handle; + list_add_tail(&gem_node->list, &g2d_priv->gem_list); + g2d_priv->gem_nr++; } return 0; } -static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, - struct g2d_cmdlist_node *node, - struct drm_file *filp) +static void g2d_put_cmdlist_gem(struct drm_device *drm_dev, + struct drm_file *file, + unsigned int nr) { - struct exynos_drm_subdrv *subdrv = &g2d->subdrv; - int i; - - for (i = 0; i < node->map_nr; i++) { - unsigned long handle = node->handles[i]; + struct drm_exynos_file_private *file_priv = file->driver_priv; + struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; + struct g2d_gem_node *node, *n; - if (node->obj_type[i] == BUF_TYPE_GEM) - exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, - filp); - else - g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, - false); + list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) { + if (!nr) + break; - node->handles[i] = 0; + exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file); + list_del_init(&node->list); + kfree(node); + nr--; } - - node->map_nr = 0; } static void g2d_dma_start(struct g2d_data *g2d, @@ -612,18 +337,10 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d) static void g2d_free_runqueue_node(struct g2d_data *g2d, struct g2d_runqueue_node *runqueue_node) { - struct g2d_cmdlist_node *node; - if (!runqueue_node) return; mutex_lock(&g2d->cmdlist_mutex); - /* - * commands in run_cmdlist have been completed so unmap all gem 
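[Aside] g2d_get_cmdlist_gem() above walks the tail of the command buffer, resolves each GEM handle into a device address, patches the address back into the command stream, and keeps the handle on a per-file list so it can be released later. A simplified sketch of that loop, with a placeholder lookup callback standing in for exynos_drm_gem_get_dma_addr() and an illustrative node type.

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Stand-in for the bookkeeping node kept on the per-file gem_list. */
struct example_gem_node {
	struct list_head list;
	unsigned int	 handle;
};

/*
 * Resolve every GEM handle embedded at the tail of the command stream into
 * a device address and patch it in place.  lookup() stands in for
 * exynos_drm_gem_get_dma_addr(): it returns a pointer to the object's
 * dma_addr_t or an ERR_PTR() value on failure.
 */
static int example_patch_handles(u32 *data, unsigned int last, unsigned int nr,
				 struct list_head *gem_list,
				 dma_addr_t *(*lookup)(unsigned int handle))
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		unsigned int offset = last - (i * 2 + 1);
		struct example_gem_node *node;
		dma_addr_t *addr;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->handle = data[offset];

		addr = lookup(node->handle);
		if (IS_ERR(addr)) {
			kfree(node);
			return PTR_ERR(addr);
		}

		/* The engine now sees the bus address, not the handle. */
		data[offset] = *addr;
		list_add_tail(&node->list, gem_list);
	}

	return 0;
}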
- * objects in each command node so that they are unreferenced. - */ - list_for_each_entry(node, &runqueue_node->run_cmdlist, list) - g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp); list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); mutex_unlock(&g2d->cmdlist_mutex); @@ -713,28 +430,15 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static int g2d_check_reg_offset(struct device *dev, - struct g2d_cmdlist_node *node, +static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist, int nr, bool for_addr) { - struct g2d_cmdlist *cmdlist = node->cmdlist; int reg_offset; int index; int i; for (i = 0; i < nr; i++) { index = cmdlist->last - 2 * (i + 1); - - if (for_addr) { - /* check userptr buffer type. */ - reg_offset = (cmdlist->data[index] & - ~0x7fffffff) >> 31; - if (reg_offset) { - node->obj_type[i] = BUF_TYPE_USERPTR; - cmdlist->data[index] &= ~G2D_BUF_USERPTR; - } - } - reg_offset = cmdlist->data[index] & ~0xfffff000; if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) @@ -751,9 +455,6 @@ static int g2d_check_reg_offset(struct device *dev, case G2D_MSK_BASE_ADDR: if (!for_addr) goto err; - - if (node->obj_type[i] != BUF_TYPE_USERPTR) - node->obj_type[i] = BUF_TYPE_GEM; break; default: if (for_addr) @@ -765,7 +466,7 @@ static int g2d_check_reg_offset(struct device *dev, return 0; err: - dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]); + dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]); return -EINVAL; } @@ -865,7 +566,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, } /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ - size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2; + size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2; if (size > G2D_CMDLIST_DATA_NUM) { dev_err(dev, "cmdlist size is too big\n"); ret = -EINVAL; @@ -882,29 +583,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, } cmdlist->last += req->cmd_nr * 2; - ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false); + ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false); if (ret < 0) goto err_free_event; - node->map_nr = req->cmd_buf_nr; - if (req->cmd_buf_nr) { - struct drm_exynos_g2d_cmd *cmd_buf; + node->gem_nr = req->cmd_gem_nr; + if (req->cmd_gem_nr) { + struct drm_exynos_g2d_cmd *cmd_gem; - cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf; + cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem; if (copy_from_user(cmdlist->data + cmdlist->last, - (void __user *)cmd_buf, - sizeof(*cmd_buf) * req->cmd_buf_nr)) { + (void __user *)cmd_gem, + sizeof(*cmd_gem) * req->cmd_gem_nr)) { ret = -EFAULT; goto err_free_event; } - cmdlist->last += req->cmd_buf_nr * 2; + cmdlist->last += req->cmd_gem_nr * 2; - ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true); + ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true); if (ret < 0) goto err_free_event; - ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file); + ret = g2d_get_cmdlist_gem(drm_dev, file, node); if (ret < 0) goto err_unmap; } @@ -923,7 +624,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, return 0; err_unmap: - g2d_unmap_cmdlist_gem(g2d, node, file); + g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr); err_free_event: if (node->event) { spin_lock_irqsave(&drm_dev->event_lock, flags); @@ -979,7 +680,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, 
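[Aside] The reworked g2d_check_reg_offset() above takes the command list directly and only validates that each destination register falls inside the engine's register window (the userptr buffer-type bits are gone). A stripped-down sketch of that bounds check, with illustrative limits in place of G2D_VALID_START/G2D_VALID_END.

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative register window -- the driver uses G2D_VALID_START/END. */
#define EXAMPLE_VALID_START	0x0100
#define EXAMPLE_VALID_END	0x0880

/*
 * A command word carries the destination register offset in its low bits;
 * reject anything outside the engine's register window.
 */
static int example_check_reg_offset(u32 word)
{
	u32 reg_offset = word & ~0xfffff000;

	if (reg_offset < EXAMPLE_VALID_START || reg_offset > EXAMPLE_VALID_END)
		return -EINVAL;

	return 0;
}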
mutex_lock(&g2d->runqueue_mutex); runqueue_node->pid = current->pid; - runqueue_node->filp = file; list_add_tail(&runqueue_node->list, &g2d->runqueue); if (!g2d->runqueue_node) g2d_exec_runqueue(g2d); @@ -996,43 +696,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, } EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); -static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) -{ - struct g2d_data *g2d; - int ret; - - g2d = dev_get_drvdata(dev); - if (!g2d) - return -EFAULT; - - /* allocate dma-aware cmdlist buffer. */ - ret = g2d_init_cmdlist(g2d); - if (ret < 0) { - dev_err(dev, "cmdlist init failed\n"); - return ret; - } - - if (!is_drm_iommu_supported(drm_dev)) - return 0; - - ret = drm_iommu_attach_device(drm_dev, dev); - if (ret < 0) { - dev_err(dev, "failed to enable iommu.\n"); - g2d_fini_cmdlist(g2d); - } - - return ret; - -} - -static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev) -{ - if (!is_drm_iommu_supported(drm_dev)) - return; - - drm_iommu_detach_device(drm_dev, dev); -} - static int g2d_open(struct drm_device *drm_dev, struct device *dev, struct drm_file *file) { @@ -1050,7 +713,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev, INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist); INIT_LIST_HEAD(&g2d_priv->event_list); - INIT_LIST_HEAD(&g2d_priv->userptr_list); + INIT_LIST_HEAD(&g2d_priv->gem_list); return 0; } @@ -1071,21 +734,11 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev, return; mutex_lock(&g2d->cmdlist_mutex); - list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) { - /* - * unmap all gem objects not completed. - * - * P.S. if current process was terminated forcely then - * there may be some commands in inuse_cmdlist so unmap - * them. - */ - g2d_unmap_cmdlist_gem(g2d, node, file); + list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) list_move_tail(&node->list, &g2d->free_cmdlist); - } mutex_unlock(&g2d->cmdlist_mutex); - /* release all g2d_userptr in pool. 
*/ - g2d_userptr_free_all(drm_dev, g2d, file); + g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr); kfree(file_priv->g2d_priv); } @@ -1125,11 +778,15 @@ static int __devinit g2d_probe(struct platform_device *pdev) mutex_init(&g2d->cmdlist_mutex); mutex_init(&g2d->runqueue_mutex); - g2d->gate_clk = devm_clk_get(dev, "fimg2d"); + ret = g2d_init_cmdlist(g2d); + if (ret < 0) + goto err_destroy_workqueue; + + g2d->gate_clk = clk_get(dev, "fimg2d"); if (IS_ERR(g2d->gate_clk)) { dev_err(dev, "failed to get gate clock\n"); ret = PTR_ERR(g2d->gate_clk); - goto err_destroy_workqueue; + goto err_fini_cmdlist; } pm_runtime_enable(dev); @@ -1157,14 +814,10 @@ static int __devinit g2d_probe(struct platform_device *pdev) goto err_put_clk; } - g2d->max_pool = MAX_POOL; - platform_set_drvdata(pdev, g2d); subdrv = &g2d->subdrv; subdrv->dev = dev; - subdrv->probe = g2d_subdrv_probe; - subdrv->remove = g2d_subdrv_remove; subdrv->open = g2d_open; subdrv->close = g2d_close; @@ -1181,6 +834,9 @@ static int __devinit g2d_probe(struct platform_device *pdev) err_put_clk: pm_runtime_disable(dev); + clk_put(g2d->gate_clk); +err_fini_cmdlist: + g2d_fini_cmdlist(g2d); err_destroy_workqueue: destroy_workqueue(g2d->g2d_workq); err_destroy_slab: @@ -1201,6 +857,7 @@ static int __devexit g2d_remove(struct platform_device *pdev) } pm_runtime_disable(&pdev->dev); + clk_put(g2d->gate_clk); g2d_fini_cmdlist(g2d); destroy_workqueue(g2d->g2d_workq); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c index 99227246ce82..d2545560664f 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -83,40 +83,157 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj, static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) { - /* TODO */ - + if (!IS_NONCONTIG_BUFFER(flags)) { + if (size >= SZ_1M) + return roundup(size, SECTION_SIZE); + else if (size >= SZ_64K) + return roundup(size, SZ_64K); + else + goto out; + } +out: return roundup(size, PAGE_SIZE); } -static int exynos_drm_gem_map_buf(struct drm_gem_object *obj, +struct page **exynos_gem_get_pages(struct drm_gem_object *obj, + gfp_t gfpmask) +{ + struct page *p, **pages; + int i, npages; + + npages = obj->size >> PAGE_SHIFT; + + pages = drm_malloc_ab(npages, sizeof(struct page *)); + if (pages == NULL) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < npages; i++) { + p = alloc_page(gfpmask); + if (IS_ERR(p)) + goto fail; + pages[i] = p; + } + + return pages; + +fail: + while (--i) + __free_page(pages[i]); + + drm_free_large(pages); + return ERR_CAST(p); +} + +static void exynos_gem_put_pages(struct drm_gem_object *obj, + struct page **pages) +{ + int npages; + + npages = obj->size >> PAGE_SHIFT; + + while (--npages >= 0) + __free_page(pages[npages]); + + drm_free_large(pages); +} + +static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, struct vm_area_struct *vma, unsigned long f_vaddr, pgoff_t page_offset) { struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; - struct scatterlist *sgl; unsigned long pfn; - int i; - if (!buf->sgt) - return -EINTR; + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { + if (!buf->pages) + return -EINTR; + + pfn = page_to_pfn(buf->pages[page_offset++]); + } else + pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; + + return vm_insert_mixed(vma, f_vaddr, pfn); +} - if (page_offset >= (buf->size >> PAGE_SHIFT)) { - 
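[Aside] exynos_gem_get_pages() above builds the backing store one page at a time instead of as a single contiguous chunk. A self-contained sketch of that pattern with a complete unwind on failure; note that alloc_page() reports failure by returning NULL, so the error path frees only the pages actually obtained.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <drm/drmP.h>		/* drm_malloc_ab()/drm_free_large() */

/*
 * Allocate npages individual pages, unwinding completely on failure.
 * alloc_page() signals failure with NULL, not with an ERR_PTR() value.
 */
static struct page **example_get_pages(unsigned int npages, gfp_t gfp)
{
	struct page **pages;
	unsigned int i;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pages)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(gfp);
		if (!pages[i])
			goto fail;
	}

	return pages;

fail:
	while (i--)
		__free_page(pages[i]);	/* free only what was obtained */
	drm_free_large(pages);
	return ERR_PTR(-ENOMEM);
}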
DRM_ERROR("invalid page offset\n"); +static int exynos_drm_gem_get_pages(struct drm_gem_object *obj) +{ + struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); + struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; + struct scatterlist *sgl; + struct page **pages; + unsigned int npages, i = 0; + int ret; + + if (buf->pages) { + DRM_DEBUG_KMS("already allocated.\n"); return -EINVAL; } + pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE); + if (IS_ERR(pages)) { + DRM_ERROR("failed to get pages.\n"); + return PTR_ERR(pages); + } + + npages = obj->size >> PAGE_SHIFT; + buf->page_size = PAGE_SIZE; + + buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!buf->sgt) { + DRM_ERROR("failed to allocate sg table.\n"); + ret = -ENOMEM; + goto err; + } + + ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL); + if (ret < 0) { + DRM_ERROR("failed to initialize sg table.\n"); + ret = -EFAULT; + goto err1; + } + sgl = buf->sgt->sgl; - for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) { - if (page_offset < (sgl->length >> PAGE_SHIFT)) - break; - page_offset -= (sgl->length >> PAGE_SHIFT); + + /* set all pages to sg list. */ + while (i < npages) { + sg_set_page(sgl, pages[i], PAGE_SIZE, 0); + sg_dma_address(sgl) = page_to_phys(pages[i]); + i++; + sgl = sg_next(sgl); } - pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset; + /* add some codes for UNCACHED type here. TODO */ - return vm_insert_mixed(vma, f_vaddr, pfn); + buf->pages = pages; + return ret; +err1: + kfree(buf->sgt); + buf->sgt = NULL; +err: + exynos_gem_put_pages(obj, pages); + return ret; + +} + +static void exynos_drm_gem_put_pages(struct drm_gem_object *obj) +{ + struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); + struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; + + /* + * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages + * allocated at gem fault handler. + */ + sg_free_table(buf->sgt); + kfree(buf->sgt); + buf->sgt = NULL; + + exynos_gem_put_pages(obj, buf->pages); + buf->pages = NULL; + + /* add some codes for UNCACHED type here. TODO */ } static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, @@ -153,6 +270,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); + if (!buf->pages) + return; + /* * do not release memory region from exporter. * @@ -162,7 +282,10 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) if (obj->import_attach) goto out; - exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) + exynos_drm_gem_put_pages(obj); + else + exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); out: exynos_drm_fini_buf(obj->dev, buf); @@ -241,10 +364,22 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, /* set memory type and cache attribute from user side. */ exynos_gem_obj->flags = flags; - ret = exynos_drm_alloc_buf(dev, buf, flags); - if (ret < 0) { - drm_gem_object_release(&exynos_gem_obj->base); - goto err_fini_buf; + /* + * allocate all pages as desired size if user wants to allocate + * physically non-continuous memory. 
+ */ + if (flags & EXYNOS_BO_NONCONTIG) { + ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base); + if (ret < 0) { + drm_gem_object_release(&exynos_gem_obj->base); + goto err_fini_buf; + } + } else { + ret = exynos_drm_alloc_buf(dev, buf, flags); + if (ret < 0) { + drm_gem_object_release(&exynos_gem_obj->base); + goto err_fini_buf; + } } return exynos_gem_obj; @@ -277,14 +412,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } -dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, +void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, unsigned int gem_handle, - struct drm_file *filp) + struct drm_file *file_priv) { struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_gem_object *obj; - obj = drm_gem_object_lookup(dev, filp, gem_handle); + obj = drm_gem_object_lookup(dev, file_priv, gem_handle); if (!obj) { DRM_ERROR("failed to lookup gem object.\n"); return ERR_PTR(-EINVAL); @@ -292,17 +427,25 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, exynos_gem_obj = to_exynos_gem_obj(obj); + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { + DRM_DEBUG_KMS("not support NONCONTIG type.\n"); + drm_gem_object_unreference_unlocked(obj); + + /* TODO */ + return ERR_PTR(-EINVAL); + } + return &exynos_gem_obj->buffer->dma_addr; } void exynos_drm_gem_put_dma_addr(struct drm_device *dev, unsigned int gem_handle, - struct drm_file *filp) + struct drm_file *file_priv) { struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_gem_object *obj; - obj = drm_gem_object_lookup(dev, filp, gem_handle); + obj = drm_gem_object_lookup(dev, file_priv, gem_handle); if (!obj) { DRM_ERROR("failed to lookup gem object.\n"); return; @@ -310,6 +453,14 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, exynos_gem_obj = to_exynos_gem_obj(obj); + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { + DRM_DEBUG_KMS("not support NONCONTIG type.\n"); + drm_gem_object_unreference_unlocked(obj); + + /* TODO */ + return; + } + drm_gem_object_unreference_unlocked(obj); /* @@ -338,57 +489,22 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, &args->offset); } -static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev, - struct file *filp) -{ - struct drm_file *file_priv; - - mutex_lock(&drm_dev->struct_mutex); - - /* find current process's drm_file from filelist. */ - list_for_each_entry(file_priv, &drm_dev->filelist, lhead) { - if (file_priv->filp == filp) { - mutex_unlock(&drm_dev->struct_mutex); - return file_priv; - } - } - - mutex_unlock(&drm_dev->struct_mutex); - WARN_ON(1); - - return ERR_PTR(-EFAULT); -} - static int exynos_drm_gem_mmap_buffer(struct file *filp, struct vm_area_struct *vma) { struct drm_gem_object *obj = filp->private_data; struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); - struct drm_device *drm_dev = obj->dev; struct exynos_drm_gem_buf *buffer; - struct drm_file *file_priv; - unsigned long vm_size; + unsigned long pfn, vm_size, usize, uaddr = vma->vm_start; int ret; DRM_DEBUG_KMS("%s\n", __FILE__); vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_private_data = obj; - vma->vm_ops = drm_dev->driver->gem_vm_ops; - - /* restore it to driver's fops. */ - filp->f_op = fops_get(drm_dev->driver->fops); - - file_priv = exynos_drm_find_drm_file(drm_dev, filp); - if (IS_ERR(file_priv)) - return PTR_ERR(file_priv); - - /* restore it to drm_file. 
*/ - filp->private_data = file_priv; update_vm_cache_attr(exynos_gem_obj, vma); - vm_size = vma->vm_end - vma->vm_start; + vm_size = usize = vma->vm_end - vma->vm_start; /* * a buffer contains information to physically continuous memory @@ -400,23 +516,40 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, if (vm_size > buffer->size) return -EINVAL; - ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->kvaddr, - buffer->dma_addr, buffer->size, - &buffer->dma_attrs); - if (ret < 0) { - DRM_ERROR("failed to mmap.\n"); - return ret; - } + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { + int i = 0; - /* - * take a reference to this mapping of the object. And this reference - * is unreferenced by the corresponding vm_close call. - */ - drm_gem_object_reference(obj); + if (!buffer->pages) + return -EINVAL; + + vma->vm_flags |= VM_MIXEDMAP; - mutex_lock(&drm_dev->struct_mutex); - drm_vm_open_locked(drm_dev, vma); - mutex_unlock(&drm_dev->struct_mutex); + do { + ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); + if (ret) { + DRM_ERROR("failed to remap user space.\n"); + return ret; + } + + uaddr += PAGE_SIZE; + usize -= PAGE_SIZE; + } while (usize > 0); + } else { + /* + * get page frame number to physical memory to be mapped + * to user space. + */ + pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> + PAGE_SHIFT; + + DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); + + if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size, + vma->vm_page_prot)) { + DRM_ERROR("failed to remap pfn range.\n"); + return -EAGAIN; + } + } return 0; } @@ -445,29 +578,16 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - /* - * Set specific mmper's fops. And it will be restored by - * exynos_drm_gem_mmap_buffer to dev->driver->fops. - * This is used to call specific mapper temporarily. - */ - file_priv->filp->f_op = &exynos_drm_gem_fops; + obj->filp->f_op = &exynos_drm_gem_fops; + obj->filp->private_data = obj; - /* - * Set gem object to private_data so that specific mmaper - * can get the gem object. And it will be restored by - * exynos_drm_gem_mmap_buffer to drm_file. - */ - file_priv->filp->private_data = obj; - - addr = vm_mmap(file_priv->filp, 0, args->size, + addr = vm_mmap(obj->filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, 0); drm_gem_object_unreference_unlocked(obj); - if (IS_ERR((void *)addr)) { - file_priv->filp->private_data = file_priv; + if (IS_ERR((void *)addr)) return PTR_ERR((void *)addr); - } args->mapped = addr; @@ -502,129 +622,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, return 0; } -struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma) -{ - struct vm_area_struct *vma_copy; - - vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL); - if (!vma_copy) - return NULL; - - if (vma->vm_ops && vma->vm_ops->open) - vma->vm_ops->open(vma); - - if (vma->vm_file) - get_file(vma->vm_file); - - memcpy(vma_copy, vma, sizeof(*vma)); - - vma_copy->vm_mm = NULL; - vma_copy->vm_next = NULL; - vma_copy->vm_prev = NULL; - - return vma_copy; -} - -void exynos_gem_put_vma(struct vm_area_struct *vma) -{ - if (!vma) - return; - - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - - if (vma->vm_file) - fput(vma->vm_file); - - kfree(vma); -} - -int exynos_gem_get_pages_from_userptr(unsigned long start, - unsigned int npages, - struct page **pages, - struct vm_area_struct *vma) -{ - int get_npages; - - /* the memory region mmaped with VM_PFNMAP. 
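[Aside] exynos_drm_gem_mmap_buffer() above now maps the buffer itself rather than going through dma_mmap_attrs(): non-contiguous objects are inserted page by page under VM_MIXEDMAP, while contiguous ones are mapped in a single remap_pfn_range() call from the DMA address. Sketches of both paths follow, with illustrative helper names.

#include <linux/mm.h>
#include <linux/types.h>

/*
 * Non-contiguous case: insert the backing pages one at a time.  The VMA is
 * marked VM_MIXEDMAP as in the hunk above.
 */
static int example_mmap_pages(struct vm_area_struct *vma, struct page **pages,
			      unsigned int npages)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i;
	int ret;

	vma->vm_flags |= VM_MIXEDMAP;

	for (i = 0; i < npages; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			return ret;
		uaddr += PAGE_SIZE;
	}

	return 0;
}

/* Contiguous case: one remap of the whole region from its bus address. */
static int example_mmap_contig(struct vm_area_struct *vma, dma_addr_t dma_addr,
			       unsigned long size)
{
	unsigned long pfn = (unsigned long)dma_addr >> PAGE_SHIFT;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}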
*/ - if (vma_is_io(vma)) { - unsigned int i; - - for (i = 0; i < npages; ++i, start += PAGE_SIZE) { - unsigned long pfn; - int ret = follow_pfn(vma, start, &pfn); - if (ret) - return ret; - - pages[i] = pfn_to_page(pfn); - } - - if (i != npages) { - DRM_ERROR("failed to get user_pages.\n"); - return -EINVAL; - } - - return 0; - } - - get_npages = get_user_pages(current, current->mm, start, - npages, 1, 1, pages, NULL); - get_npages = max(get_npages, 0); - if (get_npages != npages) { - DRM_ERROR("failed to get user_pages.\n"); - while (get_npages) - put_page(pages[--get_npages]); - return -EFAULT; - } - - return 0; -} - -void exynos_gem_put_pages_to_userptr(struct page **pages, - unsigned int npages, - struct vm_area_struct *vma) -{ - if (!vma_is_io(vma)) { - unsigned int i; - - for (i = 0; i < npages; i++) { - set_page_dirty_lock(pages[i]); - - /* - * undo the reference we took when populating - * the table. - */ - put_page(pages[i]); - } - } -} - -int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, - struct sg_table *sgt, - enum dma_data_direction dir) -{ - int nents; - - mutex_lock(&drm_dev->struct_mutex); - - nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); - if (!nents) { - DRM_ERROR("failed to map sgl with dma.\n"); - mutex_unlock(&drm_dev->struct_mutex); - return nents; - } - - mutex_unlock(&drm_dev->struct_mutex); - return 0; -} - -void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, - struct sg_table *sgt, - enum dma_data_direction dir) -{ - dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); -} - int exynos_drm_gem_init_object(struct drm_gem_object *obj) { DRM_DEBUG_KMS("%s\n", __FILE__); @@ -756,9 +753,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mutex_lock(&dev->struct_mutex); - ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset); + ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); if (ret < 0) - DRM_ERROR("failed to map a buffer with user.\n"); + DRM_ERROR("failed to map pages.\n"); mutex_unlock(&dev->struct_mutex); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h index d3ea106a9a77..085b2a5d5f70 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -35,25 +35,21 @@ * exynos drm gem buffer structure. * * @kvaddr: kernel virtual address to allocated memory region. - * *userptr: user space address. * @dma_addr: bus address(accessed by dma) to allocated memory region. * - this address could be physical address without IOMMU and * device address with IOMMU. - * @write: whether pages will be written to by the caller. * @sgt: sg table to transfer page data. + * @pages: contain all pages to allocated memory region. + * @page_size: could be 4K, 64K or 1MB. * @size: size of allocated memory region. - * @pfnmap: indicate whether memory region from userptr is mmaped with - * VM_PFNMAP or not. */ struct exynos_drm_gem_buf { void __iomem *kvaddr; - unsigned long userptr; dma_addr_t dma_addr; - struct dma_attrs dma_attrs; - unsigned int write; struct sg_table *sgt; + struct page **pages; + unsigned long page_size; unsigned long size; - bool pfnmap; }; /* @@ -69,7 +65,6 @@ struct exynos_drm_gem_buf { * or at framebuffer creation. * @size: size requested from user, in bytes and this size is aligned * in page unit. - * @vma: a pointer to vm_area. * @flags: indicate memory type to allocated buffer and cache attruibute. * * P.S. 
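[Aside] The deleted exynos_gem_get_pages_from_userptr()/exynos_gem_put_pages_to_userptr() pair pinned and released user memory with the eight-argument get_user_pages() of this kernel generation. A minimal sketch of that pin/unpin pattern; the caller is assumed to hold mmap_sem for read, and error handling is reduced to the essentials.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Pin npages of user memory starting at 'start' (page aligned).  The caller
 * must hold current->mm->mmap_sem for read around get_user_pages().
 */
static int example_pin_user_pages(unsigned long start, unsigned int npages,
				  struct page **pages)
{
	int got;

	got = get_user_pages(current, current->mm, start, npages,
			     1 /* write */, 1 /* force */, pages, NULL);
	if (got != npages) {
		while (got > 0)
			put_page(pages[--got]);
		return -EFAULT;
	}

	return 0;
}

/* Mark the pages dirty and drop the references taken above. */
static void example_unpin_user_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
}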
this object would be transfered to user as kms_bo.handle so @@ -79,7 +74,6 @@ struct exynos_drm_gem_obj { struct drm_gem_object base; struct exynos_drm_gem_buf *buffer; unsigned long size; - struct vm_area_struct *vma; unsigned int flags; }; @@ -110,9 +104,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, * other drivers such as 2d/3d acceleration drivers. * with this function call, gem object reference count would be increased. */ -dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, +void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, unsigned int gem_handle, - struct drm_file *filp); + struct drm_file *file_priv); /* * put dma address from gem handle and this function could be used for @@ -121,7 +115,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, */ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, unsigned int gem_handle, - struct drm_file *filp); + struct drm_file *file_priv); /* get buffer offset to map to user space. */ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, @@ -134,10 +128,6 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -/* map user space allocated by malloc to pages. */ -int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - /* get buffer information to memory region allocated by gem. */ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -173,36 +163,4 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); /* set vm_flags and we can change the vm attribute to other one at here. */ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); -static inline int vma_is_io(struct vm_area_struct *vma) -{ - return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); -} - -/* get a copy of a virtual memory region. */ -struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma); - -/* release a userspace virtual memory area. */ -void exynos_gem_put_vma(struct vm_area_struct *vma); - -/* get pages from user space. */ -int exynos_gem_get_pages_from_userptr(unsigned long start, - unsigned int npages, - struct page **pages, - struct vm_area_struct *vma); - -/* drop the reference to pages. */ -void exynos_gem_put_pages_to_userptr(struct page **pages, - unsigned int npages, - struct vm_area_struct *vma); - -/* map sgt with dma region. */ -int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, - struct sg_table *sgt, - enum dma_data_direction dir); - -/* unmap sgt from dma region. 
*/ -void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, - struct sg_table *sgt, - enum dma_data_direction dir); - #endif diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index 2d11e70b601a..c3b9e2b45185 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -346,23 +346,9 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev, ctx->hdmi_ctx->drm_dev = drm_dev; ctx->mixer_ctx->drm_dev = drm_dev; - if (mixer_ops->iommu_on) - mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true); - return 0; } -static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev) -{ - struct drm_hdmi_context *ctx; - struct exynos_drm_subdrv *subdrv = to_subdrv(dev); - - ctx = get_ctx_from_subdrv(subdrv); - - if (mixer_ops->iommu_on) - mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false); -} - static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -382,7 +368,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) subdrv->dev = dev; subdrv->manager = &hdmi_manager; subdrv->probe = hdmi_subdrv_probe; - subdrv->remove = hdmi_subdrv_remove; platform_set_drvdata(pdev, subdrv); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h index 54b522353e48..2da5ffd3a059 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h @@ -62,7 +62,6 @@ struct exynos_hdmi_ops { struct exynos_mixer_ops { /* manager */ - int (*iommu_on)(void *ctx, bool enable); int (*enable_vblank)(void *ctx, int pipe); void (*disable_vblank)(void *ctx); void (*dpms)(void *ctx, int mode); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c deleted file mode 100644 index 09db1983eb1a..000000000000 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c +++ /dev/null @@ -1,150 +0,0 @@ -/* exynos_drm_iommu.c - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd. - * Author: Inki Dae - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include - -#include -#include -#include - -#include - -#include "exynos_drm_drv.h" -#include "exynos_drm_iommu.h" - -/* - * drm_create_iommu_mapping - create a mapping structure - * - * @drm_dev: DRM device - */ -int drm_create_iommu_mapping(struct drm_device *drm_dev) -{ - struct dma_iommu_mapping *mapping = NULL; - struct exynos_drm_private *priv = drm_dev->dev_private; - struct device *dev = drm_dev->dev; - - if (!priv->da_start) - priv->da_start = EXYNOS_DEV_ADDR_START; - if (!priv->da_space_size) - priv->da_space_size = EXYNOS_DEV_ADDR_SIZE; - if (!priv->da_space_order) - priv->da_space_order = EXYNOS_DEV_ADDR_ORDER; - - mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, - priv->da_space_size, - priv->da_space_order); - if (!mapping) - return -ENOMEM; - - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), - GFP_KERNEL); - dma_set_max_seg_size(dev, 0xffffffffu); - dev->archdata.mapping = mapping; - - return 0; -} - -/* - * drm_release_iommu_mapping - release iommu mapping structure - * - * @drm_dev: DRM device - * - * if mapping->kref becomes 0 then all things related to iommu mapping - * will be released - */ -void drm_release_iommu_mapping(struct drm_device *drm_dev) -{ - struct device *dev = drm_dev->dev; - - arm_iommu_release_mapping(dev->archdata.mapping); -} - -/* - * drm_iommu_attach_device- attach device to iommu mapping - * - * @drm_dev: DRM device - * @subdrv_dev: device to be attach - * - * This function should be called by sub drivers to attach it to iommu - * mapping. - */ -int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) -{ - struct device *dev = drm_dev->dev; - int ret; - - if (!dev->archdata.mapping) { - DRM_ERROR("iommu_mapping is null.\n"); - return -EFAULT; - } - - subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, - sizeof(*subdrv_dev->dma_parms), - GFP_KERNEL); - dma_set_max_seg_size(subdrv_dev, 0xffffffffu); - - ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); - if (ret < 0) { - DRM_DEBUG_KMS("failed iommu attach.\n"); - return ret; - } - - /* - * Set dma_ops to drm_device just one time. - * - * The dma mapping api needs device object and the api is used - * to allocate physial memory and map it with iommu table. - * If iommu attach succeeded, the sub driver would have dma_ops - * for iommu and also all sub drivers have same dma_ops. - */ - if (!dev->archdata.dma_ops) - dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops; - - return 0; -} - -/* - * drm_iommu_detach_device -detach device address space mapping from device - * - * @drm_dev: DRM device - * @subdrv_dev: device to be detached - * - * This function should be called by sub drivers to detach it from iommu - * mapping - */ -void drm_iommu_detach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) -{ - struct device *dev = drm_dev->dev; - struct dma_iommu_mapping *mapping = dev->archdata.mapping; - - if (!mapping || !mapping->domain) - return; - - iommu_detach_device(mapping->domain, subdrv_dev); - drm_release_iommu_mapping(drm_dev); -} diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h deleted file mode 100644 index 18a0ca190b98..000000000000 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h +++ /dev/null @@ -1,85 +0,0 @@ -/* exynos_drm_iommu.h - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
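[Aside] The removed exynos_drm_iommu.c built one ARM DMA-IOMMU mapping for the DRM device and attached each sub-device to it. The core of that setup looks roughly like the sketch below; the address window constants are the EXYNOS_DEV_ADDR_* defaults quoted in the removed header, and the helper name is illustrative.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

/*
 * Create an ARM DMA-IOMMU mapping and attach a device to it, using the
 * address window defaults from the removed exynos_drm_iommu.h
 * (EXYNOS_DEV_ADDR_START/SIZE/ORDER).
 */
static int example_attach_iommu(struct device *subdrv_dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x20000000,	/* da_start */
					   0x40000000,	/* da_space_size */
					   4);		/* da_space_order */
	if (IS_ERR_OR_NULL(mapping))
		return -ENOMEM;

	/* Once attached, the device allocates through the IOMMU dma_ops. */
	return arm_iommu_attach_device(subdrv_dev, mapping);
}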
- * Authoer: Inki Dae - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _EXYNOS_DRM_IOMMU_H_ -#define _EXYNOS_DRM_IOMMU_H_ - -#define EXYNOS_DEV_ADDR_START 0x20000000 -#define EXYNOS_DEV_ADDR_SIZE 0x40000000 -#define EXYNOS_DEV_ADDR_ORDER 0x4 - -#ifdef CONFIG_DRM_EXYNOS_IOMMU - -int drm_create_iommu_mapping(struct drm_device *drm_dev); - -void drm_release_iommu_mapping(struct drm_device *drm_dev); - -int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev); - -void drm_iommu_detach_device(struct drm_device *dev_dev, - struct device *subdrv_dev); - -static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) -{ -#ifdef CONFIG_ARM_DMA_USE_IOMMU - struct device *dev = drm_dev->dev; - - return dev->archdata.mapping ? true : false; -#else - return false; -#endif -} - -#else - -struct dma_iommu_mapping; -static inline int drm_create_iommu_mapping(struct drm_device *drm_dev) -{ - return 0; -} - -static inline void drm_release_iommu_mapping(struct drm_device *drm_dev) -{ -} - -static inline int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) -{ - return 0; -} - -static inline void drm_iommu_detach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) -{ -} - -static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) -{ - return false; -} - -#endif -#endif diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 4b0c16bfd1da..e4b8a8f741f7 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -382,6 +382,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned long flags; + bool is_checked = false; spin_lock_irqsave(&drm_dev->event_lock, flags); @@ -391,6 +392,8 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) if (crtc != e->pipe) continue; + is_checked = true; + do_gettimeofday(&now); e->event.sequence = 0; e->event.tv_sec = now.tv_sec; @@ -398,7 +401,22 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); - drm_vblank_put(drm_dev, crtc); + } + + if (is_checked) { + /* + * call drm_vblank_put only in case that drm_vblank_get was + * called. 
+ */ + if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0) + drm_vblank_put(drm_dev, crtc); + + /* + * don't off vblank if vblank_disable_allowed is 1, + * because vblank would be off by timer handler. + */ + if (!drm_dev->vblank_disable_allowed) + drm_vblank_off(drm_dev, crtc); } spin_unlock_irqrestore(&drm_dev->event_lock, flags); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c index bafb65389562..2c115f8a62a3 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -50,29 +50,6 @@ #define MAX_HEIGHT 1080 #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) -/* AVI header and aspect ratio */ -#define HDMI_AVI_VERSION 0x02 -#define HDMI_AVI_LENGTH 0x0D -#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4) -#define AVI_SAME_AS_PIC_ASPECT_RATIO 8 - -/* AUI header info */ -#define HDMI_AUI_VERSION 0x01 -#define HDMI_AUI_LENGTH 0x0A - -/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */ -enum HDMI_PACKET_TYPE { - /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */ - /* InfoFrame packet type */ - HDMI_PACKET_TYPE_INFOFRAME = 0x80, - /* Vendor-Specific InfoFrame */ - HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1, - /* Auxiliary Video information InfoFrame */ - HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2, - /* Audio information InfoFrame */ - HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4 -}; - enum hdmi_type { HDMI_TYPE13, HDMI_TYPE14, @@ -97,7 +74,6 @@ struct hdmi_context { struct mutex hdmi_mutex; void __iomem *regs; - void *parent_ctx; int external_irq; int internal_irq; @@ -108,6 +84,7 @@ struct hdmi_context { int cur_conf; struct hdmi_resources res; + void *parent_ctx; int hpd_gpio; @@ -205,7 +182,6 @@ struct hdmi_v13_conf { int height; int vrefresh; bool interlace; - int cea_video_id; const u8 *hdmiphy_data; const struct hdmi_v13_preset_conf *conf; }; @@ -377,20 +353,15 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = { }; static const struct hdmi_v13_conf hdmi_v13_confs[] = { - { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25, - &hdmi_v13_conf_720p60 }, - { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25, - &hdmi_v13_conf_720p60 }, - { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027, - &hdmi_v13_conf_480p }, - { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25, - &hdmi_v13_conf_1080i50 }, - { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5, - &hdmi_v13_conf_1080p50 }, - { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25, - &hdmi_v13_conf_1080i60 }, - { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5, - &hdmi_v13_conf_1080p60 }, + { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, + { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, + { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p }, + { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 }, + { 1920, 1080, 50, false, hdmiphy_v13_conf148_5, + &hdmi_v13_conf_1080p50 }, + { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 }, + { 1920, 1080, 60, false, hdmiphy_v13_conf148_5, + &hdmi_v13_conf_1080p60 }, }; /* HDMI Version 1.4 */ @@ -508,7 +479,6 @@ struct hdmi_conf { int height; int vrefresh; bool interlace; - int cea_video_id; const u8 *hdmiphy_data; const struct hdmi_preset_conf *conf; }; @@ -964,21 +934,16 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = { }; static const struct hdmi_conf hdmi_confs[] = { - { 720, 480, 60, false, 3, 
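[Aside] The vidi pageflip handler above now releases the vblank reference only when one is actually held, and forces vblanks off only when the core is not allowed to disable them from its own timer. Condensed, the guard amounts to the sketch below.

#include <drm/drmP.h>

/*
 * Release the vblank reference only if one is held, and turn vblanks off
 * only when the DRM core will not disable them itself.
 */
static void example_finish_vblank(struct drm_device *drm_dev, int crtc)
{
	if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
		drm_vblank_put(drm_dev, crtc);

	if (!drm_dev->vblank_disable_allowed)
		drm_vblank_off(drm_dev, crtc);
}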
hdmiphy_conf27_027, &hdmi_conf_480p60 }, - { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 }, - { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 }, - { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, - { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, - { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 }, - { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, - { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, + { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 }, + { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 }, + { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, + { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, + { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, + { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 }, + { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, + { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, }; -struct hdmi_infoframe { - enum HDMI_PACKET_TYPE type; - u8 ver; - u8 len; -}; static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) { @@ -1302,88 +1267,6 @@ static int hdmi_conf_index(struct hdmi_context *hdata, return hdmi_v14_conf_index(mode); } -static u8 hdmi_chksum(struct hdmi_context *hdata, - u32 start, u8 len, u32 hdr_sum) -{ - int i; - - /* hdr_sum : header0 + header1 + header2 - * start : start address of packet byte1 - * len : packet bytes - 1 */ - for (i = 0; i < len; ++i) - hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4); - - /* return 2's complement of 8 bit hdr_sum */ - return (u8)(~(hdr_sum & 0xff) + 1); -} - -static void hdmi_reg_infoframe(struct hdmi_context *hdata, - struct hdmi_infoframe *infoframe) -{ - u32 hdr_sum; - u8 chksum; - u32 aspect_ratio; - u32 mod; - u32 vic; - - DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - - mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); - if (hdata->dvi_mode) { - hdmi_reg_writeb(hdata, HDMI_VSI_CON, - HDMI_VSI_CON_DO_NOT_TRANSMIT); - hdmi_reg_writeb(hdata, HDMI_AVI_CON, - HDMI_AVI_CON_DO_NOT_TRANSMIT); - hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN); - return; - } - - switch (infoframe->type) { - case HDMI_PACKET_TYPE_AVI: - hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len); - hdr_sum = infoframe->type + infoframe->ver + infoframe->len; - - /* Output format zero hardcoded ,RGB YBCR selection */ - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | - AVI_ACTIVE_FORMAT_VALID | - AVI_UNDERSCANNED_DISPLAY_VALID); - - aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9; - - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio | - AVI_SAME_AS_PIC_ASPECT_RATIO); - - if (hdata->type == HDMI_TYPE13) - vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id; - else - vic = hdmi_confs[hdata->cur_conf].cea_video_id; - - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); - - chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), - infoframe->len, hdr_sum); - DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); - hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); - break; - case HDMI_PACKET_TYPE_AUI: - hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver); - hdmi_reg_writeb(hdata, 
HDMI_AUI_HEADER2, infoframe->len); - hdr_sum = infoframe->type + infoframe->ver + infoframe->len; - chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), - infoframe->len, hdr_sum); - DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum); - hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); - break; - default: - break; - } -} - static bool hdmi_is_connected(void *ctx) { struct hdmi_context *hdata = ctx; @@ -1410,7 +1293,6 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector, DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), raw_edid->width_cm, raw_edid->height_cm); - kfree(raw_edid); } else { return -ENODEV; } @@ -1659,8 +1541,6 @@ static void hdmi_conf_reset(struct hdmi_context *hdata) static void hdmi_conf_init(struct hdmi_context *hdata) { - struct hdmi_infoframe infoframe; - /* disable HPD interrupts */ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); @@ -1695,17 +1575,9 @@ static void hdmi_conf_init(struct hdmi_context *hdata) hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); } else { - infoframe.type = HDMI_PACKET_TYPE_AVI; - infoframe.ver = HDMI_AVI_VERSION; - infoframe.len = HDMI_AVI_LENGTH; - hdmi_reg_infoframe(hdata, &infoframe); - - infoframe.type = HDMI_PACKET_TYPE_AUI; - infoframe.ver = HDMI_AUI_VERSION; - infoframe.len = HDMI_AUI_LENGTH; - hdmi_reg_infoframe(hdata, &infoframe); - /* enable AVI packet every vsync, fixes purple line problem */ + hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02); + hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5); hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); } } @@ -2106,18 +1978,9 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, index = hdmi_v14_conf_index(m); if (index >= 0) { - struct drm_mode_object base; - struct list_head head; - DRM_INFO("desired mode doesn't exist so\n"); DRM_INFO("use the most suitable mode among modes.\n"); - - /* preserve display mode header while copying. 
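[Aside] The deleted hdmi_chksum()/hdmi_reg_infoframe() pair programmed the AVI/AUI InfoFrames by hand, ending with a CEA-861-style checksum byte chosen so that header plus payload plus checksum sums to zero modulo 256. The arithmetic, extracted into a stand-alone helper for illustration.

#include <linux/types.h>

/*
 * InfoFrame checksum as the deleted hdmi_chksum() computed it: the checksum
 * byte makes (header bytes + payload bytes + checksum) sum to 0 mod 256.
 */
static u8 example_infoframe_checksum(u8 type, u8 version, u8 len,
				     const u8 *payload)
{
	u32 sum = type + version + len;
	unsigned int i;

	for (i = 0; i < len; i++)
		sum += payload[i];

	/* Two's complement of the low byte of the running sum. */
	return (u8)(~(sum & 0xff) + 1);
}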
*/ - head = adjusted_mode->head; - base = adjusted_mode->base; memcpy(adjusted_mode, m, sizeof(*m)); - adjusted_mode->head = head; - adjusted_mode->base = base; break; } } @@ -2303,27 +2166,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata) memset(res, 0, sizeof(*res)); /* get clocks, power */ - res->hdmi = devm_clk_get(dev, "hdmi"); + res->hdmi = clk_get(dev, "hdmi"); if (IS_ERR_OR_NULL(res->hdmi)) { DRM_ERROR("failed to get clock 'hdmi'\n"); goto fail; } - res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); + res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); if (IS_ERR_OR_NULL(res->sclk_hdmi)) { DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); goto fail; } - res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); + res->sclk_pixel = clk_get(dev, "sclk_pixel"); if (IS_ERR_OR_NULL(res->sclk_pixel)) { DRM_ERROR("failed to get clock 'sclk_pixel'\n"); goto fail; } - res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); + res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy"); if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); goto fail; } - res->hdmiphy = devm_clk_get(dev, "hdmiphy"); + res->hdmiphy = clk_get(dev, "hdmiphy"); if (IS_ERR_OR_NULL(res->hdmiphy)) { DRM_ERROR("failed to get clock 'hdmiphy'\n"); goto fail; @@ -2331,7 +2194,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata) clk_set_parent(res->sclk_hdmi, res->sclk_pixel); - res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * + res->regul_bulk = kzalloc(ARRAY_SIZE(supply) * sizeof(res->regul_bulk[0]), GFP_KERNEL); if (!res->regul_bulk) { DRM_ERROR("failed to get memory for regulators\n"); @@ -2341,7 +2204,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata) res->regul_bulk[i].supply = supply[i]; res->regul_bulk[i].consumer = NULL; } - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); + ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); if (ret) { DRM_ERROR("failed to get regulators\n"); goto fail; @@ -2354,6 +2217,28 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata) return -ENODEV; } +static int hdmi_resources_cleanup(struct hdmi_context *hdata) +{ + struct hdmi_resources *res = &hdata->res; + + regulator_bulk_free(res->regul_count, res->regul_bulk); + /* kfree is NULL-safe */ + kfree(res->regul_bulk); + if (!IS_ERR_OR_NULL(res->hdmiphy)) + clk_put(res->hdmiphy); + if (!IS_ERR_OR_NULL(res->sclk_hdmiphy)) + clk_put(res->sclk_hdmiphy); + if (!IS_ERR_OR_NULL(res->sclk_pixel)) + clk_put(res->sclk_pixel); + if (!IS_ERR_OR_NULL(res->sclk_hdmi)) + clk_put(res->sclk_hdmi); + if (!IS_ERR_OR_NULL(res->hdmi)) + clk_put(res->hdmi); + memset(res, 0, sizeof(*res)); + + return 0; +} + static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; void hdmi_attach_ddc_client(struct i2c_client *ddc) @@ -2493,32 +2378,36 @@ static int __devinit hdmi_probe(struct platform_device *pdev) ret = hdmi_resources_init(hdata); if (ret) { + ret = -EINVAL; DRM_ERROR("hdmi_resources_init failed\n"); - return -EINVAL; + goto err_data; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { DRM_ERROR("failed to find registers\n"); - return -ENOENT; + ret = -ENOENT; + goto err_resource; } hdata->regs = devm_request_and_ioremap(&pdev->dev, res); if (!hdata->regs) { DRM_ERROR("failed to map registers\n"); - return -ENXIO; + ret = -ENXIO; + goto err_resource; } - ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD"); + ret = gpio_request(hdata->hpd_gpio, "HPD"); if (ret) { DRM_ERROR("failed to request HPD 
gpio\n"); - return ret; + goto err_resource; } /* DDC i2c driver */ if (i2c_add_driver(&ddc_driver)) { DRM_ERROR("failed to register ddc i2c driver\n"); - return -ENOENT; + ret = -ENOENT; + goto err_gpio; } hdata->ddc_port = hdmi_ddc; @@ -2581,6 +2470,11 @@ static int __devinit hdmi_probe(struct platform_device *pdev) i2c_del_driver(&hdmiphy_driver); err_ddc: i2c_del_driver(&ddc_driver); +err_gpio: + gpio_free(hdata->hpd_gpio); +err_resource: + hdmi_resources_cleanup(hdata); +err_data: return ret; } @@ -2597,6 +2491,9 @@ static int __devexit hdmi_remove(struct platform_device *pdev) free_irq(hdata->internal_irq, hdata); free_irq(hdata->external_irq, hdata); + gpio_free(hdata->hpd_gpio); + + hdmi_resources_cleanup(hdata); /* hdmiphy i2c driver */ i2c_del_driver(&hdmiphy_driver); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c index 40a6e1906fbb..e7fbb823fd8e 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c @@ -36,7 +36,6 @@ #include "exynos_drm_drv.h" #include "exynos_drm_hdmi.h" -#include "exynos_drm_iommu.h" #define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) @@ -81,7 +80,6 @@ enum mixer_version_id { struct mixer_context { struct device *dev; - struct drm_device *drm_dev; int pipe; bool interlace; bool powered; @@ -92,7 +90,6 @@ struct mixer_context { struct mixer_resources mixer_res; struct hdmi_win_data win_data[MIXER_WIN_NR]; enum mixer_version_id mxr_ver; - void *parent_ctx; }; struct mixer_drv_data { @@ -668,24 +665,6 @@ static void mixer_win_reset(struct mixer_context *ctx) spin_unlock_irqrestore(&res->reg_slock, flags); } -static int mixer_iommu_on(void *ctx, bool enable) -{ - struct exynos_drm_hdmi_context *drm_hdmi_ctx; - struct mixer_context *mdata = ctx; - struct drm_device *drm_dev; - - drm_hdmi_ctx = mdata->parent_ctx; - drm_dev = drm_hdmi_ctx->drm_dev; - - if (is_drm_iommu_supported(drm_dev)) { - if (enable) - return drm_iommu_attach_device(drm_dev, mdata->dev); - - drm_iommu_detach_device(drm_dev, mdata->dev); - } - return 0; -} - static void mixer_poweron(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; @@ -887,7 +866,6 @@ static void mixer_win_disable(void *ctx, int win) static struct exynos_mixer_ops mixer_ops = { /* manager */ - .iommu_on = mixer_iommu_on, .enable_vblank = mixer_enable_vblank, .disable_vblank = mixer_disable_vblank, .dpms = mixer_dpms, @@ -906,6 +884,7 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc) struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned long flags; + bool is_checked = false; spin_lock_irqsave(&drm_dev->event_lock, flags); @@ -915,6 +894,7 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc) if (crtc != e->pipe) continue; + is_checked = true; do_gettimeofday(&now); e->event.sequence = 0; e->event.tv_sec = now.tv_sec; @@ -922,9 +902,16 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc) list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); - drm_vblank_put(drm_dev, crtc); } + if (is_checked) + /* + * call drm_vblank_put only in case that drm_vblank_get was + * called. 
+ */ + if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0) + drm_vblank_put(drm_dev, crtc); + spin_unlock_irqrestore(&drm_dev->event_lock, flags); } @@ -984,45 +971,57 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, spin_lock_init(&mixer_res->reg_slock); - mixer_res->mixer = devm_clk_get(dev, "mixer"); + mixer_res->mixer = clk_get(dev, "mixer"); if (IS_ERR_OR_NULL(mixer_res->mixer)) { dev_err(dev, "failed to get clock 'mixer'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } - mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); + mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(dev, "get memory resource failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (mixer_res->mixer_regs == NULL) { dev_err(dev, "register mapping failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(dev, "get interrupt resource failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, 0, "drm_mixer", ctx); if (ret) { dev_err(dev, "request interrupt failed.\n"); - return ret; + goto fail; } mixer_res->irq = res->start; return 0; + +fail: + if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) + clk_put(mixer_res->sclk_hdmi); + if (!IS_ERR_OR_NULL(mixer_res->mixer)) + clk_put(mixer_res->mixer); + return ret; } static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, @@ -1032,21 +1031,25 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, struct device *dev = &pdev->dev; struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; struct resource *res; + int ret; - mixer_res->vp = devm_clk_get(dev, "vp"); + mixer_res->vp = clk_get(dev, "vp"); if (IS_ERR_OR_NULL(mixer_res->vp)) { dev_err(dev, "failed to get clock 'vp'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } - mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); + mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { dev_err(dev, "failed to get clock 'sclk_mixer'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } - mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac"); + mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { dev_err(dev, "failed to get clock 'sclk_dac'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } if (mixer_res->sclk_hdmi) @@ -1055,17 +1058,28 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res == NULL) { dev_err(dev, "get memory resource failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (mixer_res->vp_regs == NULL) { dev_err(dev, "register mapping failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } return 0; + +fail: + if (!IS_ERR_OR_NULL(mixer_res->sclk_dac)) + clk_put(mixer_res->sclk_dac); + if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer)) + clk_put(mixer_res->sclk_mixer); + if (!IS_ERR_OR_NULL(mixer_res->vp)) + clk_put(mixer_res->vp); + return ret; } static struct mixer_drv_data exynos5_mxr_drv_data = { @@ -1135,7 
+1149,6 @@ static int __devinit mixer_probe(struct platform_device *pdev) } ctx->dev = &pdev->dev; - ctx->parent_ctx = (void *)drm_hdmi_ctx; drm_hdmi_ctx->ctx = (void *)ctx; ctx->vp_enabled = drv->is_vp_enabled; ctx->mxr_ver = drv->version; diff --git a/trunk/drivers/gpu/drm/exynos/regs-hdmi.h b/trunk/drivers/gpu/drm/exynos/regs-hdmi.h index 970cdb518eb1..9cc7c5e9718c 100644 --- a/trunk/drivers/gpu/drm/exynos/regs-hdmi.h +++ b/trunk/drivers/gpu/drm/exynos/regs-hdmi.h @@ -298,14 +298,14 @@ #define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714) #define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718) #define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C) -#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1)) +#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n)) #define HDMI_AUI_CON HDMI_CORE_BASE(0x0800) #define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810) #define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814) #define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818) #define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C) -#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1)) +#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n)) #define HDMI_MPG_CON HDMI_CORE_BASE(0x0900) #define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C) @@ -338,19 +338,6 @@ #define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60) #define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64) -/* AVI bit definition */ -#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1) -#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1) - -#define AVI_ACTIVE_FORMAT_VALID (1 << 4) -#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1) - -/* AUI bit definition */ -#define HDMI_AUI_CON_NO_TRAN (0 << 0) - -/* VSI bit definition */ -#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0) - /* HDCP related registers */ #define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n)) #define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n)) diff --git a/trunk/drivers/gpu/drm/gma500/cdv_device.c b/trunk/drivers/gpu/drm/gma500/cdv_device.c index 23e14e93991f..1ceca3d13b65 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_device.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_device.c @@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector) dev_priv->force_audio_property = prop; } - drm_object_attach_property(&connector->base, prop, 0); + drm_connector_attach_property(connector, prop, 0); } @@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector) dev_priv->broadcast_rgb_property = prop; } - drm_object_attach_property(&connector->base, prop, 0); + drm_connector_attach_property(connector, prop, 0); } /* Cedarview */ diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c index 51044cc55cf2..e3a3978cf320 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector, struct cdv_intel_dp *intel_dp = encoder->dev_priv; int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index e223b500022e..7272a461edfe 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector, return -1; } - if (drm_object_property_get_value(&connector->base, + if 
(drm_connector_property_get_value(connector, property, &curValue)) return -1; if (curValue == value) return 0; - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) return -1; @@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev, connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c index d81dbc3368f0..b362dd39bf5a 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, return -1; } - if (drm_object_property_get_value(&connector->base, + if (drm_connector_property_get_value(connector, property, &curValue)) return -1; @@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, if (curValue == value) return 0; - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) return -1; @@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, return -1; } } else if (!strcmp(property->name, "backlight") && encoder) { - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) return -1; @@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev, connector->doublescan_allowed = false; /*Attach connector properties*/ - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); diff --git a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 2d4ab48f07a2..32dba2ab53e1 100644 --- a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, goto set_prop_error; } - if (drm_object_property_get_value(&connector->base, property, &val)) + if (drm_connector_property_get_value(connector, property, &val)) goto set_prop_error; if (val == value) goto set_prop_done; - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; @@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, } } } else if (!strcmp(property->name, "backlight") && encoder) { - if (drm_object_property_set_value(&connector->base, property, + if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; else @@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev, dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe); - if (pipe != 0 && pipe != 2) { + if (!dev || ((pipe != 0) && (pipe != 2))) { DRM_ERROR("Invalid parameter\n"); return; } @@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev, connector->doublescan_allowed = false; /*attach properties*/ - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, 
dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev_priv->backlight_property, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL); diff --git a/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c b/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c index 74485dc43945..dec6a9aea3c6 100644 --- a/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, REG_WRITE(map->pos, 0); if (psb_intel_encoder) - drm_object_property_get_value(&connector->base, + drm_connector_property_get_value(connector, dev->mode_config.scaling_mode_property, &scalingType); if (scalingType == DRM_MODE_SCALE_NO_SCALE) { diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail.h b/trunk/drivers/gpu/drm/gma500/oaktrail.h index 30adbbe23024..f2f9f38a5362 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail.h +++ b/trunk/drivers/gpu/drm/gma500/oaktrail.h @@ -249,9 +249,3 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev); extern void oaktrail_hdmi_save(struct drm_device *dev); extern void oaktrail_hdmi_restore(struct drm_device *dev); extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev); -extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); -extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode); - - diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c b/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c index 3071526bc3c1..cdafd2acc72f 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -168,11 +168,6 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 temp; - if (pipe == 1) { - oaktrail_crtc_hdmi_dpms(crtc, mode); - return; - } - if (!gma_power_begin(dev, true)) return; @@ -307,9 +302,6 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; struct drm_connector *connector; - if (pipe == 1) - return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb); - if (!gma_power_begin(dev, true)) return 0; @@ -351,7 +343,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, (mode->crtc_vdisplay - 1)); if (psb_intel_encoder) - drm_object_property_get_value(&connector->base, + drm_connector_property_get_value(connector, dev->mode_config.scaling_mode_property, &scalingType); if (scalingType == DRM_MODE_SCALE_NO_SCALE) { diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_device.c b/trunk/drivers/gpu/drm/gma500/oaktrail_device.c index 08747fd7105c..010b820744a5 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_device.c @@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = { .accel_2d = 1, .pipes = 2, .crtcs = 2, - .hdmi_mask = (1 << 1), + .hdmi_mask = (1 << 0), .lvds_mask = (1 << 0), .cursor_needs_phys = 0, .sgx_offset = MRST_SGX_OFFSET, diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c index f036f1fc161e..69e51e903f35 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -155,345 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev) 
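
The block removed in the hunk below includes an SCU busy-wait loop (scu_busy_loop) that polls a status register with a retry cap instead of spinning forever. Purely as an illustration of that pattern — the helper below is hypothetical and not part of this patch — a bounded MMIO poll looks roughly like this:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical helper: poll a status register until busy_bit clears,
 * giving up after max_us microseconds instead of spinning forever. */
static int poll_until_idle(void __iomem *status_reg, u32 busy_bit,
			   unsigned int max_us)
{
	while (max_us--) {
		if (!(readl(status_reg) & busy_bit))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
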
HDMI_READ(HDMI_HCR); } -static void wait_for_vblank(struct drm_device *dev) -{ - /* Wait for 20ms, i.e. one cycle at 50hz. */ - mdelay(20); -} - -static unsigned int htotal_calculate(struct drm_display_mode *mode) -{ - u32 htotal, new_crtc_htotal; - - htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16); - - /* - * 1024 x 768 new_crtc_htotal = 0x1024; - * 1280 x 1024 new_crtc_htotal = 0x0c34; - */ - new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock; - - DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal); - return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16); -} - -static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target, - int refclk, struct oaktrail_hdmi_clock *best_clock) -{ - int np_min, np_max, nr_min, nr_max; - int np, nr, nf; - - np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10); - np_max = oaktrail_hdmi_limit.vco.max / (target * 10); - if (np_min < oaktrail_hdmi_limit.np.min) - np_min = oaktrail_hdmi_limit.np.min; - if (np_max > oaktrail_hdmi_limit.np.max) - np_max = oaktrail_hdmi_limit.np.max; - - nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max)); - nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min)); - if (nr_min < oaktrail_hdmi_limit.nr.min) - nr_min = oaktrail_hdmi_limit.nr.min; - if (nr_max > oaktrail_hdmi_limit.nr.max) - nr_max = oaktrail_hdmi_limit.nr.max; - - np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max)); - nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np)); - nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk); - DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf); - - /* - * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000; - * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000; - */ - best_clock->np = np; - best_clock->nr = nr - 1; - best_clock->nf = (nf << 14); -} - -static void scu_busy_loop(void __iomem *scu_base) -{ - u32 status = 0; - u32 loop_count = 0; - - status = readl(scu_base + 0x04); - while (status & 1) { - udelay(1); /* scu processing time is in few u secods */ - status = readl(scu_base + 0x04); - loop_count++; - /* break if scu doesn't reset busy bit after huge retry */ - if (loop_count > 1000) { - DRM_DEBUG_KMS("SCU IPC timed out"); - return; - } - } -} - -/* - * You don't want to know, you really really don't want to know.... - * - * This is magic. However it's safe magic because of the way the platform - * works and it is necessary magic. - */ -static void oaktrail_hdmi_reset(struct drm_device *dev) -{ - void __iomem *base; - unsigned long scu_ipc_mmio = 0xff11c000UL; - int scu_len = 1024; - - base = ioremap((resource_size_t)scu_ipc_mmio, scu_len); - if (base == NULL) { - DRM_ERROR("failed to map scu mmio\n"); - return; - } - - /* scu ipc: assert hdmi controller reset */ - writel(0xff11d118, base + 0x0c); - writel(0x7fffffdf, base + 0x80); - writel(0x42005, base + 0x0); - scu_busy_loop(base); - - /* scu ipc: de-assert hdmi controller reset */ - writel(0xff11d118, base + 0x0c); - writel(0x7fffffff, base + 0x80); - writel(0x42005, base + 0x0); - scu_busy_loop(base); - - iounmap(base); -} - -int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; - int pipe = 1; - int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; - int hblank_reg = (pipe == 0) ? 
HBLANK_A : HBLANK_B; - int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; - int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; - int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; - int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; - int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; - int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; - int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - int refclk; - struct oaktrail_hdmi_clock clock; - u32 dspcntr, pipeconf, dpll, temp; - int dspcntr_reg = DSPBCNTR; - - if (!gma_power_begin(dev, true)) - return 0; - - /* Disable the VGA plane that we never use */ - REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); - - /* Disable dpll if necessary */ - dpll = REG_READ(DPLL_CTRL); - if ((dpll & DPLL_PWRDN) == 0) { - REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET)); - REG_WRITE(DPLL_DIV_CTRL, 0x00000000); - REG_WRITE(DPLL_STATUS, 0x1); - } - udelay(150); - - /* Reset controller */ - oaktrail_hdmi_reset(dev); - - /* program and enable dpll */ - refclk = 25000; - oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock); - - /* Set the DPLL */ - dpll = REG_READ(DPLL_CTRL); - dpll &= ~DPLL_PDIV_MASK; - dpll &= ~(DPLL_PWRDN | DPLL_RESET); - REG_WRITE(DPLL_CTRL, 0x00000008); - REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr)); - REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1)); - REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN)); - REG_WRITE(DPLL_UPDATE, 0x80000000); - REG_WRITE(DPLL_CLK_ENABLE, 0x80050102); - udelay(150); - - /* configure HDMI */ - HDMI_WRITE(0x1004, 0x1fd); - HDMI_WRITE(0x2000, 0x1); - HDMI_WRITE(0x2008, 0x0); - HDMI_WRITE(0x3130, 0x8); - HDMI_WRITE(0x101c, 0x1800810); - - temp = htotal_calculate(adjusted_mode); - REG_WRITE(htot_reg, temp); - REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); - REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); - REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); - REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); - REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); - REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); - - REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); - REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); - REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); - REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); - REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); - REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); - REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); - - temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; - HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp); - - REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); - REG_WRITE(dsppos_reg, 0); - - /* Flush the plane changes */ - { - struct drm_crtc_helper_funcs 
*crtc_funcs = crtc->helper_private; - crtc_funcs->mode_set_base(crtc, x, y, old_fb); - } - - /* Set up the display plane register */ - dspcntr = REG_READ(dspcntr_reg); - dspcntr |= DISPPLANE_GAMMA_ENABLE; - dspcntr |= DISPPLANE_SEL_PIPE_B; - dspcntr |= DISPLAY_PLANE_ENABLE; - - /* setup pipeconf */ - pipeconf = REG_READ(pipeconf_reg); - pipeconf |= PIPEACONF_ENABLE; - - REG_WRITE(pipeconf_reg, pipeconf); - REG_READ(pipeconf_reg); - - REG_WRITE(PCH_PIPEBCONF, pipeconf); - REG_READ(PCH_PIPEBCONF); - wait_for_vblank(dev); - - REG_WRITE(dspcntr_reg, dspcntr); - wait_for_vblank(dev); - - gma_power_end(dev); - - return 0; -} - -void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode) -{ - struct drm_device *dev = crtc->dev; - u32 temp; - - DRM_DEBUG_KMS("%s %d\n", __func__, mode); - - switch (mode) { - case DRM_MODE_DPMS_OFF: - REG_WRITE(VGACNTRL, 0x80000000); - - /* Disable plane */ - temp = REG_READ(DSPBCNTR); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE); - REG_READ(DSPBCNTR); - /* Flush the plane changes */ - REG_WRITE(DSPBSURF, REG_READ(DSPBSURF)); - REG_READ(DSPBSURF); - } - - /* Disable pipe B */ - temp = REG_READ(PIPEBCONF); - if ((temp & PIPEACONF_ENABLE) != 0) { - REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE); - REG_READ(PIPEBCONF); - } - - /* Disable LNW Pipes, etc */ - temp = REG_READ(PCH_PIPEBCONF); - if ((temp & PIPEACONF_ENABLE) != 0) { - REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE); - REG_READ(PCH_PIPEBCONF); - } - - /* wait for pipe off */ - udelay(150); - - /* Disable dpll */ - temp = REG_READ(DPLL_CTRL); - if ((temp & DPLL_PWRDN) == 0) { - REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET)); - REG_WRITE(DPLL_STATUS, 0x1); - } - - /* wait for dpll off */ - udelay(150); - - break; - case DRM_MODE_DPMS_ON: - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - /* Enable dpll */ - temp = REG_READ(DPLL_CTRL); - if ((temp & DPLL_PWRDN) != 0) { - REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET)); - temp = REG_READ(DPLL_CLK_ENABLE); - REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI); - REG_READ(DPLL_CLK_ENABLE); - } - /* wait for dpll warm up */ - udelay(150); - - /* Enable pipe B */ - temp = REG_READ(PIPEBCONF); - if ((temp & PIPEACONF_ENABLE) == 0) { - REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE); - REG_READ(PIPEBCONF); - } - - /* Enable LNW Pipe B */ - temp = REG_READ(PCH_PIPEBCONF); - if ((temp & PIPEACONF_ENABLE) == 0) { - REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE); - REG_READ(PCH_PIPEBCONF); - } - - wait_for_vblank(dev); - - /* Enable plane */ - temp = REG_READ(DSPBCNTR); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(DSPBSURF, REG_READ(DSPBSURF)); - REG_READ(DSPBSURF); - } - - psb_intel_crtc_load_lut(crtc); - } - - /* DSPARB */ - REG_WRITE(DSPARB, 0x00003fbf); - - /* FW1 */ - REG_WRITE(0x70034, 0x3f880a0a); - - /* FW2 */ - REG_WRITE(0x70038, 0x0b060808); - - /* FW4 */ - REG_WRITE(0x70050, 0x08030404); - - /* FW5 */ - REG_WRITE(0x70054, 0x04040404); - - /* LNC Chicken Bits - Squawk! 
*/ - REG_WRITE(0x70400, 0x4000); - - return; -} - static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) { static int dpms_mode = -1; @@ -572,15 +233,13 @@ static const unsigned char raw_edid[] = { static int oaktrail_hdmi_get_modes(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; + struct drm_psb_private *dev_priv = dev->dev_private; struct i2c_adapter *i2c_adap; struct edid *edid; - int ret = 0; + struct drm_display_mode *mode, *t; + int i = 0, ret = 0; - /* - * FIXME: We need to figure this lot out. In theory we can - * read the EDID somehow but I've yet to find working reference - * code. - */ i2c_adap = i2c_get_adapter(3); if (i2c_adap == NULL) { DRM_ERROR("No ddc adapter available!\n"); @@ -594,7 +253,17 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); } - return ret; + + /* + * prune modes that require frame buffer bigger than stolen mem + */ + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) { + i++; + drm_mode_remove(connector, mode); + } + } + return ret - i; } static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder, @@ -680,7 +349,6 @@ void oaktrail_hdmi_init(struct drm_device *dev, connector->interlace_allowed = false; connector->doublescan_allowed = false; drm_sysfs_connector_add(connector); - dev_info(dev->dev, "HDMI initialised.\n"); return; @@ -735,9 +403,6 @@ void oaktrail_hdmi_setup(struct drm_device *dev) dev_priv->hdmi_priv = hdmi_dev; oaktrail_hdmi_audio_disable(dev); - - dev_info(dev->dev, "HDMI hardware present.\n"); - return; free: diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c b/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c index 325013a9c48c..558c77fb55ec 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder, return; } - drm_object_property_get_value( - &connector->base, + drm_connector_property_get_value( + connector, dev->mode_config.scaling_mode_property, &v); @@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev, connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c index 9fa5fa2e6192..2a4c3a9e33e3 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, goto set_prop_error; } - if (drm_object_property_get_value(&connector->base, + if (drm_connector_property_get_value(connector, property, &curval)) goto set_prop_error; @@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, if (curval == value) goto set_prop_done; - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; @@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, goto 
set_prop_error; } } else if (!strcmp(property->name, "backlight")) { - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; @@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev, connector->doublescan_allowed = false; /*Attach connector properties*/ - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c index a4cc777ab7a6..fc9292705dbf 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, uint8_t cmd; int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; @@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) { temp_value = val; if (psb_intel_sdvo_connector->left == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, psb_intel_sdvo_connector->right, val); if (psb_intel_sdvo_connector->left_margin == temp_value) return 0; @@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (psb_intel_sdvo_connector->right == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, psb_intel_sdvo_connector->left, val); if (psb_intel_sdvo_connector->right_margin == temp_value) return 0; @@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (psb_intel_sdvo_connector->top == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, psb_intel_sdvo_connector->bottom, val); if (psb_intel_sdvo_connector->top_margin == temp_value) return 0; @@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } else if (psb_intel_sdvo_connector->bottom == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, psb_intel_sdvo_connector->top, val); if (psb_intel_sdvo_connector->bottom_margin == temp_value) return 0; @@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]); psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0]; - drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base, + drm_connector_attach_property(&psb_intel_sdvo_connector->base.base, psb_intel_sdvo_connector->tv_format, 0); return true; @@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s psb_intel_sdvo_connector->name = \ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ if (!psb_intel_sdvo_connector->name) return false; \ - drm_object_attach_property(&connector->base, \ + drm_connector_attach_property(connector, \ 
psb_intel_sdvo_connector->name, \ psb_intel_sdvo_connector->cur_##name); \ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ @@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->left) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->left, psb_intel_sdvo_connector->left_margin); @@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->right) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->right, psb_intel_sdvo_connector->right_margin); DRM_DEBUG_KMS("h_overscan: max %d, " @@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->top) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->top, psb_intel_sdvo_connector->top_margin); @@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->bottom) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->bottom, psb_intel_sdvo_connector->bottom_margin); DRM_DEBUG_KMS("v_overscan: max %d, " @@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->dot_crawl) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->dot_crawl, psb_intel_sdvo_connector->cur_dot_crawl); DRM_DEBUG_KMS("dot crawl: current %d\n", response); diff --git a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c index b865d0728e28..599099fe76e3 100644 --- a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c @@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod else priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, encoder->dev->mode_config.tv_subconnector_property, priv->subconnector); @@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder, priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2); - drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property, + drm_connector_attach_property(connector, conf->tv_select_subconnector_property, priv->select_subconnector); - drm_object_attach_property(&connector->base, conf->tv_subconnector_property, + drm_connector_attach_property(connector, conf->tv_subconnector_property, priv->subconnector); - drm_object_attach_property(&connector->base, conf->tv_left_margin_property, + drm_connector_attach_property(connector, conf->tv_left_margin_property, priv->hmargin); - drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property, + drm_connector_attach_property(connector, conf->tv_bottom_margin_property, priv->vmargin); - drm_object_attach_property(&connector->base, conf->tv_mode_property, + drm_connector_attach_property(connector, conf->tv_mode_property, priv->norm); - drm_object_attach_property(&connector->base, conf->tv_brightness_property, + 
drm_connector_attach_property(connector, conf->tv_brightness_property, priv->brightness); - drm_object_attach_property(&connector->base, conf->tv_contrast_property, + drm_connector_attach_property(connector, conf->tv_contrast_property, priv->contrast); - drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property, + drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, priv->flicker); - drm_object_attach_property(&connector->base, priv->scale_property, + drm_connector_attach_property(connector, priv->scale_property, priv->scale); return 0; diff --git a/trunk/drivers/gpu/drm/i915/i915_debugfs.c b/trunk/drivers/gpu/drm/i915/i915_debugfs.c index 4568e7d8a060..dde8b505bf7f 100644 --- a/trunk/drivers/gpu/drm/i915/i915_debugfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_debugfs.c @@ -1068,7 +1068,7 @@ static int gen6_drpc_info(struct seq_file *m) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; + u32 rpmodectl1, gt_core_status, rcctl1; unsigned forcewake_count; int count=0, ret; @@ -1097,9 +1097,6 @@ static int gen6_drpc_info(struct seq_file *m) rpmodectl1 = I915_READ(GEN6_RP_CONTROL); rcctl1 = I915_READ(GEN6_RC_CONTROL); mutex_unlock(&dev->struct_mutex); - mutex_lock(&dev_priv->rps.hw_lock); - sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - mutex_unlock(&dev_priv->rps.hw_lock); seq_printf(m, "Video Turbo Mode: %s\n", yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); @@ -1151,12 +1148,6 @@ static int gen6_drpc_info(struct seq_file *m) seq_printf(m, "RC6++ residency since boot: %u\n", I915_READ(GEN6_GT_GFX_RC6pp)); - seq_printf(m, "RC6 voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); - seq_printf(m, "RC6+ voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); - seq_printf(m, "RC6++ voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); return 0; } @@ -1282,7 +1273,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) return 0; } - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1291,14 +1282,19 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) for (gpu_freq = dev_priv->rps.min_delay; gpu_freq <= dev_priv->rps.max_delay; gpu_freq++) { - ia_freq = gpu_freq; - sandybridge_pcode_read(dev_priv, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &ia_freq); + I915_WRITE(GEN6_PCODE_DATA, gpu_freq); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | + GEN6_PCODE_READ_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & + GEN6_PCODE_READY) == 0, 10)) { + DRM_ERROR("pcode read of freq table timed out\n"); + continue; + } + ia_freq = I915_READ(GEN6_PCODE_DATA); seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -1402,15 +1398,15 @@ static int i915_context_status(struct seq_file *m, void *unused) if (ret) return ret; - if (dev_priv->ips.pwrctx) { + if (dev_priv->pwrctx) { seq_printf(m, "power context "); - describe_obj(m, dev_priv->ips.pwrctx); + describe_obj(m, dev_priv->pwrctx); seq_printf(m, "\n"); } - if (dev_priv->ips.renderctx) { + if (dev_priv->renderctx) { seq_printf(m, "render context "); - describe_obj(m, dev_priv->ips.renderctx); + describe_obj(m, dev_priv->renderctx); seq_printf(m, "\n"); } @@ 
-1715,13 +1711,13 @@ i915_max_freq_read(struct file *filp, if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; len = snprintf(buf, sizeof(buf), "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); if (len > sizeof(buf)) len = sizeof(buf); @@ -1756,7 +1752,7 @@ i915_max_freq_write(struct file *filp, DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1766,7 +1762,7 @@ i915_max_freq_write(struct file *filp, dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return cnt; } @@ -1791,13 +1787,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; len = snprintf(buf, sizeof(buf), "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); if (len > sizeof(buf)) len = sizeof(buf); @@ -1830,7 +1826,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1840,7 +1836,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return cnt; } diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c index a48e4910ea2c..61ae104dca8c 100644 --- a/trunk/drivers/gpu/drm/i915/i915_dma.c +++ b/trunk/drivers/gpu/drm/i915/i915_dma.c @@ -103,6 +103,32 @@ static void i915_write_hws_pga(struct drm_device *dev) I915_WRITE(HWS_PGA, addr); } +/** + * Sets up the hardware status page for devices that need a physical address + * in the register. + */ +static int i915_init_phys_hws(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + /* Program Hardware Status Page */ + dev_priv->status_page_dmah = + drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); + + if (!dev_priv->status_page_dmah) { + DRM_ERROR("Can not allocate hardware status page\n"); + return -ENOMEM; + } + + memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr, + 0, PAGE_SIZE); + + i915_write_hws_pga(dev); + + DRM_DEBUG_DRIVER("Enabled hardware status page\n"); + return 0; +} + /** * Frees the hardware status page, whether it's a physical address or a virtual * address set up by the X Server. 
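
The hunk above re-adds i915_init_phys_hws(), which allocates the hardware status page with drm_pci_alloc() and clears it with memset_io() rather than plain memset(). As a minimal sketch of the general rule it reflects — the helper name here is made up for this note — buffers reached through an __iomem pointer are written via the MMIO-safe accessors:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: plain memset() is not guaranteed safe on __iomem
 * mappings, so such buffers are cleared with memset_io() instead. */
static void clear_iomem(void __iomem *vaddr, size_t len)
{
	memset_io(vaddr, 0, len);
}
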
@@ -425,16 +451,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - dev_priv->dri1.counter++; - if (dev_priv->dri1.counter > 0x7FFFFFFFUL) - dev_priv->dri1.counter = 0; + dev_priv->counter++; + if (dev_priv->counter > 0x7FFFFFFFUL) + dev_priv->counter = 0; if (master_priv->sarea_priv) - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; + master_priv->sarea_priv->last_enqueue = dev_priv->counter; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); + OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } @@ -576,12 +602,12 @@ static int i915_dispatch_flip(struct drm_device * dev) ADVANCE_LP_RING(); - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++; + master_priv->sarea_priv->last_enqueue = dev_priv->counter++; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); + OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } @@ -749,21 +775,21 @@ static int i915_emit_irq(struct drm_device * dev) DRM_DEBUG_DRIVER("\n"); - dev_priv->dri1.counter++; - if (dev_priv->dri1.counter > 0x7FFFFFFFUL) - dev_priv->dri1.counter = 1; + dev_priv->counter++; + if (dev_priv->counter > 0x7FFFFFFFUL) + dev_priv->counter = 1; if (master_priv->sarea_priv) - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; + master_priv->sarea_priv->last_enqueue = dev_priv->counter; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); + OUT_RING(dev_priv->counter); OUT_RING(MI_USER_INTERRUPT); ADVANCE_LP_RING(); } - return dev_priv->dri1.counter; + return dev_priv->counter; } static int i915_wait_irq(struct drm_device * dev, int irq_nr) @@ -794,7 +820,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", - READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); + READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); } return ret; @@ -988,9 +1014,6 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_PRIME_VMAP_FLUSH: value = 1; break; - case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); - break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -1303,8 +1326,6 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_modeset_gem_init(dev); - INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); - ret = drm_irq_install(dev); if (ret) goto cleanup_gem; @@ -1470,9 +1491,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto free_priv; } - ret = i915_gem_gtt_init(dev); - if (ret) + ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); + if (!ret) { + DRM_ERROR("failed to set up gmch\n"); + ret = -EIO; goto put_bridge; + } + + dev_priv->mm.gtt = intel_gtt_get(); + if (!dev_priv->mm.gtt) { + DRM_ERROR("Failed to initialize GTT\n"); + ret = -ENODEV; + goto put_gmch; + } if (drm_core_check_feature(dev, DRIVER_MODESET)) i915_kick_out_firmware_fb(dev_priv); @@ -1559,10 +1590,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_setup_gmbus(dev); intel_opregion_setup(dev); + /* Make sure the bios did its job and 
set up vital registers */ intel_setup_bios(dev); i915_gem_load(dev); + /* Init HWS */ + if (!I915_NEED_GFX_HWS(dev)) { + ret = i915_init_phys_hws(dev); + if (ret) + goto out_gem_unload; + } + /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there * according to the published specs. It doesn't appear to function @@ -1582,8 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->rps.lock); spin_lock_init(&dev_priv->dpio_lock); - mutex_init(&dev_priv->rps.hw_lock); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) dev_priv->num_pipe = 3; else if (IS_MOBILE(dev) || !IS_GEN2(dev)) @@ -1641,7 +1678,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) out_rmmap: pci_iounmap(dev->pdev, dev_priv->regs); put_gmch: - i915_gem_gtt_fini(dev); + intel_gmch_remove(); put_bridge: pci_dev_put(dev_priv->bridge_dev); free_priv: @@ -1684,7 +1721,6 @@ int i915_driver_unload(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fbdev_fini(dev); intel_modeset_cleanup(dev); - cancel_work_sync(&dev_priv->console_resume_work); /* * free the memory space allocated for the child device diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index 6745c7f976db..6770ee6084b4 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset, unsigned int i915_fbpercrtc __always_unused = 0; module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); -int i915_panel_ignore_lid __read_mostly = 1; +int i915_panel_ignore_lid __read_mostly = 0; module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); MODULE_PARM_DESC(panel_ignore_lid, - "Override lid status (0=autodetect, 1=autodetect disabled [default], " - "-1=force lid closed, -2=force lid open)"); + "Override lid status (0=autodetect [default], 1=lid open, " + "-1=lid closed)"); unsigned int i915_powersave __read_mostly = 1; module_param_named(powersave, i915_powersave, int, 0600); @@ -396,6 +396,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */ MODULE_DEVICE_TABLE(pci, pciidlist); #endif +#define INTEL_PCH_DEVICE_ID_MASK 0xff00 +#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 +#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 +#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 +#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 + void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -410,36 +416,26 @@ void intel_detect_pch(struct drm_device *dev) pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); if (pch) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { - unsigned short id; + int id; id = pch->device & INTEL_PCH_DEVICE_ID_MASK; - dev_priv->pch_id = id; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_IBX; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); - WARN_ON(!IS_GEN5(dev)); } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_CPT; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); - WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ dev_priv->pch_type = PCH_CPT; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found PatherPoint PCH\n"); - WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; dev_priv->num_pch_pll = 0; 
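/*
 * Like the branches above, LynxPoint is recognised purely from the ISA
 * bridge's PCI device ID: (pch->device & INTEL_PCH_DEVICE_ID_MASK) keeps
 * the upper byte, which is then compared against the
 * INTEL_PCH_*_DEVICE_ID_TYPE values defined earlier in this file.
 */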
DRM_DEBUG_KMS("Found LynxPoint PCH\n"); - WARN_ON(!IS_HASWELL(dev)); - } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { - dev_priv->pch_type = PCH_LPT; - dev_priv->num_pch_pll = 0; - DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); - WARN_ON(!IS_HASWELL(dev)); } BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); } @@ -481,8 +477,6 @@ static int i915_drm_freeze(struct drm_device *dev) return error; } - cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); - intel_modeset_disable(dev); drm_irq_uninstall(dev); @@ -532,23 +526,17 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) return 0; } -void intel_console_resume(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - console_resume_work); - struct drm_device *dev = dev_priv->dev; - - console_lock(); - intel_fbdev_set_suspend(dev, 0); - console_unlock(); -} - -static int __i915_drm_thaw(struct drm_device *dev) +static int i915_drm_thaw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); + mutex_unlock(&dev->struct_mutex); + } + i915_restore_state(dev); intel_opregion_setup(dev); @@ -565,6 +553,7 @@ static int __i915_drm_thaw(struct drm_device *dev) intel_modeset_init_hw(dev); intel_modeset_setup_hw_state(dev); + drm_mode_config_reset(dev); drm_irq_install(dev); } @@ -572,41 +561,14 @@ static int __i915_drm_thaw(struct drm_device *dev) dev_priv->modeset_on_lid = 0; - /* - * The console lock can be pretty contented on resume due - * to all the printk activity. Try to keep it out of the hot - * path of resume if possible. - */ - if (console_trylock()) { - intel_fbdev_set_suspend(dev, 0); - console_unlock(); - } else { - schedule_work(&dev_priv->console_resume_work); - } - - return error; -} - -static int i915_drm_thaw(struct drm_device *dev) -{ - int error = 0; - - intel_gt_reset(dev); - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); - mutex_unlock(&dev->struct_mutex); - } - - __i915_drm_thaw(dev); - + console_lock(); + intel_fbdev_set_suspend(dev, 0); + console_unlock(); return error; } int i915_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) @@ -617,20 +579,7 @@ int i915_resume(struct drm_device *dev) pci_set_master(dev->pdev); - intel_gt_reset(dev); - - /* - * Platforms with opregion should have sane BIOS, older ones (gen3 and - * earlier) need this since the BIOS might clear all our scratch PTEs. 
- */ - if (drm_core_check_feature(dev, DRIVER_MODESET) && - !dev_priv->opregion.header) { - mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); - mutex_unlock(&dev->struct_mutex); - } - - ret = __i915_drm_thaw(dev); + ret = i915_drm_thaw(dev); if (ret) return ret; @@ -884,7 +833,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *intel_info = (struct intel_device_info *) ent->driver_data; - if (intel_info->is_valleyview) + if (intel_info->is_haswell || intel_info->is_valleyview) if(!i915_preliminary_hw_support) { DRM_ERROR("Preliminary hardware support disabled\n"); return -ENODEV; @@ -1191,40 +1140,12 @@ static bool IS_DISPLAYREG(u32 reg) if (reg == GEN6_GDRST) return false; - switch (reg) { - case _3D_CHICKEN3: - case IVB_CHICKEN3: - case GEN7_COMMON_SLICE_CHICKEN1: - case GEN7_L3CNTLREG1: - case GEN7_L3_CHICKEN_MODE_REGISTER: - case GEN7_ROW_CHICKEN2: - case GEN7_L3SQCREG4: - case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG: - case GEN7_HALF_SLICE_CHICKEN1: - case GEN6_MBCTL: - case GEN6_UCGCTL2: - return false; - default: - break; - } - return true; } -static void -ilk_dummy_write(struct drm_i915_private *dev_priv) -{ - /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the - * chip from rc6 before touching it for real. MI_MODE is masked, hence - * harmless to write 0 into. */ - I915_WRITE_NOTRACE(MI_MODE, 0); -} - #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ - if (IS_GEN5(dev_priv->dev)) \ - ilk_dummy_write(dev_priv); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ unsigned long irqflags; \ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ @@ -1256,12 +1177,6 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - if (IS_GEN5(dev_priv->dev)) \ - ilk_dummy_write(dev_priv); \ - if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ - DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \ - I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \ - } \ if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ write##y(val, dev_priv->regs + reg + 0x180000); \ } else { \ diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h index 87c06f97fa89..f511fa2f4168 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.h +++ b/trunk/drivers/gpu/drm/i915/i915_drv.h @@ -58,14 +58,6 @@ enum pipe { }; #define pipe_name(p) ((p) + 'A') -enum transcoder { - TRANSCODER_A = 0, - TRANSCODER_B, - TRANSCODER_C, - TRANSCODER_EDP = 0xF, -}; -#define transcoder_name(t) ((t) + 'A') - enum plane { PLANE_A = 0, PLANE_B, @@ -101,12 +93,6 @@ struct intel_pch_pll { }; #define I915_NUM_PLLS 2 -struct intel_ddi_plls { - int spll_refcount; - int wrpll1_refcount; - int wrpll2_refcount; -}; - /* Interface history: * * 1.1: Original. 
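
The next hunk re-adds struct mem_block, the descriptor historically used by the legacy i915/DRM memory-block allocator, where file_priv distinguishes free blocks (NULL), the heap itself (-1) and blocks owned by a client file. Purely as an illustration of that convention — the struct name, helper and circular-list-with-sentinel assumption below are mine, not part of the patch — walking such a list to total the free space might look like:

/* Hypothetical sketch: assumes the classic circular list anchored at a
 * sentinel "heap" node; blocks with file_priv == NULL are free.
 * file_priv is declared void * here only to keep the example
 * self-contained. */
struct mem_block_example {
	struct mem_block_example *next;
	struct mem_block_example *prev;
	int start;
	int size;
	void *file_priv;	/* NULL: free, -1: heap, other: owner */
};

static int total_free(struct mem_block_example *heap)
{
	struct mem_block_example *p;
	int bytes = 0;

	for (p = heap->next; p != heap; p = p->next)
		if (p->file_priv == NULL)
			bytes += p->size;

	return bytes;
}
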
@@ -137,6 +123,14 @@ struct drm_i915_gem_phys_object { struct drm_i915_gem_object *cur_obj; }; +struct mem_block { + struct mem_block *next; + struct mem_block *prev; + int start; + int size; + struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ +}; + struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -257,7 +251,6 @@ struct drm_i915_display_funcs { uint32_t sprite_width, int pixel_size); void (*update_linetime_wm)(struct drm_device *dev, int pipe, struct drm_display_mode *mode); - void (*modeset_global_resources)(struct drm_device *dev); int (*crtc_mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -270,6 +263,7 @@ struct drm_i915_display_funcs { struct drm_crtc *crtc); void (*fdi_link_train)(struct drm_crtc *crtc); void (*init_clock_gating)(struct drm_device *dev); + void (*init_pch_clock_gating)(struct drm_device *dev); int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj); @@ -344,7 +338,6 @@ struct intel_device_info { #define I915_PPGTT_PD_ENTRIES 512 #define I915_PPGTT_PT_ENTRIES 1024 struct i915_hw_ppgtt { - struct drm_device *dev; unsigned num_pd_entries; struct page **pt_pages; uint32_t pd_offset; @@ -390,18 +383,154 @@ struct intel_fbc_work; struct intel_gmbus { struct i2c_adapter adapter; - u32 force_bit; + bool force_bit; u32 reg0; u32 gpio_reg; struct i2c_algo_bit_data bit_algo; struct drm_i915_private *dev_priv; }; -struct i915_suspend_saved_registers { +typedef struct drm_i915_private { + struct drm_device *dev; + + const struct intel_device_info *info; + + int relative_constants_mode; + + void __iomem *regs; + + struct drm_i915_gt_funcs gt; + /** gt_fifo_count and the subsequent register write are synchronized + * with dev->struct_mutex. */ + unsigned gt_fifo_count; + /** forcewake_count is protected by gt_lock */ + unsigned forcewake_count; + /** gt_lock is also taken in irq contexts. */ + struct spinlock gt_lock; + + struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; + + /** gmbus_mutex protects against concurrent usage of the single hw gmbus + * controller on different i2c buses. */ + struct mutex gmbus_mutex; + + /** + * Base address of the gmbus and gpio block. 
+ */ + uint32_t gpio_mmio_base; + + struct pci_dev *bridge_dev; + struct intel_ring_buffer ring[I915_NUM_RINGS]; + uint32_t next_seqno; + + drm_dma_handle_t *status_page_dmah; + uint32_t counter; + struct drm_i915_gem_object *pwrctx; + struct drm_i915_gem_object *renderctx; + + struct resource mch_res; + + atomic_t irq_received; + + /* protects the irq masks */ + spinlock_t irq_lock; + + /* DPIO indirect register protection */ + spinlock_t dpio_lock; + + /** Cached value of IMR to avoid reads in updating the bitfield */ + u32 pipestat[2]; + u32 irq_mask; + u32 gt_irq_mask; + u32 pch_irq_mask; + + u32 hotplug_supported_mask; + struct work_struct hotplug_work; + + int num_pipe; + int num_pch_pll; + + /* For hangcheck timer */ +#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ + struct timer_list hangcheck_timer; + int hangcheck_count; + uint32_t last_acthd[I915_NUM_RINGS]; + uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; + + unsigned int stop_rings; + + unsigned long cfb_size; + unsigned int cfb_fb; + enum plane cfb_plane; + int cfb_y; + struct intel_fbc_work *fbc_work; + + struct intel_opregion opregion; + + /* overlay */ + struct intel_overlay *overlay; + bool sprite_scaling_enabled; + + /* LVDS info */ + int backlight_level; /* restore backlight to this value */ + bool backlight_enabled; + struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ + struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ + + /* Feature bits from the VBIOS */ + unsigned int int_tv_support:1; + unsigned int lvds_dither:1; + unsigned int lvds_vbt:1; + unsigned int int_crt_support:1; + unsigned int lvds_use_ssc:1; + unsigned int display_clock_mode:1; + int lvds_ssc_freq; + unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ + unsigned int lvds_val; /* used for checking LVDS channel mode */ + struct { + int rate; + int lanes; + int preemphasis; + int vswing; + + bool initialized; + bool support; + int bpp; + struct edp_power_seq pps; + } edp; + bool no_aux_handshake; + + struct notifier_block lid_notifier; + + int crt_ddc_pin; + struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ + int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ + int num_fence_regs; /* 8 on pre-965, 16 otherwise */ + + unsigned int fsb_freq, mem_freq, is_ddr3; + + spinlock_t error_lock; + /* Protected by dev->error_lock. */ + struct drm_i915_error_state *first_error; + struct work_struct error_work; + struct completion error_completion; + struct workqueue_struct *wq; + + /* Display functions */ + struct drm_i915_display_funcs display; + + /* PCH chipset type */ + enum intel_pch pch_type; + + unsigned long quirks; + + /* Register state */ + bool modeset_on_lid; u8 saveLBB; u32 saveDSPACNTR; u32 saveDSPBCNTR; u32 saveDSPARB; + u32 saveHWS; u32 savePIPEACONF; u32 savePIPEBCONF; u32 savePIPEASRC; @@ -547,206 +676,10 @@ struct i915_suspend_saved_registers { u32 savePIPEB_LINK_N1; u32 saveMCHBAR_RENDER_STANDBY; u32 savePCH_PORT_HOTPLUG; -}; - -struct intel_gen6_power_mgmt { - struct work_struct work; - u32 pm_iir; - /* lock - irqsave spinlock that protectects the work_struct and - * pm_iir. */ - spinlock_t lock; - - /* The below variables an all the rps hw state are protected by - * dev->struct mutext. */ - u8 cur_delay; - u8 min_delay; - u8 max_delay; - - struct delayed_work delayed_resume_work; - - /* - * Protects RPS/RC6 register access and PCU communication. - * Must be taken after struct_mutex if nested. 
- */ - struct mutex hw_lock; -}; - -struct intel_ilk_power_mgmt { - u8 cur_delay; - u8 min_delay; - u8 max_delay; - u8 fmax; - u8 fstart; - - u64 last_count1; - unsigned long last_time1; - unsigned long chipset_power; - u64 last_count2; - struct timespec last_time2; - unsigned long gfx_power; - u8 corr; - - int c_m; - int r_t; - - struct drm_i915_gem_object *pwrctx; - struct drm_i915_gem_object *renderctx; -}; - -struct i915_dri1_state { - unsigned allow_batchbuffer : 1; - u32 __iomem *gfx_hws_cpu_addr; - - unsigned int cpp; - int back_offset; - int front_offset; - int current_page; - int page_flipping; - - uint32_t counter; -}; - -struct intel_l3_parity { - u32 *remap_info; - struct work_struct error_work; -}; - -typedef struct drm_i915_private { - struct drm_device *dev; - - const struct intel_device_info *info; - - int relative_constants_mode; - - void __iomem *regs; - - struct drm_i915_gt_funcs gt; - /** gt_fifo_count and the subsequent register write are synchronized - * with dev->struct_mutex. */ - unsigned gt_fifo_count; - /** forcewake_count is protected by gt_lock */ - unsigned forcewake_count; - /** gt_lock is also taken in irq contexts. */ - struct spinlock gt_lock; - - struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; - - /** gmbus_mutex protects against concurrent usage of the single hw gmbus - * controller on different i2c buses. */ - struct mutex gmbus_mutex; - - /** - * Base address of the gmbus and gpio block. - */ - uint32_t gpio_mmio_base; - - struct pci_dev *bridge_dev; - struct intel_ring_buffer ring[I915_NUM_RINGS]; - uint32_t next_seqno; - - drm_dma_handle_t *status_page_dmah; - struct resource mch_res; - - atomic_t irq_received; - - /* protects the irq masks */ - spinlock_t irq_lock; - - /* DPIO indirect register protection */ - spinlock_t dpio_lock; - - /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 pipestat[2]; - u32 irq_mask; - u32 gt_irq_mask; - u32 pch_irq_mask; - - u32 hotplug_supported_mask; - struct work_struct hotplug_work; - - int num_pipe; - int num_pch_pll; - - /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ -#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) - struct timer_list hangcheck_timer; - int hangcheck_count; - uint32_t last_acthd[I915_NUM_RINGS]; - uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; - - unsigned int stop_rings; - - unsigned long cfb_size; - unsigned int cfb_fb; - enum plane cfb_plane; - int cfb_y; - struct intel_fbc_work *fbc_work; - - struct intel_opregion opregion; - - /* overlay */ - struct intel_overlay *overlay; - bool sprite_scaling_enabled; - - /* LVDS info */ - int backlight_level; /* restore backlight to this value */ - bool backlight_enabled; - struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ - struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ - - /* Feature bits from the VBIOS */ - unsigned int int_tv_support:1; - unsigned int lvds_dither:1; - unsigned int lvds_vbt:1; - unsigned int int_crt_support:1; - unsigned int lvds_use_ssc:1; - unsigned int display_clock_mode:1; - int lvds_ssc_freq; - unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ - unsigned int lvds_val; /* used for checking LVDS channel mode */ - struct { - int rate; - int lanes; - int preemphasis; - int vswing; - - bool initialized; - bool support; - int bpp; - struct edp_power_seq pps; - } edp; - bool no_aux_handshake; - - int crt_ddc_pin; - struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ - int fence_reg_start; /* 4 if 
userland hasn't ioctl'd us yet */ - int num_fence_regs; /* 8 on pre-965, 16 otherwise */ - - unsigned int fsb_freq, mem_freq, is_ddr3; - - spinlock_t error_lock; - /* Protected by dev->error_lock. */ - struct drm_i915_error_state *first_error; - struct work_struct error_work; - struct completion error_completion; - struct workqueue_struct *wq; - - /* Display functions */ - struct drm_i915_display_funcs display; - - /* PCH chipset type */ - enum intel_pch pch_type; - unsigned short pch_id; - - unsigned long quirks; - - /* Register state */ - bool modeset_on_lid; struct { /** Bridge to intel-gtt-ko */ - struct intel_gtt *gtt; + const struct intel_gtt *gtt; /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; /** Memory allocator for GTT */ @@ -773,6 +706,8 @@ typedef struct drm_i915_private { /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; + u32 *l3_remap_info; + struct shrinker inactive_shrinker; /** @@ -850,6 +785,19 @@ typedef struct drm_i915_private { u32 object_count; } mm; + /* Old dri1 support infrastructure, beware the dragons ya fools entering + * here! */ + struct { + unsigned allow_batchbuffer : 1; + u32 __iomem *gfx_hws_cpu_addr; + + unsigned int cpp; + int back_offset; + int front_offset; + int current_page; + int page_flipping; + } dri1; + /* Kernel Modesetting */ struct sdvo_device_mapping sdvo_mappings[2]; @@ -863,7 +811,6 @@ typedef struct drm_i915_private { wait_queue_head_t pending_flip_queue; struct intel_pch_pll pch_plls[I915_NUM_PLLS]; - struct intel_ddi_plls ddi_plls; /* Reclocking support */ bool render_reclock_avail; @@ -873,17 +820,46 @@ typedef struct drm_i915_private { u16 orig_clock; int child_dev_num; struct child_device_config *child_dev; + struct drm_connector *int_lvds_connector; + struct drm_connector *int_edp_connector; bool mchbar_need_disable; - struct intel_l3_parity l3_parity; - /* gen6+ rps state */ - struct intel_gen6_power_mgmt rps; + struct { + struct work_struct work; + u32 pm_iir; + /* lock - irqsave spinlock that protectects the work_struct and + * pm_iir. */ + spinlock_t lock; + + /* The below variables an all the rps hw state are protected by + * dev->struct mutext. */ + u8 cur_delay; + u8 min_delay; + u8 max_delay; + } rps; /* ilk-only ips/rps state. Everything in here is protected by the global * mchdev_lock in intel_pm.c */ - struct intel_ilk_power_mgmt ips; + struct { + u8 cur_delay; + u8 min_delay; + u8 max_delay; + u8 fmax; + u8 fstart; + + u64 last_count1; + unsigned long last_time1; + unsigned long chipset_power; + u64 last_count2; + struct timespec last_time2; + unsigned long gfx_power; + u8 corr; + + int c_m; + int r_t; + } ips; enum no_fbc_reason no_fbc_reason; @@ -895,25 +871,14 @@ typedef struct drm_i915_private { /* list of fbdev register on this device */ struct intel_fbdev *fbdev; - /* - * The console may be contended at resume, but we don't - * want it to block on it. - */ - struct work_struct console_resume_work; - struct backlight_device *backlight; struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; + struct work_struct parity_error_work; bool hw_contexts_disabled; uint32_t hw_context_size; - - struct i915_suspend_saved_registers regfile; - - /* Old dri1 support infrastructure, beware the dragons ya fools entering - * here! 
*/ - struct i915_dri1_state dri1; } drm_i915_private_t; /* Iterate over initialised rings */ @@ -1155,14 +1120,9 @@ struct drm_i915_file_private { #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) -#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ - (dev)->pci_device == 0x0152 || \ - (dev)->pci_device == 0x015a) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) -#define IS_ULT(dev) (IS_HASWELL(dev) && \ - ((dev)->pci_device & 0xFF00) == 0x0A00) /* * The genX designation typically refers to the render engine, so render @@ -1208,13 +1168,6 @@ struct drm_i915_file_private { #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) -#define INTEL_PCH_DEVICE_ID_MASK 0xff00 -#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 -#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 -#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 -#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 -#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 - #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) @@ -1297,7 +1250,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); -extern void intel_console_resume(struct work_struct *work); /* i915_irq.c */ void i915_hangcheck_elapsed(unsigned long data); @@ -1305,7 +1257,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); extern void intel_gt_init(struct drm_device *dev); -extern void intel_gt_reset(struct drm_device *dev); void i915_error_state_free(struct kref *error_ref); @@ -1548,14 +1499,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start, unsigned long mappable_end, unsigned long end); -int i915_gem_gtt_init(struct drm_device *dev); -void i915_gem_gtt_fini(struct drm_device *dev); -static inline void i915_gem_chipset_flush(struct drm_device *dev) -{ - if (INTEL_INFO(dev)->gen < 6) - intel_gtt_chipset_flush(); -} - /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, @@ -1685,9 +1628,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); - #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index b0016bb65631..107f09befe92 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -845,12 +845,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * domain anymore. 
*/ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { i915_gem_clflush_object(obj); - i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); } } if (needs_clflush_after) - i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); return ret; } @@ -1345,17 +1345,30 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) trace_i915_gem_object_fault(obj, page_offset, true, write); /* Now bind it into the GTT if needed */ - ret = i915_gem_object_pin(obj, 0, true, false); - if (ret) - goto unlock; + if (!obj->map_and_fenceable) { + ret = i915_gem_object_unbind(obj); + if (ret) + goto unlock; + } + if (!obj->gtt_space) { + ret = i915_gem_object_bind_to_gtt(obj, 0, true, false); + if (ret) + goto unlock; - ret = i915_gem_object_set_to_gtt_domain(obj, write); - if (ret) - goto unpin; + ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (ret) + goto unlock; + } + + if (!obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(obj, obj->cache_level); ret = i915_gem_object_get_fence(obj); if (ret) - goto unpin; + goto unlock; + + if (i915_gem_object_is_inactive(obj)) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); obj->fault_mappable = true; @@ -1364,8 +1377,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Finally, remap it using the new GTT offset */ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); -unpin: - i915_gem_object_unpin(obj); unlock: mutex_unlock(&dev->struct_mutex); out: @@ -2011,12 +2022,12 @@ i915_add_request(struct intel_ring_buffer *ring, if (!dev_priv->mm.suspended) { if (i915_enable_hangcheck) { mod_timer(&dev_priv->hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + jiffies + + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } if (was_empty) { queue_delayed_work(dev_priv->wq, - &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); + &dev_priv->mm.retire_work, HZ); intel_mark_busy(dev_priv->dev); } } @@ -2207,8 +2218,7 @@ i915_gem_retire_work_handler(struct work_struct *work) /* Come back later if the device is busy... 
*/ if (!mutex_trylock(&dev->struct_mutex)) { - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); return; } @@ -2226,8 +2236,7 @@ i915_gem_retire_work_handler(struct work_struct *work) } if (!dev_priv->mm.suspended && !idle) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); if (idle) intel_mark_idle(dev); @@ -2914,14 +2923,13 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, if (ret) return ret; - i915_gem_object_pin_pages(obj); - search_free: if (map_and_fenceable) - free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, - size, alignment, obj->cache_level, - 0, dev_priv->mm.gtt_mappable_end, - false); + free_space = + drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, + size, alignment, obj->cache_level, + 0, dev_priv->mm.gtt_mappable_end, + false); else free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, size, alignment, obj->cache_level, @@ -2929,60 +2937,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, if (free_space != NULL) { if (map_and_fenceable) - free_space = + obj->gtt_space = drm_mm_get_block_range_generic(free_space, size, alignment, obj->cache_level, 0, dev_priv->mm.gtt_mappable_end, false); else - free_space = + obj->gtt_space = drm_mm_get_block_generic(free_space, size, alignment, obj->cache_level, false); } - if (free_space == NULL) { + if (obj->gtt_space == NULL) { ret = i915_gem_evict_something(dev, size, alignment, obj->cache_level, map_and_fenceable, nonblocking); - if (ret) { - i915_gem_object_unpin_pages(obj); + if (ret) return ret; - } goto search_free; } if (WARN_ON(!i915_gem_valid_gtt_space(dev, - free_space, + obj->gtt_space, obj->cache_level))) { - i915_gem_object_unpin_pages(obj); - drm_mm_put_block(free_space); + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; return -EINVAL; } + ret = i915_gem_gtt_prepare_object(obj); if (ret) { - i915_gem_object_unpin_pages(obj); - drm_mm_put_block(free_space); + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; return ret; } + if (!dev_priv->mm.aliasing_ppgtt) + i915_gem_gtt_bind_object(obj, obj->cache_level); + list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - obj->gtt_space = free_space; - obj->gtt_offset = free_space->start; + obj->gtt_offset = obj->gtt_space->start; fenceable = - free_space->size == fence_size && - (free_space->start & (fence_alignment - 1)) == 0; + obj->gtt_space->size == fence_size && + (obj->gtt_space->start & (fence_alignment - 1)) == 0; mappable = obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; obj->map_and_fenceable = mappable && fenceable; - i915_gem_object_unpin_pages(obj); trace_i915_gem_object_bind(obj, map_and_fenceable); i915_gem_verify_gtt(dev); return 0; @@ -3051,7 +3059,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) return; i915_gem_clflush_object(obj); - i915_gem_chipset_flush(obj->base.dev); + intel_gtt_chipset_flush(); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; @@ -3446,16 +3454,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, } if (obj->gtt_space == NULL) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - ret = i915_gem_object_bind_to_gtt(obj, alignment, map_and_fenceable, nonblocking); if (ret) return ret; 
- - if (!dev_priv->mm.aliasing_ppgtt) - i915_gem_gtt_bind_object(obj, obj->cache_level); } if (!obj->has_global_gtt_mapping && map_and_fenceable) @@ -3829,7 +3832,7 @@ void i915_gem_l3_remap(struct drm_device *dev) if (!IS_IVYBRIDGE(dev)) return; - if (!dev_priv->l3_parity.remap_info) + if (!dev_priv->mm.l3_remap_info) return; misccpctl = I915_READ(GEN7_MISCCPCTL); @@ -3838,12 +3841,12 @@ void i915_gem_l3_remap(struct drm_device *dev) for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { u32 remap = I915_READ(GEN7_L3LOG_BASE + i); - if (remap && remap != dev_priv->l3_parity.remap_info[i/4]) + if (remap && remap != dev_priv->mm.l3_remap_info[i/4]) DRM_DEBUG("0x%x was already programmed to %x\n", GEN7_L3LOG_BASE + i, remap); - if (remap && !dev_priv->l3_parity.remap_info[i/4]) + if (remap && !dev_priv->mm.l3_remap_info[i/4]) DRM_DEBUG_DRIVER("Clearing remapped register\n"); - I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]); + I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]); } /* Make sure all the writes land before disabling dop clock gating */ @@ -3873,6 +3876,68 @@ void i915_gem_init_swizzling(struct drm_device *dev) I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); } +void i915_gem_init_ppgtt(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t pd_offset; + struct intel_ring_buffer *ring; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + uint32_t __iomem *pd_addr; + uint32_t pd_entry; + int i; + + if (!dev_priv->mm.aliasing_ppgtt) + return; + + + pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); + for (i = 0; i < ppgtt->num_pd_entries; i++) { + dma_addr_t pt_addr; + + if (dev_priv->mm.gtt->needs_dmar) + pt_addr = ppgtt->pt_dma_addr[i]; + else + pt_addr = page_to_phys(ppgtt->pt_pages[i]); + + pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); + pd_entry |= GEN6_PDE_VALID; + + writel(pd_entry, pd_addr + i); + } + readl(pd_addr); + + pd_offset = ppgtt->pd_offset; + pd_offset /= 64; /* in cachelines, */ + pd_offset <<= 16; + + if (INTEL_INFO(dev)->gen == 6) { + uint32_t ecochk, gab_ctl, ecobits; + + ecobits = I915_READ(GAC_ECO_BITS); + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); + + gab_ctl = I915_READ(GAB_CTL); + I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + + ecochk = I915_READ(GAM_ECOCHK); + I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | + ECOCHK_PPGTT_CACHE64B); + I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + } else if (INTEL_INFO(dev)->gen >= 7) { + I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); + /* GFX_MODE is per-ring on gen7+ */ + } + + for_each_ring(ring, dev_priv, i) { + if (INTEL_INFO(dev)->gen >= 7) + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + + I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); + } +} + static bool intel_enable_blt(struct drm_device *dev) { @@ -3895,7 +3960,7 @@ i915_gem_init_hw(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int ret; - if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) + if (!intel_enable_gtt()) return -EIO; if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) @@ -4230,7 +4295,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, page_cache_release(page); } } - i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); obj->phys_obj->cur_obj = NULL; obj->phys_obj = NULL; @@ -4317,7 +4382,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, return -EFAULT; } - 
i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); return 0; } @@ -4342,19 +4407,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) spin_unlock(&file_priv->mm.lock); } -static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) -{ - if (!mutex_is_locked(mutex)) - return false; - -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) - return mutex->owner == task; -#else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ - return false; -#endif -} - static int i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) { @@ -4365,15 +4417,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) struct drm_device *dev = dev_priv->dev; struct drm_i915_gem_object *obj; int nr_to_scan = sc->nr_to_scan; - bool unlock = true; int cnt; - if (!mutex_trylock(&dev->struct_mutex)) { - if (!mutex_is_locked_by(&dev->struct_mutex, current)) - return 0; - - unlock = false; - } + if (!mutex_trylock(&dev->struct_mutex)) + return 0; if (nr_to_scan) { nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); @@ -4389,7 +4436,6 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) if (obj->pin_count == 0 && obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; - if (unlock) - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); return cnt; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_context.c b/trunk/drivers/gpu/drm/i915/i915_gem_context.c index 0e510df80d73..05ed42f203d7 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_context.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_context.c @@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev, struct i915_hw_context *ctx; int ret, id; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); if (ctx == NULL) return ERR_PTR(-ENOMEM); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 48e4317e72dc..3eea143749f6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -128,6 +128,15 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, target_i915_obj->cache_level); } + /* The target buffer should have appeared before us in the + * exec_object list, so it should have a GTT space bound by now. 
+ */ + if (unlikely(target_offset == 0)) { + DRM_DEBUG("No GTT space found for object %d\n", + reloc->target_handle); + return ret; + } + /* Validate that the target is in a valid r/w GPU domain */ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { DRM_DEBUG("reloc with multiple write domains: " @@ -663,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, } if (flush_domains & I915_GEM_DOMAIN_CPU) - i915_gem_chipset_flush(ring->dev); + intel_gtt_chipset_flush(); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); @@ -791,7 +800,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, u32 exec_start, exec_len; u32 seqno; u32 mask; - u32 flags; int ret, mode, i; if (!i915_gem_check_execbuffer(args)) { @@ -803,14 +811,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) return ret; - flags = 0; - if (args->flags & I915_EXEC_SECURE) { - if (!file->is_master || !capable(CAP_SYS_ADMIN)) - return -EPERM; - - flags |= I915_DISPATCH_SECURE; - } - switch (args->flags & I915_EXEC_RING_MASK) { case I915_EXEC_DEFAULT: case I915_EXEC_RENDER: @@ -983,13 +983,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure - * batch" bit. Hence we need to pin secure batches into the global gtt. - * hsw should have this fixed, but let's be paranoid and do it - * unconditionally for now. */ - if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); - ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); if (ret) goto err; @@ -1035,7 +1028,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - trace_i915_gem_ring_dispatch(ring, seqno, flags); + trace_i915_gem_ring_dispatch(ring, seqno); exec_start = batch_obj->gtt_offset + args->batch_start_offset; exec_len = args->batch_len; @@ -1047,15 +1040,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); + exec_start, exec_len); if (ret) goto err; } } else { - ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); + ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); if (ret) goto err; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index f7ac61ee1504..df470b5e8d36 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -28,67 +28,19 @@ #include "i915_trace.h" #include "intel_drv.h" -typedef uint32_t gtt_pte_t; - -/* PPGTT stuff */ -#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) - -#define GEN6_PDE_VALID (1 << 0) -/* gen6+ has bit 11-4 for physical addr bit 39-32 */ -#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) - -#define GEN6_PTE_VALID (1 << 0) -#define GEN6_PTE_UNCACHED (1 << 1) -#define HSW_PTE_UNCACHED (0) -#define GEN6_PTE_CACHE_LLC (2 << 1) -#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) -#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) - -static inline gtt_pte_t pte_encode(struct drm_device *dev, - dma_addr_t addr, - enum i915_cache_level level) -{ - gtt_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); - - switch (level) { - case I915_CACHE_LLC_MLC: - /* Haswell doesn't set L3 this way */ - if (IS_HASWELL(dev)) - pte |= GEN6_PTE_CACHE_LLC; - else - pte |= GEN6_PTE_CACHE_LLC_MLC; - 
break; - case I915_CACHE_LLC: - pte |= GEN6_PTE_CACHE_LLC; - break; - case I915_CACHE_NONE: - if (IS_HASWELL(dev)) - pte |= HSW_PTE_UNCACHED; - else - pte |= GEN6_PTE_UNCACHED; - break; - default: - BUG(); - } - - - return pte; -} - /* PPGTT support for Sandybdrige/Gen6 and later */ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, unsigned first_entry, unsigned num_entries) { - gtt_pte_t *pt_vaddr; - gtt_pte_t scratch_pte; + uint32_t *pt_vaddr; + uint32_t scratch_pte; unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr, - I915_CACHE_LLC); + scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); + scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; while (num_entries) { last_pte = first_pte + num_entries; @@ -125,7 +77,6 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (!ppgtt) return ret; - ppgtt->dev = dev; ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, GFP_KERNEL); @@ -167,7 +118,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) i915_ppgtt_clear_range(ppgtt, 0, ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); - ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); + ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t); dev_priv->mm.aliasing_ppgtt = ppgtt; @@ -217,9 +168,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, const struct sg_table *pages, unsigned first_entry, - enum i915_cache_level cache_level) + uint32_t pte_flags) { - gtt_pte_t *pt_vaddr; + uint32_t *pt_vaddr, pte; unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned i, j, m, segment_len; @@ -237,8 +188,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); - pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr, - cache_level); + pte = GEN6_PTE_ADDR_ENCODE(page_addr); + pt_vaddr[j] = pte | pte_flags; /* grab the next page */ if (++m == segment_len) { @@ -262,10 +213,29 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { + uint32_t pte_flags = GEN6_PTE_VALID; + + switch (cache_level) { + case I915_CACHE_LLC_MLC: + pte_flags |= GEN6_PTE_CACHE_LLC_MLC; + break; + case I915_CACHE_LLC: + pte_flags |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + if (IS_HASWELL(obj->base.dev)) + pte_flags |= HSW_PTE_UNCACHED; + else + pte_flags |= GEN6_PTE_UNCACHED; + break; + default: + BUG(); + } + i915_ppgtt_insert_sg_entries(ppgtt, obj->pages, obj->gtt_space->start >> PAGE_SHIFT, - cache_level); + pte_flags); } void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, @@ -276,65 +246,23 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, obj->base.size >> PAGE_SHIFT); } -void i915_gem_init_ppgtt(struct drm_device *dev) +/* XXX kill agp_type! 
*/ +static unsigned int cache_level_to_agp_type(struct drm_device *dev, + enum i915_cache_level cache_level) { - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t pd_offset; - struct intel_ring_buffer *ring; - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - uint32_t __iomem *pd_addr; - uint32_t pd_entry; - int i; - - if (!dev_priv->mm.aliasing_ppgtt) - return; - - - pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); - for (i = 0; i < ppgtt->num_pd_entries; i++) { - dma_addr_t pt_addr; - - if (dev_priv->mm.gtt->needs_dmar) - pt_addr = ppgtt->pt_dma_addr[i]; - else - pt_addr = page_to_phys(ppgtt->pt_pages[i]); - - pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); - pd_entry |= GEN6_PDE_VALID; - - writel(pd_entry, pd_addr + i); - } - readl(pd_addr); - - pd_offset = ppgtt->pd_offset; - pd_offset /= 64; /* in cachelines, */ - pd_offset <<= 16; - - if (INTEL_INFO(dev)->gen == 6) { - uint32_t ecochk, gab_ctl, ecobits; - - ecobits = I915_READ(GAC_ECO_BITS); - I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); - - gab_ctl = I915_READ(GAB_CTL); - I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); - - ecochk = I915_READ(GAM_ECOCHK); - I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | - ECOCHK_PPGTT_CACHE64B); - I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - } else if (INTEL_INFO(dev)->gen >= 7) { - I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); - /* GFX_MODE is per-ring on gen7+ */ - } - - for_each_ring(ring, dev_priv, i) { - if (INTEL_INFO(dev)->gen >= 7) - I915_WRITE(RING_MODE_GEN7(ring), - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - - I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); + switch (cache_level) { + case I915_CACHE_LLC_MLC: + if (INTEL_INFO(dev)->gen >= 6) + return AGP_USER_CACHED_MEMORY_LLC_MLC; + /* Older chipsets do not have this extra level of CPU + * cacheing, so fallthrough and request the PTE simply + * as cached. 
+ */ + case I915_CACHE_LLC: + return AGP_USER_CACHED_MEMORY; + default: + case I915_CACHE_NONE: + return AGP_USER_MEMORY; } } @@ -360,40 +288,13 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) dev_priv->mm.interruptible = interruptible; } - -static void i915_ggtt_clear_range(struct drm_device *dev, - unsigned first_entry, - unsigned num_entries) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - gtt_pte_t scratch_pte; - gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry; - const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; - int i; - - if (INTEL_INFO(dev)->gen < 6) { - intel_gtt_clear_range(first_entry, num_entries); - return; - } - - if (WARN(num_entries > max_entries, - "First entry = %d; Num entries = %d (max=%d)\n", - first_entry, num_entries, max_entries)) - num_entries = max_entries; - - scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC); - for (i = 0; i < num_entries; i++) - iowrite32(scratch_pte, &gtt_base[i]); - readl(gtt_base); -} - void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; /* First fill our portion of the GTT with scratch pages */ - i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE, + intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { @@ -401,7 +302,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) i915_gem_gtt_bind_object(obj, obj->cache_level); } - i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); } int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) @@ -417,76 +318,21 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) return 0; } -/* - * Binds an object into the global gtt with the specified cache level. The object - * will be accessible to the GPU via commands whose operands reference offsets - * within the global GTT as well as accessible by the GPU through the GMADR - * mapped BAR (dev_priv->mm.gtt->gtt). - */ -static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level level) -{ - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct sg_table *st = obj->pages; - struct scatterlist *sg = st->sgl; - const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; - const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; - gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry; - int unused, i = 0; - unsigned int len, m = 0; - dma_addr_t addr; - - for_each_sg(st->sgl, sg, st->nents, unused) { - len = sg_dma_len(sg) >> PAGE_SHIFT; - for (m = 0; m < len; m++) { - addr = sg_dma_address(sg) + (m << PAGE_SHIFT); - iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]); - i++; - } - } - - BUG_ON(i > max_entries); - BUG_ON(i != obj->base.size / PAGE_SIZE); - - /* XXX: This serves as a posting read to make sure that the PTE has - * actually been updated. There is some concern that even though - * registers and PTEs are within the same BAR that they are potentially - * of NUMA access patterns. Therefore, even with the way we assume - * hardware should work, we must keep this posting read for paranoia. - */ - if (i != 0) - WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level)); - - /* This next bit makes the above posting read even more important.
We - * want to flush the TLBs only after we're certain all the PTE updates - * have finished. - */ - I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); - POSTING_READ(GFX_FLSH_CNTL_GEN6); -} - void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct drm_device *dev = obj->base.dev; - if (INTEL_INFO(dev)->gen < 6) { - unsigned int flags = (cache_level == I915_CACHE_NONE) ? - AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; - intel_gtt_insert_sg_entries(obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, - flags); - } else { - gen6_ggtt_bind_object(obj, cache_level); - } + unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); + intel_gtt_insert_sg_entries(obj->pages, + obj->gtt_space->start >> PAGE_SHIFT, + agp_type); obj->has_global_gtt_mapping = 1; } void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { - i915_ggtt_clear_range(obj->base.dev, - obj->gtt_space->start >> PAGE_SHIFT, + intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT); obj->has_global_gtt_mapping = 0; @@ -544,161 +390,5 @@ void i915_gem_init_global_gtt(struct drm_device *dev, dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; /* ... but ensure that we clear the entire range. */ - i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE); -} - -static int setup_scratch_page(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct page *page; - dma_addr_t dma_addr; - - page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); - if (page == NULL) - return -ENOMEM; - get_page(page); - set_pages_uc(page, 1); - -#ifdef CONFIG_INTEL_IOMMU - dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(dev->pdev, dma_addr)) - return -EINVAL; -#else - dma_addr = page_to_phys(page); -#endif - dev_priv->mm.gtt->scratch_page = page; - dev_priv->mm.gtt->scratch_page_dma = dma_addr; - - return 0; -} - -static void teardown_scratch_page(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - set_pages_wb(dev_priv->mm.gtt->scratch_page, 1); - pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - put_page(dev_priv->mm.gtt->scratch_page); - __free_page(dev_priv->mm.gtt->scratch_page); -} - -static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) -{ - snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; - snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; - return snb_gmch_ctl << 20; -} - -static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl) -{ - snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; - snb_gmch_ctl &= SNB_GMCH_GMS_MASK; - return snb_gmch_ctl << 25; /* 32 MB units */ -} - -static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) -{ - static const int stolen_decoder[] = { - 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; - snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT; - snb_gmch_ctl &= IVB_GMCH_GMS_MASK; - return stolen_decoder[snb_gmch_ctl] << 20; -} - -int i915_gem_gtt_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - phys_addr_t gtt_bus_addr; - u16 snb_gmch_ctl; - int ret; - - /* On modern platforms we need not worry ourself with the legacy - * hostbridge query stuff. 
Skip it entirely - */ - if (INTEL_INFO(dev)->gen < 6) { - ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); - if (!ret) { - DRM_ERROR("failed to set up gmch\n"); - return -EIO; - } - - dev_priv->mm.gtt = intel_gtt_get(); - if (!dev_priv->mm.gtt) { - DRM_ERROR("Failed to initialize GTT\n"); - intel_gmch_remove(); - return -ENODEV; - } - return 0; - } - - dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL); - if (!dev_priv->mm.gtt) - return -ENOMEM; - - if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) - pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); - - /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ - gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); - dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2); - - /* i9xx_setup */ - pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - dev_priv->mm.gtt->gtt_total_entries = - gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t); - if (INTEL_INFO(dev)->gen < 7) - dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); - else - dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl); - - dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT; - /* 64/512MB is the current min/max we actually know of, but this is just a - * coarse sanity check. - */ - if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 || - dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) { - DRM_ERROR("Unknown GMADR entries (%d)\n", - dev_priv->mm.gtt->gtt_mappable_entries); - ret = -ENXIO; - goto err_out; - } - - ret = setup_scratch_page(dev); - if (ret) { - DRM_ERROR("Scratch setup failed\n"); - goto err_out; - } - - dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr, - dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); - if (!dev_priv->mm.gtt->gtt) { - DRM_ERROR("Failed to map the gtt page table\n"); - teardown_scratch_page(dev); - ret = -ENOMEM; - goto err_out; - } - - /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. 
*/ - DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8); - DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8); - DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20); - - return 0; - -err_out: - kfree(dev_priv->mm.gtt); - if (INTEL_INFO(dev)->gen < 6) - intel_gmch_remove(); - return ret; -} - -void i915_gem_gtt_fini(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - iounmap(dev_priv->mm.gtt->gtt); - teardown_scratch_page(dev); - if (INTEL_INFO(dev)->gen < 6) - intel_gmch_remove(); - kfree(dev_priv->mm.gtt); + intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); } diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index 2604867e6b7d..32e1bda865b8 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -122,10 +122,7 @@ static int i915_pipe_enabled(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - - return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; + return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; } /* Called from drm generic code, passed a 'crtc', which @@ -185,8 +182,6 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, int vbl_start, vbl_end, htotal, vtotal; bool in_vbl = true; int ret = 0; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " @@ -195,7 +190,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, } /* Get vtotal. */ - vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); + vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); if (INTEL_INFO(dev)->gen >= 4) { /* No obvious pixelcount register. Only query vertical @@ -215,13 +210,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, */ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; - htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); + htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); *vpos = position / htotal; *hpos = position - (*vpos * htotal); } /* Query vblank area. */ - vbl = I915_READ(VBLANK(cpu_transcoder)); + vbl = I915_READ(VBLANK(pipe)); /* Test position against vblank region. 
*/ vbl_start = vbl & 0x1fff; @@ -357,7 +352,8 @@ static void notify_ring(struct drm_device *dev, if (i915_enable_hangcheck) { dev_priv->hangcheck_count = 0; mod_timer(&dev_priv->hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + jiffies + + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } } @@ -378,7 +374,7 @@ static void gen6_pm_rps_work(struct work_struct *work) if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) return; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->dev->struct_mutex); if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) new_delay = dev_priv->rps.cur_delay + 1; @@ -393,7 +389,7 @@ static void gen6_pm_rps_work(struct work_struct *work) gen6_set_rps(dev_priv->dev, new_delay); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->dev->struct_mutex); } @@ -409,7 +405,7 @@ static void gen6_pm_rps_work(struct work_struct *work) static void ivybridge_parity_work(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - l3_parity.error_work); + parity_error_work); u32 error_status, row, bank, subbank; char *parity_event[5]; uint32_t misccpctl; @@ -473,7 +469,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev) I915_WRITE(GTIMR, dev_priv->gt_irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); + queue_work(dev_priv->wq, &dev_priv->parity_error_work); } static void snb_gt_irq_handler(struct drm_device *dev, @@ -524,7 +520,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, queue_work(dev_priv->wq, &dev_priv->rps.work); } -static irqreturn_t valleyview_irq_handler(int irq, void *arg) +static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -610,9 +606,6 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; - if (pch_iir & SDE_HOTPLUG_MASK) - queue_work(dev_priv->wq, &dev_priv->hotplug_work); - if (pch_iir & SDE_AUDIO_POWER_MASK) DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", (pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -653,9 +646,6 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; - if (pch_iir & SDE_HOTPLUG_MASK_CPT) - queue_work(dev_priv->wq, &dev_priv->hotplug_work); - if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -680,7 +670,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) I915_READ(FDI_RX_IIR(pipe))); } -static irqreturn_t ivybridge_irq_handler(int irq, void *arg) +static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -719,6 +709,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) if (de_iir & DE_PCH_EVENT_IVB) { u32 pch_iir = I915_READ(SDEIIR); + if (pch_iir & SDE_HOTPLUG_MASK_CPT) + queue_work(dev_priv->wq, &dev_priv->hotplug_work); cpt_irq_handler(dev, pch_iir); /* clear PCH hotplug event before clear CPU irq */ @@ -753,12 +745,13 @@ static void ilk_gt_irq_handler(struct drm_device *dev, notify_ring(dev, &dev_priv->ring[VCS]); } -static irqreturn_t ironlake_irq_handler(int irq, void *arg) +static irqreturn_t 
ironlake_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; + u32 hotplug_mask; atomic_inc(&dev_priv->irq_received); @@ -776,6 +769,11 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) (!IS_GEN6(dev) || pm_iir == 0)) goto done; + if (HAS_PCH_CPT(dev)) + hotplug_mask = SDE_HOTPLUG_MASK_CPT; + else + hotplug_mask = SDE_HOTPLUG_MASK; + ret = IRQ_HANDLED; if (IS_GEN5(dev)) @@ -804,6 +802,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { + if (pch_iir & hotplug_mask) + queue_work(dev_priv->wq, &dev_priv->hotplug_work); if (HAS_PCH_CPT(dev)) cpt_irq_handler(dev, pch_iir); else @@ -1751,7 +1751,7 @@ void i915_hangcheck_elapsed(unsigned long data) repeat: /* Reset timer case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } /* drm_dma.h hooks @@ -1956,7 +1956,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev) u32 enable_mask; u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; - u32 render_irqs; u16 msid; enable_mask = I915_DISPLAY_PORT_INTERRUPT; @@ -1996,12 +1995,21 @@ static int valleyview_irq_postinstall(struct drm_device *dev) I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); + dev_priv->gt_irq_mask = ~0; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - - render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | - GEN6_BLITTER_USER_INTERRUPT; - I915_WRITE(GTIER, render_irqs); + I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | + GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BLT_USER_INTERRUPT | + GT_GEN6_BSD_USER_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_GEN7_L3_PARITY_ERROR_INTERRUPT | + GT_PIPE_NOTIFY | + GT_RENDER_CS_ERROR_INTERRUPT | + GT_SYNC_STATUS | + GT_USER_INTERRUPT); POSTING_READ(GTIER); /* ack & enable invalid PTE error interrupts */ @@ -2011,6 +2019,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev) #endif I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); +#if 0 /* FIXME: check register definitions; some have moved */ /* Note HDMI and DP share bits */ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) hotplug_en |= HDMIB_HOTPLUG_INT_EN; @@ -2018,14 +2027,15 @@ static int valleyview_irq_postinstall(struct drm_device *dev) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) hotplug_en |= SDVOC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) hotplug_en |= SDVOB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; } +#endif I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); @@ -2119,7 +2129,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev) return 0; } -static irqreturn_t i8xx_irq_handler(int irq, void *arg) +static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS) { 
struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2297,7 +2307,7 @@ static int i915_irq_postinstall(struct drm_device *dev) return 0; } -static irqreturn_t i915_irq_handler(int irq, void *arg) +static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2535,7 +2545,7 @@ static int i965_irq_postinstall(struct drm_device *dev) return 0; } -static irqreturn_t i965_irq_handler(int irq, void *arg) +static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2681,7 +2691,7 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); - INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); + INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index 97fbd9d1823b..a4162ddff6c5 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -26,7 +26,6 @@ #define _I915_REG_H_ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) -#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) @@ -41,14 +40,6 @@ */ #define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_VGA_DISABLE (1 << 1) -#define SNB_GMCH_CTRL 0x50 -#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ -#define SNB_GMCH_GGMS_MASK 0x3 -#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ -#define SNB_GMCH_GMS_MASK 0x1f -#define IVB_GMCH_GMS_SHIFT 4 -#define IVB_GMCH_GMS_MASK 0xf - /* PCI config space */ @@ -114,6 +105,23 @@ #define GEN6_GRDOM_MEDIA (1 << 2) #define GEN6_GRDOM_BLT (1 << 3) +/* PPGTT stuff */ +#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) + +#define GEN6_PDE_VALID (1 << 0) +#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */ +/* gen6+ has bit 11-4 for physical addr bit 39-32 */ +#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) + +#define GEN6_PTE_VALID (1 << 0) +#define GEN6_PTE_UNCACHED (1 << 1) +#define HSW_PTE_UNCACHED (0) +#define GEN6_PTE_CACHE_LLC (2 << 1) +#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) +#define GEN6_PTE_CACHE_BITS (3 << 1) +#define GEN6_PTE_GFDT (1 << 3) +#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) + #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) @@ -233,18 +241,11 @@ */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ -#define MI_FLUSH_DW_STORE_INDEX (1<<21) -#define MI_INVALIDATE_TLB (1<<18) -#define MI_FLUSH_DW_OP_STOREDW (1<<14) -#define MI_INVALIDATE_BSD (1<<7) -#define MI_FLUSH_DW_USE_GTT (1<<2) -#define MI_FLUSH_DW_USE_PPGTT (0<<2) +#define MI_INVALIDATE_TLB (1<<18) +#define MI_INVALIDATE_BSD (1<<7) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) -#define MI_BATCH_NON_SECURE (1) -/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. 
*/ -#define MI_BATCH_NON_SECURE_I965 (1<<8) -#define MI_BATCH_PPGTT_HSW (1<<8) -#define MI_BATCH_NON_SECURE_HSW (1<<13) +#define MI_BATCH_NON_SECURE (1) +#define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ @@ -368,7 +369,6 @@ #define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ #define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ #define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ -#define DPIO_PLL_REFCLK_SEL_MASK 3 #define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ #define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ #define _DPIO_REFSFR_B 0x8034 @@ -384,9 +384,6 @@ #define DPIO_FASTCLK_DISABLE 0x8100 -#define DPIO_DATA_CHANNEL1 0x8220 -#define DPIO_DATA_CHANNEL2 0x8420 - /* * Fence registers */ @@ -524,7 +521,6 @@ */ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 0x02090 -#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define MI_MODE 0x0209c @@ -551,8 +547,6 @@ #define IIR 0x020a4 #define IMR 0x020a8 #define ISR 0x020ac -#define VLV_GUNIT_CLOCK_GATE 0x182060 -#define GCFG_DIS (1<<8) #define VLV_IIR_RW 0x182084 #define VLV_IER 0x1820a0 #define VLV_IIR 0x1820a4 @@ -667,7 +661,6 @@ #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ #define CACHE_MODE_0 0x02120 /* 915+ only */ -#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) @@ -677,8 +670,6 @@ #define CM0_RC_OP_FLUSH_DISABLE (1<<0) #define BB_ADDR 0x02140 /* 8 bytes */ #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ -#define GFX_FLSH_CNTL_GEN6 0x101008 -#define GFX_FLSH_CNTL_EN (1<<0) #define ECOSKPD 0x021d0 #define ECO_GATING_CX_ONLY (1<<3) #define ECO_FLIP_DONE (1<<0) @@ -1568,14 +1559,14 @@ #define _VSYNCSHIFT_B 0x61028 -#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) -#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) -#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) -#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) -#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) -#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) +#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) +#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) +#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) +#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) +#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) +#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) -#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) +#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) /* VGA port control */ #define ADPA 0x61100 @@ -2650,7 +2641,6 @@ #define PIPECONF_GAMMA (1<<24) #define PIPECONF_FORCE_BORDER (1<<25) #define PIPECONF_INTERLACE_MASK (7 << 21) -#define PIPECONF_INTERLACE_MASK_HSW (3 << 21) /* Note that pre-gen3 does not support interlaced display directly. Panel * fitting must be disabled on pre-ilk for interlaced. 
*/ #define PIPECONF_PROGRESSIVE (0 << 21) @@ -2721,7 +2711,7 @@ #define PIPE_12BPC (3 << 5) #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) -#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) +#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) @@ -3008,19 +2998,12 @@ #define DISPPLANE_GAMMA_ENABLE (1<<30) #define DISPPLANE_GAMMA_DISABLE 0 #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) -#define DISPPLANE_YUV422 (0x0<<26) #define DISPPLANE_8BPP (0x2<<26) -#define DISPPLANE_BGRA555 (0x3<<26) -#define DISPPLANE_BGRX555 (0x4<<26) -#define DISPPLANE_BGRX565 (0x5<<26) -#define DISPPLANE_BGRX888 (0x6<<26) -#define DISPPLANE_BGRA888 (0x7<<26) -#define DISPPLANE_RGBX101010 (0x8<<26) -#define DISPPLANE_RGBA101010 (0x9<<26) -#define DISPPLANE_BGRX101010 (0xa<<26) -#define DISPPLANE_RGBX161616 (0xc<<26) -#define DISPPLANE_RGBX888 (0xe<<26) -#define DISPPLANE_RGBA888 (0xf<<26) +#define DISPPLANE_15_16BPP (0x4<<26) +#define DISPPLANE_16BPP (0x5<<26) +#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) +#define DISPPLANE_32BPP (0x7<<26) +#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) #define DISPPLANE_STEREO_ENABLE (1<<25) #define DISPPLANE_STEREO_DISABLE 0 #define DISPPLANE_SEL_PIPE_SHIFT 24 @@ -3041,8 +3024,6 @@ #define _DSPASIZE 0x70190 #define _DSPASURF 0x7019C /* 965+ only */ #define _DSPATILEOFF 0x701A4 /* 965+ only */ -#define _DSPAOFFSET 0x701A4 /* HSW */ -#define _DSPASURFLIVE 0x701AC #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) @@ -3052,8 +3033,6 @@ #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) #define DSPLINOFF(plane) DSPADDR(plane) -#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) -#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) @@ -3099,8 +3078,6 @@ #define _DSPBSIZE 0x71190 #define _DSPBSURF 0x7119C #define _DSPBTILEOFF 0x711A4 -#define _DSPBOFFSET 0x711A4 -#define _DSPBSURFLIVE 0x711AC /* Sprite A control */ #define _DVSACNTR 0x72180 @@ -3166,7 +3143,6 @@ #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) -#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) #define _SPRA_CTL 0x70280 #define SPRITE_ENABLE (1<<31) @@ -3201,8 +3177,6 @@ #define _SPRA_SURF 0x7029c #define _SPRA_KEYMAX 0x702a0 #define _SPRA_TILEOFF 0x702a4 -#define _SPRA_OFFSET 0x702a4 -#define _SPRA_SURFLIVE 0x702ac #define _SPRA_SCALE 0x70304 #define SPRITE_SCALE_ENABLE (1<<31) #define SPRITE_FILTER_MASK (3<<29) @@ -3223,8 +3197,6 @@ #define _SPRB_SURF 0x7129c #define _SPRB_KEYMAX 0x712a0 #define _SPRB_TILEOFF 0x712a4 -#define _SPRB_OFFSET 0x712a4 -#define _SPRB_SURFLIVE 0x712ac #define _SPRB_SCALE 0x71304 #define _SPRB_GAMC 0x71400 @@ -3238,10 +3210,8 @@ #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) -#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) #define SPRGAMC(pipe) 
_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) -#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) /* VBIOS regs */ #define VGACNTRL 0x71400 @@ -3276,6 +3246,12 @@ #define DISPLAY_PORT_PLL_BIOS_1 0x46010 #define DISPLAY_PORT_PLL_BIOS_2 0x46014 +#define PCH_DSPCLK_GATE_D 0x42020 +# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) +# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) +# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) +# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) + #define PCH_3DCGDIS0 0x46020 # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) @@ -3325,22 +3301,20 @@ #define _PIPEB_LINK_M2 0x61048 #define _PIPEB_LINK_N2 0x6104c -#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) -#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) -#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) -#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) -#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) -#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1) -#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2) -#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2) +#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) +#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) +#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) +#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) +#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) +#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) +#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) +#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) /* CPU panel fitter */ /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ #define _PFA_CTL_1 0x68080 #define _PFB_CTL_1 0x68880 #define PF_ENABLE (1<<31) -#define PF_PIPE_SEL_MASK_IVB (3<<29) -#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) #define PF_FILTER_MASK (3<<23) #define PF_FILTER_PROGRAMMED (0<<23) #define PF_FILTER_MED_3x3 (1<<23) @@ -3449,13 +3423,15 @@ #define ILK_HDCP_DISABLE (1<<25) #define ILK_eDP_A_DISABLE (1<<24) #define ILK_DESKTOP (1<<23) +#define ILK_DSPCLK_GATE 0x42020 +#define IVB_VRHUNIT_CLK_GATE (1<<28) +#define ILK_DPARB_CLK_GATE (1<<5) +#define ILK_DPFD_CLK_GATE (1<<7) -#define ILK_DSPCLK_GATE_D 0x42020 -#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) -#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) -#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) -#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) -#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) +/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ +#define ILK_CLK_FBC (1<<7) +#define ILK_DPFC_DIS1 (1<<8) +#define ILK_DPFC_DIS2 (1<<9) #define IVB_CHICKEN3 0x4200c # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) @@ -3471,21 +3447,14 @@ #define GEN7_L3CNTLREG1 0xB01C #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C -#define GEN7_L3AGDIS (1<<19) #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 -#define GEN7_L3SQCREG4 0xb034 -#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) - /* WaCatErrorRejectionIssue */ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) -#define HSW_FUSE_STRAP 0x42014 -#define HSW_CDCLK_LIMIT (1 << 24) - /* PCH */ /* south 
display engine interrupt: IBX */ @@ -3717,7 +3686,7 @@ #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) -#define VLV_VIDEO_DIP_CTL_A 0x60200 +#define VLV_VIDEO_DIP_CTL_A 0x60220 #define VLV_VIDEO_DIP_DATA_A 0x60208 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 @@ -3826,22 +3795,16 @@ #define TRANS_6BPC (2<<5) #define TRANS_12BPC (3<<5) -#define _TRANSA_CHICKEN1 0xf0060 -#define _TRANSB_CHICKEN1 0xf1060 -#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) -#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) #define _TRANSA_CHICKEN2 0xf0064 #define _TRANSB_CHICKEN2 0xf1064 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) -#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) - +#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) #define SOUTH_CHICKEN1 0xc2000 #define FDIA_PHASE_SYNC_SHIFT_OVR 19 #define FDIA_PHASE_SYNC_SHIFT_EN 18 -#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) -#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) -#define FDI_BC_BIFURCATION_SELECT (1 << 12) +#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) +#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) #define SOUTH_CHICKEN2 0xc2004 #define DPLS_EDP_PPS_FIX_DIS (1<<0) @@ -3853,7 +3816,6 @@ #define SOUTH_DSPCLK_GATE_D 0xc2020 #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) -#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) /* CPU: FDI_TX */ #define _FDI_TXA_CTL 0x60100 @@ -3939,21 +3901,16 @@ #define FDI_PORT_WIDTH_2X_LPT (1<<19) #define FDI_PORT_WIDTH_1X_LPT (0<<19) -#define _FDI_RXA_MISC 0xf0010 -#define _FDI_RXB_MISC 0xf1010 -#define FDI_RX_PWRDN_LANE1_MASK (3<<26) -#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26) -#define FDI_RX_PWRDN_LANE0_MASK (3<<24) -#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24) -#define FDI_RX_TP1_TO_TP2_48 (2<<20) -#define FDI_RX_TP1_TO_TP2_64 (3<<20) -#define FDI_RX_FDI_DELAY_90 (0x90<<0) -#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) - +#define _FDI_RXA_MISC 0xf0010 +#define _FDI_RXB_MISC 0xf1010 #define _FDI_RXA_TUSIZE1 0xf0030 #define _FDI_RXA_TUSIZE2 0xf0038 #define _FDI_RXB_TUSIZE1 0xf1030 #define _FDI_RXB_TUSIZE2 0xf1038 +#define FDI_RX_TP1_TO_TP2_48 (2<<20) +#define FDI_RX_TP1_TO_TP2_64 (3<<20) +#define FDI_RX_FDI_DELAY_90 (0x90<<0) +#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) @@ -4046,11 +4003,6 @@ #define PANEL_LIGHT_ON_DELAY_SHIFT 0 #define PCH_PP_OFF_DELAYS 0xc720c -#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30) -#define PANEL_POWER_PORT_LVDS (0 << 30) -#define PANEL_POWER_PORT_DP_A (1 << 30) -#define PANEL_POWER_PORT_DP_C (2 << 30) -#define PANEL_POWER_PORT_DP_D (3 << 30) #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) #define PANEL_POWER_DOWN_DELAY_SHIFT 16 #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) @@ -4098,7 +4050,7 @@ #define TRANS_DP_CTL_A 0xe0300 #define TRANS_DP_CTL_B 0xe1300 #define TRANS_DP_CTL_C 0xe2300 -#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) +#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) #define TRANS_DP_OUTPUT_ENABLE (1<<31) #define TRANS_DP_PORT_SEL_B (0<<29) #define TRANS_DP_PORT_SEL_C (1<<29) @@ -4156,8 +4108,6 @@ #define FORCEWAKE_ACK_HSW 0x130044 #define 
FORCEWAKE_ACK 0x130090 #define FORCEWAKE_MT 0xa188 /* multi-threaded */ -#define FORCEWAKE_KERNEL 0x1 -#define FORCEWAKE_USER 0x2 #define FORCEWAKE_MT_ACK 0x130040 #define ECOBUS 0xa180 #define FORCEWAKE_MT_ENABLE (1<<5) @@ -4270,10 +4220,6 @@ #define GEN6_READ_OC_PARAMS 0xc #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 -#define GEN6_PCODE_WRITE_RC6VIDS 0x4 -#define GEN6_PCODE_READ_RC6VIDS 0x5 -#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0 -#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0) #define GEN6_PCODE_DATA 0x138128 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 @@ -4305,15 +4251,6 @@ #define GEN7_L3LOG_BASE 0xB070 #define GEN7_L3LOG_SIZE 0x80 -#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ -#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 -#define GEN7_MAX_PS_THREAD_DEP (8<<12) -#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) - -#define GEN7_ROW_CHICKEN2 0xe4f4 -#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 -#define DOP_CLOCK_GATING_DISABLE (1<<0) - #define G4X_AUD_VID_DID 0x62020 #define INTEL_AUDIO_DEVCL 0x808629FB #define INTEL_AUDIO_DEVBLC 0x80862801 @@ -4443,39 +4380,33 @@ #define HSW_PWR_WELL_CTL6 0x45414 /* Per-pipe DDI Function Control */ -#define TRANS_DDI_FUNC_CTL_A 0x60400 -#define TRANS_DDI_FUNC_CTL_B 0x61400 -#define TRANS_DDI_FUNC_CTL_C 0x62400 -#define TRANS_DDI_FUNC_CTL_EDP 0x6F400 -#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ - TRANS_DDI_FUNC_CTL_B) -#define TRANS_DDI_FUNC_ENABLE (1<<31) +#define PIPE_DDI_FUNC_CTL_A 0x60400 +#define PIPE_DDI_FUNC_CTL_B 0x61400 +#define PIPE_DDI_FUNC_CTL_C 0x62400 +#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 +#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ + PIPE_DDI_FUNC_CTL_B) +#define PIPE_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ -#define TRANS_DDI_PORT_MASK (7<<28) -#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) -#define TRANS_DDI_PORT_NONE (0<<28) -#define TRANS_DDI_MODE_SELECT_MASK (7<<24) -#define TRANS_DDI_MODE_SELECT_HDMI (0<<24) -#define TRANS_DDI_MODE_SELECT_DVI (1<<24) -#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24) -#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24) -#define TRANS_DDI_MODE_SELECT_FDI (4<<24) -#define TRANS_DDI_BPC_MASK (7<<20) -#define TRANS_DDI_BPC_8 (0<<20) -#define TRANS_DDI_BPC_10 (1<<20) -#define TRANS_DDI_BPC_6 (2<<20) -#define TRANS_DDI_BPC_12 (3<<20) -#define TRANS_DDI_PVSYNC (1<<17) -#define TRANS_DDI_PHSYNC (1<<16) -#define TRANS_DDI_EDP_INPUT_MASK (7<<12) -#define TRANS_DDI_EDP_INPUT_A_ON (0<<12) -#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) -#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) -#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) -#define TRANS_DDI_BFI_ENABLE (1<<4) -#define TRANS_DDI_PORT_WIDTH_X1 (0<<1) -#define TRANS_DDI_PORT_WIDTH_X2 (1<<1) -#define TRANS_DDI_PORT_WIDTH_X4 (3<<1) +#define PIPE_DDI_PORT_MASK (7<<28) +#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) +#define PIPE_DDI_MODE_SELECT_MASK (7<<24) +#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) +#define PIPE_DDI_MODE_SELECT_DVI (1<<24) +#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) +#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) +#define PIPE_DDI_MODE_SELECT_FDI (4<<24) +#define PIPE_DDI_BPC_MASK (7<<20) +#define PIPE_DDI_BPC_8 (0<<20) +#define PIPE_DDI_BPC_10 (1<<20) +#define PIPE_DDI_BPC_6 (2<<20) +#define PIPE_DDI_BPC_12 (3<<20) +#define PIPE_DDI_PVSYNC (1<<17) +#define PIPE_DDI_PHSYNC (1<<16) +#define PIPE_DDI_BFI_ENABLE (1<<4) +#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) +#define 
PIPE_DDI_PORT_WIDTH_X2 (1<<1) +#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) /* DisplayPort Transport Control */ #define DP_TP_CTL_A 0x64040 @@ -4489,16 +4420,12 @@ #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) -#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8) -#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8) #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) -#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) /* DisplayPort Transport Status */ #define DP_TP_STATUS_A 0x64044 #define DP_TP_STATUS_B 0x64144 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) -#define DP_TP_STATUS_IDLE_DONE (1<<25) #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) /* DDI Buffer Control */ @@ -4517,7 +4444,6 @@ #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ #define DDI_BUF_EMP_MASK (0xf<<24) #define DDI_BUF_IS_IDLE (1<<7) -#define DDI_A_4_LANES (1<<4) #define DDI_PORT_WIDTH_X1 (0<<1) #define DDI_PORT_WIDTH_X2 (1<<1) #define DDI_PORT_WIDTH_X4 (3<<1) @@ -4564,8 +4490,8 @@ /* SPLL */ #define SPLL_CTL 0x46020 #define SPLL_PLL_ENABLE (1<<31) -#define SPLL_PLL_SSC (1<<28) -#define SPLL_PLL_NON_SSC (2<<28) +#define SPLL_PLL_SCC (1<<28) +#define SPLL_PLL_NON_SCC (2<<28) #define SPLL_PLL_FREQ_810MHz (0<<26) #define SPLL_PLL_FREQ_1350MHz (1<<26) @@ -4574,7 +4500,7 @@ #define WRPLL_CTL2 0x46060 #define WRPLL_PLL_ENABLE (1<<31) #define WRPLL_PLL_SELECT_SSC (0x01<<28) -#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) +#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) /* WRPLL divider programming */ #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) @@ -4591,36 +4517,21 @@ #define PORT_CLK_SEL_SPLL (3<<29) #define PORT_CLK_SEL_WRPLL1 (4<<29) #define PORT_CLK_SEL_WRPLL2 (5<<29) -#define PORT_CLK_SEL_NONE (7<<29) - -/* Transcoder clock selection */ -#define TRANS_CLK_SEL_A 0x46140 -#define TRANS_CLK_SEL_B 0x46144 -#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) -/* For each transcoder, we need to select the corresponding port clock */ -#define TRANS_CLK_SEL_DISABLED (0x0<<29) -#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) - -#define _TRANSA_MSA_MISC 0x60410 -#define _TRANSB_MSA_MISC 0x61410 -#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ - _TRANSB_MSA_MISC) -#define TRANS_MSA_SYNC_CLK (1<<0) -#define TRANS_MSA_6_BPC (0<<5) -#define TRANS_MSA_8_BPC (1<<5) -#define TRANS_MSA_10_BPC (2<<5) -#define TRANS_MSA_12_BPC (3<<5) -#define TRANS_MSA_16_BPC (4<<5) + +/* Pipe clock selection */ +#define PIPE_CLK_SEL_A 0x46140 +#define PIPE_CLK_SEL_B 0x46144 +#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) +/* For each pipe, we need to select the corresponding port clock */ +#define PIPE_CLK_SEL_DISABLED (0x0<<29) +#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) /* LCPLL Control */ #define LCPLL_CTL 0x130040 #define LCPLL_PLL_DISABLE (1<<31) #define LCPLL_PLL_LOCK (1<<30) -#define LCPLL_CLK_FREQ_MASK (3<<26) -#define LCPLL_CLK_FREQ_450 (0<<26) #define LCPLL_CD_CLOCK_DISABLE (1<<25) #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) -#define LCPLL_CD_SOURCE_FCLK (1<<21) /* Pipe WM_LINETIME - watermark line time */ #define PIPE_WM_LINETIME_A 0x45270 diff --git a/trunk/drivers/gpu/drm/i915/i915_suspend.c b/trunk/drivers/gpu/drm/i915/i915_suspend.c index 63d4d30c39de..5854bddb1e9f 100644 --- a/trunk/drivers/gpu/drm/i915/i915_suspend.c +++ b/trunk/drivers/gpu/drm/i915/i915_suspend.c @@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) reg = (pipe == PIPE_A) ? 
_LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) - array = dev_priv->regfile.save_palette_a; + array = dev_priv->save_palette_a; else - array = dev_priv->regfile.save_palette_b; + array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) array[i] = I915_READ(reg + (i << 2)); @@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) - array = dev_priv->regfile.save_palette_a; + array = dev_priv->save_palette_a; else - array = dev_priv->regfile.save_palette_b; + array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) I915_WRITE(reg + (i << 2), array[i]); @@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev) u16 cr_index, cr_data, st01; /* VGA color palette registers */ - dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); + dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); /* MSR bits */ - dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ); - if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { + dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; @@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev) i915_read_indexed(dev, cr_index, cr_data, 0x11) & (~0x80)); for (i = 0; i <= 0x24; i++) - dev_priv->regfile.saveCR[i] = + dev_priv->saveCR[i] = i915_read_indexed(dev, cr_index, cr_data, i); /* Make sure we don't turn off CR group 0 writes */ - dev_priv->regfile.saveCR[0x11] &= ~0x80; + dev_priv->saveCR[0x11] &= ~0x80; /* Attribute controller registers */ I915_READ8(st01); - dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX); + dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); for (i = 0; i <= 0x14; i++) - dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0); + dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); I915_READ8(st01); - I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX); + I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); I915_READ8(st01); /* Graphics controller registers */ for (i = 0; i < 9; i++) - dev_priv->regfile.saveGR[i] = + dev_priv->saveGR[i] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); - dev_priv->regfile.saveGR[0x10] = + dev_priv->saveGR[0x10] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); - dev_priv->regfile.saveGR[0x11] = + dev_priv->saveGR[0x11] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); - dev_priv->regfile.saveGR[0x18] = + dev_priv->saveGR[0x18] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); /* Sequencer registers */ for (i = 0; i < 8; i++) - dev_priv->regfile.saveSR[i] = + dev_priv->saveSR[i] = i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); } @@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev) u16 cr_index, cr_data, st01; /* MSR bits */ - I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); - if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { + I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; @@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev) /* Sequencer registers, don't write SR07 */ for (i = 0; i < 7; i++) i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, - dev_priv->regfile.saveSR[i]); + dev_priv->saveSR[i]); /* CRT controller regs */ /* Enable CR group 0 writes */ - i915_write_indexed(dev, cr_index, cr_data, 0x11, 
dev_priv->regfile.saveCR[0x11]); + i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); for (i = 0; i <= 0x24; i++) - i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]); + i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); /* Graphics controller regs */ for (i = 0; i < 9; i++) i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, - dev_priv->regfile.saveGR[i]); + dev_priv->saveGR[i]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, - dev_priv->regfile.saveGR[0x10]); + dev_priv->saveGR[0x10]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, - dev_priv->regfile.saveGR[0x11]); + dev_priv->saveGR[0x11]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, - dev_priv->regfile.saveGR[0x18]); + dev_priv->saveGR[0x18]); /* Attribute controller registers */ I915_READ8(st01); /* switch back to index mode */ for (i = 0; i <= 0x14; i++) - i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0); + i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); I915_READ8(st01); /* switch back to index mode */ - I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20); + I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); I915_READ8(st01); /* VGA color palette registers */ - I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); + I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); } static void i915_save_modeset_reg(struct drm_device *dev) @@ -244,162 +244,156 @@ static void i915_save_modeset_reg(struct drm_device *dev) return; /* Cursor state */ - dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); - dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); - dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); - dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); - dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); - dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); + dev_priv->saveCURACNTR = I915_READ(_CURACNTR); + dev_priv->saveCURAPOS = I915_READ(_CURAPOS); + dev_priv->saveCURABASE = I915_READ(_CURABASE); + dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); + dev_priv->saveCURBPOS = I915_READ(_CURBPOS); + dev_priv->saveCURBBASE = I915_READ(_CURBBASE); if (IS_GEN2(dev)) - dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); + dev_priv->saveCURSIZE = I915_READ(CURSIZE); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); - dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); + dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); + dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); } /* Pipe & plane A info */ - dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); - dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); + dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); + dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); - dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); - dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); + dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); + dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); + dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); } else { - dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); - dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); - dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); + dev_priv->saveFPA0 = I915_READ(_FPA0); + dev_priv->saveFPA1 = I915_READ(_FPA1); + dev_priv->saveDPLL_A = I915_READ(_DPLL_A); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); - 
dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); - dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); - dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); - dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); - dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); - dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); + dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); + dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); + dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); + dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); + dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); + dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); + dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); if (!HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); + dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); - dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); - dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); - dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); - - dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); - dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); - - dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); - dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); - dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); - - dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); - dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); - dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); - dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); - dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); - dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); - dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); - } - - dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); - dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); - dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); - dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); - dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); + dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); + dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); + dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); + dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); + + dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); + dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); + + dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); + dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); + dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); + + dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); + dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); + dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); + dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); + dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); + dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); + dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); + } + + dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); + dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); + dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); + dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); + dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); if (INTEL_INFO(dev)->gen >= 4) { - dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); - dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); + dev_priv->saveDSPASURF = 
I915_READ(_DSPASURF); + dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); } i915_save_palette(dev, PIPE_A); - dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); + dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); /* Pipe & plane B info */ - dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); - dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); + dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); + dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); - dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); - dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); + dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); + dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); + dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); } else { - dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); - dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); - dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); + dev_priv->saveFPB0 = I915_READ(_FPB0); + dev_priv->saveFPB1 = I915_READ(_FPB1); + dev_priv->saveDPLL_B = I915_READ(_DPLL_B); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); - dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); - dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); - dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); - dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); - dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); - dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); + dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); + dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); + dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); + dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); + dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); + dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); + dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); if (!HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); + dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); - dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); - dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); - dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); - - dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); - dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); - - dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); - dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); - dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); - - dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); - dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); - dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); - dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); - dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); - dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); - dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); - } - - dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); - dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); - dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); - dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); - dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); + dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); + dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); + dev_priv->savePIPEB_LINK_M1 = 
I915_READ(_PIPEB_LINK_M1); + dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); + + dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); + dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); + + dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); + dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); + dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); + + dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); + dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); + dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); + dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); + dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); + dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); + dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); + } + + dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); + dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); + dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); + dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); + dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); if (INTEL_INFO(dev)->gen >= 4) { - dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); - dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); + dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); + dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); } i915_save_palette(dev, PIPE_B); - dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); + dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: case 6: for (i = 0; i < 16; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); break; case 5: case 4: for (i = 0; i < 16; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); break; case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) - dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); case 2: for (i = 0; i < 8; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); break; } - /* CRT state */ - if (HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); - else - dev_priv->regfile.saveADPA = I915_READ(ADPA); - return; } @@ -418,20 +412,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev) case 7: case 6: for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 5: case 4: for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 3: case 2: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); break; } @@ -453,164 +447,158 @@ static void i915_restore_modeset_reg(struct drm_device *dev) } if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); - I915_WRITE(DISP_ARB_CTL, 
dev_priv->regfile.saveDISP_ARB_CTL); + I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); + I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); } /* Pipe & plane A info */ /* Prime the clock */ - if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { - I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & + if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { + I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_a_reg); udelay(150); } - I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); - I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); + I915_WRITE(fpa0_reg, dev_priv->saveFPA0); + I915_WRITE(fpa1_reg, dev_priv->saveFPA1); /* Actually enable it */ - I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); + I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); POSTING_READ(dpll_a_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); + I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); POSTING_READ(_DPLL_A_MD); } udelay(150); /* Restore mode */ - I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); - I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); - I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); - I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); - I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); - I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); + I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); + I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); + I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); + I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); + I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); + I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); + I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); - I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); - I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); - I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); + I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); + I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); + I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); + I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); - I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); - I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); + I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); + I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); - I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); - I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); - I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); + I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); + I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); + I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); - I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); - I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); - I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); - I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); - I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); - I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); - I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); + I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); + I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); + I915_WRITE(_TRANS_HBLANK_A, 
dev_priv->saveTRANS_HBLANK_A); + I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); + I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); + I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); + I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); } /* Restore plane info */ - I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); - I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); - I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); - I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); - I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); + I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); + I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); + I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); + I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); + I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); - I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); + I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); + I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); } - I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); + I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); i915_restore_palette(dev, PIPE_A); /* Enable the plane */ - I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); + I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); /* Pipe & plane B info */ - if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { - I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & + if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { + I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_b_reg); udelay(150); } - I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); - I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); + I915_WRITE(fpb0_reg, dev_priv->saveFPB0); + I915_WRITE(fpb1_reg, dev_priv->saveFPB1); /* Actually enable it */ - I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); + I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); POSTING_READ(dpll_b_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); + I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); POSTING_READ(_DPLL_B_MD); } udelay(150); /* Restore mode */ - I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); - I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); - I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); - I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); - I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); - I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); + I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); + I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); + I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); + I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); + I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); + I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); + I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); - I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); - I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); - I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); + I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); + I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); + I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); + I915_WRITE(_PIPEB_LINK_N1, 
dev_priv->savePIPEB_LINK_N1); - I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); - I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); + I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); + I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); - I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); - I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); - I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); + I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); + I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); + I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); - I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); - I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); - I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); - I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); - I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); - I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); - I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); + I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); + I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); + I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); + I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); + I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); + I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); + I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); } /* Restore plane info */ - I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); - I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); - I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); - I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); - I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); + I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); + I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); + I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); + I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); + I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); - I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); + I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); + I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } - I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); + I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); i915_restore_palette(dev, PIPE_B); /* Enable the plane */ - I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); + I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); /* Cursor state */ - I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); - I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); - I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); - I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); - I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); - I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); + I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); + I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); + I915_WRITE(_CURABASE, dev_priv->saveCURABASE); + I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); + I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); + I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); if (IS_GEN2(dev)) - I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); - - /* CRT state */ - if (HAS_PCH_SPLIT(dev)) - I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA); - else - I915_WRITE(ADPA, dev_priv->regfile.saveADPA); + I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 
return; } @@ -620,84 +608,89 @@ static void i915_save_display(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration control */ - dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); + dev_priv->saveDSPARB = I915_READ(DSPARB); /* This is only meaningful in non-KMS mode */ - /* Don't regfile.save them in KMS mode */ + /* Don't save them in KMS mode */ i915_save_modeset_reg(dev); + /* CRT state */ + if (HAS_PCH_SPLIT(dev)) { + dev_priv->saveADPA = I915_READ(PCH_ADPA); + } else { + dev_priv->saveADPA = I915_READ(ADPA); + } + /* LVDS state */ if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); - dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); - dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); - dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); - dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); - dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); + dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); + dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); + dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); + dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); + dev_priv->saveLVDS = I915_READ(PCH_LVDS); } else { - dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); - dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); - dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); + dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); + dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); + dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); + dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); if (INTEL_INFO(dev)->gen >= 4) - dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); if (IS_MOBILE(dev) && !IS_I830(dev)) - dev_priv->regfile.saveLVDS = I915_READ(LVDS); + dev_priv->saveLVDS = I915_READ(LVDS); } if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); + dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); - dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); - dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); + dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); + dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); + dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); } else { - dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); - dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); - dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); - } - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Display Port state */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - dev_priv->regfile.saveDP_B = I915_READ(DP_B); - dev_priv->regfile.saveDP_C = I915_READ(DP_C); - dev_priv->regfile.saveDP_D = I915_READ(DP_D); - dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); - dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); - dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); - dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); - dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); - 
dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); - dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); - dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); - } - /* FIXME: regfile.save TV & SDVO state */ - } - - /* Only regfile.save FBC state on the platform that supports FBC */ + dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); + dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); + dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); + } + + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + dev_priv->saveDP_B = I915_READ(DP_B); + dev_priv->saveDP_C = I915_READ(DP_C); + dev_priv->saveDP_D = I915_READ(DP_D); + dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); + dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); + dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); + dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); + dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); + dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); + dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); + dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); + } + /* FIXME: save TV & SDVO state */ + + /* Only save FBC state on the platform that supports FBC */ if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); + dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); } else if (IS_GM45(dev)) { - dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); + dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); } else { - dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); - dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); - dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); - dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); + dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); + dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); + dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); + dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); } } /* VGA state */ - dev_priv->regfile.saveVGA0 = I915_READ(VGA0); - dev_priv->regfile.saveVGA1 = I915_READ(VGA1); - dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); + dev_priv->saveVGA0 = I915_READ(VGA0); + dev_priv->saveVGA1 = I915_READ(VGA1); + dev_priv->saveVGA_PD = I915_READ(VGA_PD); if (HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL); + dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); else - dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL); + dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); i915_save_vga(dev); } @@ -707,95 +700,97 @@ static void i915_restore_display(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration */ - I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Display port ratios (must be done before clock is set) */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); - I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); - I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); - I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); - I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); - I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); - I915_WRITE(_PIPEA_DP_LINK_N, 
dev_priv->regfile.savePIPEA_DP_LINK_N); - I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); - } + I915_WRITE(DSPARB, dev_priv->saveDSPARB); + + /* Display port ratios (must be done before clock is set) */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); + I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); + I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); + I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); + I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); + I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); + I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); + I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } /* This is only meaningful in non-KMS mode */ /* Don't restore them in KMS mode */ i915_restore_modeset_reg(dev); + /* CRT state */ + if (HAS_PCH_SPLIT(dev)) + I915_WRITE(PCH_ADPA, dev_priv->saveADPA); + else + I915_WRITE(ADPA, dev_priv->saveADPA); + /* LVDS state */ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); + I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS); + I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); } else if (IS_MOBILE(dev) && !IS_I830(dev)) - I915_WRITE(LVDS, dev_priv->regfile.saveLVDS); + I915_WRITE(LVDS, dev_priv->saveLVDS); if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) - I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); + I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); - I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); + I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); + I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; * otherwise we get blank eDP screen after S3 on some machines */ - I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2); - I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL); - I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); - I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); - I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); - I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); + I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); + I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); + I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); + I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); + I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); + I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); I915_WRITE(RSTDBYCTL, - dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); + dev_priv->saveMCHBAR_RENDER_STANDBY); } else { - I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); - I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); - I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); - I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); - I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); - I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); - I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Display Port state */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - I915_WRITE(DP_B, 
dev_priv->regfile.saveDP_B); - I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); - I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); - } - /* FIXME: restore TV & SDVO state */ + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); + I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); + I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); + I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); + I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); + I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); + I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); } + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(DP_B, dev_priv->saveDP_B); + I915_WRITE(DP_C, dev_priv->saveDP_C); + I915_WRITE(DP_D, dev_priv->saveDP_D); + } + /* FIXME: restore TV & SDVO state */ + /* only restore FBC info on the platform that supports FBC*/ intel_disable_fbc(dev); if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); + I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else if (IS_GM45(dev)) { - I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); + I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else { - I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE); - I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE); - I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2); - I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); + I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); + I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); + I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); + I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); } } /* VGA state */ if (HAS_PCH_SPLIT(dev)) - I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL); + I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); else - I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL); + I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); - I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); - I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); - I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); + I915_WRITE(VGA0, dev_priv->saveVGA0); + I915_WRITE(VGA1, dev_priv->saveVGA1); + I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); POSTING_READ(VGA_PD); udelay(150); @@ -807,45 +802,46 @@ int i915_save_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); mutex_lock(&dev->struct_mutex); + /* Hardware status page */ + dev_priv->saveHWS = I915_READ(HWS_PGA); + i915_save_display(dev); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Interrupt state */ - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveDEIER = I915_READ(DEIER); - dev_priv->regfile.saveDEIMR = I915_READ(DEIMR); - dev_priv->regfile.saveGTIER = I915_READ(GTIER); - dev_priv->regfile.saveGTIMR = I915_READ(GTIMR); - dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); - dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); - dev_priv->regfile.saveMCHBAR_RENDER_STANDBY = - I915_READ(RSTDBYCTL); - dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); - } else { - dev_priv->regfile.saveIER = I915_READ(IER); - dev_priv->regfile.saveIMR = I915_READ(IMR); - } + /* Interrupt state */ + if (HAS_PCH_SPLIT(dev)) { + dev_priv->saveDEIER = I915_READ(DEIER); + dev_priv->saveDEIMR = I915_READ(DEIMR); + dev_priv->saveGTIER = I915_READ(GTIER); + dev_priv->saveGTIMR = I915_READ(GTIMR); + 
dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); + dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); + dev_priv->saveMCHBAR_RENDER_STANDBY = + I915_READ(RSTDBYCTL); + dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); + } else { + dev_priv->saveIER = I915_READ(IER); + dev_priv->saveIMR = I915_READ(IMR); } intel_disable_gt_powersave(dev); /* Cache mode state */ - dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); + dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); /* Memory Arbitration state */ - dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); + dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); /* Scratch space */ for (i = 0; i < 16; i++) { - dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2)); - dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2)); + dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); + dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); } for (i = 0; i < 3; i++) - dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); mutex_unlock(&dev->struct_mutex); @@ -857,40 +853,41 @@ int i915_restore_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); + pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); mutex_lock(&dev->struct_mutex); + /* Hardware status page */ + I915_WRITE(HWS_PGA, dev_priv->saveHWS); + i915_restore_display(dev); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Interrupt state */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(DEIER, dev_priv->regfile.saveDEIER); - I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR); - I915_WRITE(GTIER, dev_priv->regfile.saveGTIER); - I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR); - I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR); - I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR); - I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG); - } else { - I915_WRITE(IER, dev_priv->regfile.saveIER); - I915_WRITE(IMR, dev_priv->regfile.saveIMR); - } + /* Interrupt state */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(DEIER, dev_priv->saveDEIER); + I915_WRITE(DEIMR, dev_priv->saveDEIMR); + I915_WRITE(GTIER, dev_priv->saveGTIER); + I915_WRITE(GTIMR, dev_priv->saveGTIMR); + I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); + I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); + I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); + } else { + I915_WRITE(IER, dev_priv->saveIER); + I915_WRITE(IMR, dev_priv->saveIMR); } /* Cache mode state */ - I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); + I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); /* Memory arbitration state */ - I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); + I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); for (i = 0; i < 16; i++) { - I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]); - I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]); + I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); + I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); } for (i = 0; i < 3; i++) - I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]); + I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); mutex_unlock(&dev->struct_mutex); diff --git a/trunk/drivers/gpu/drm/i915/i915_sysfs.c b/trunk/drivers/gpu/drm/i915/i915_sysfs.c index 3bf51d58319d..903eebd2117a 100644 --- 
a/trunk/drivers/gpu/drm/i915/i915_sysfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_sysfs.c @@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, if (ret) return ret; - if (!dev_priv->l3_parity.remap_info) { + if (!dev_priv->mm.l3_remap_info) { temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); if (!temp) { mutex_unlock(&drm_dev->struct_mutex); @@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj, * at this point it is left as a TODO. */ if (temp) - dev_priv->l3_parity.remap_info = temp; + dev_priv->mm.l3_remap_info = temp; - memcpy(dev_priv->l3_parity.remap_info + (offset/4), + memcpy(dev_priv->mm.l3_remap_info + (offset/4), buf + (offset/4), count); @@ -211,9 +211,12 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, struct drm_i915_private *dev_priv = dev->dev_private; int ret; - mutex_lock(&dev_priv->rps.hw_lock); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return snprintf(buf, PAGE_SIZE, "%d", ret); } @@ -225,9 +228,12 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute struct drm_i915_private *dev_priv = dev->dev_private; int ret; - mutex_lock(&dev_priv->rps.hw_lock); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return snprintf(buf, PAGE_SIZE, "%d", ret); } @@ -248,14 +254,16 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, val /= GT_FREQUENCY_MULTIPLIER; - mutex_lock(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); hw_max = (rp_state_cap & 0xff); hw_min = ((rp_state_cap & 0xff0000) >> 16); if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return -EINVAL; } @@ -264,7 +272,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, dev_priv->rps.max_delay = val; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return count; } @@ -276,9 +284,12 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute struct drm_i915_private *dev_priv = dev->dev_private; int ret; - mutex_lock(&dev_priv->rps.hw_lock); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return snprintf(buf, PAGE_SIZE, "%d", ret); } @@ -299,14 +310,16 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, val /= GT_FREQUENCY_MULTIPLIER; - mutex_lock(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); hw_max = (rp_state_cap & 0xff); hw_min = ((rp_state_cap & 0xff0000) >> 16); if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return -EINVAL; } @@ -315,7 +328,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, dev_priv->rps.min_delay = val; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return count; diff --git a/trunk/drivers/gpu/drm/i915/i915_trace.h b/trunk/drivers/gpu/drm/i915/i915_trace.h index 
3db4a6817713..8134421b89a6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_trace.h +++ b/trunk/drivers/gpu/drm/i915/i915_trace.h @@ -229,26 +229,24 @@ TRACE_EVENT(i915_gem_evict_everything, ); TRACE_EVENT(i915_gem_ring_dispatch, - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), - TP_ARGS(ring, seqno, flags), + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno), TP_STRUCT__entry( __field(u32, dev) __field(u32, ring) __field(u32, seqno) - __field(u32, flags) ), TP_fast_assign( __entry->dev = ring->dev->primary->index; __entry->ring = ring->id; __entry->seqno = seqno; - __entry->flags = flags; i915_trace_irq_get(ring, seqno); ), - TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", - __entry->dev, __entry->ring, __entry->seqno, __entry->flags) + TP_printk("dev=%u, ring=%u, seqno=%u", + __entry->dev, __entry->ring, __entry->seqno) ); TRACE_EVENT(i915_gem_ring_flush, diff --git a/trunk/drivers/gpu/drm/i915/intel_bios.c b/trunk/drivers/gpu/drm/i915/intel_bios.c index 87e9b92039df..56846ed5ee55 100644 --- a/trunk/drivers/gpu/drm/i915/intel_bios.c +++ b/trunk/drivers/gpu/drm/i915/intel_bios.c @@ -499,12 +499,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) edp = find_section(bdb, BDB_EDP); if (!edp) { - if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { - DRM_DEBUG_KMS("No eDP BDB found but eDP panel " - "supported, assume %dbpp panel color " - "depth.\n", - dev_priv->edp.bpp); - } + if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) + DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); return; } @@ -657,9 +653,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) dev_priv->lvds_use_ssc = 1; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); - - /* eDP data */ - dev_priv->edp.bpp = 18; } static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) @@ -762,8 +755,7 @@ void intel_setup_bios(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Set the Panel Power On/Off timings if uninitialized. */ - if (!HAS_PCH_SPLIT(dev) && - I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) { + if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { /* Set T2 to 40ms and T5 to 200ms */ I915_WRITE(PP_ON_DELAYS, 0x019007d0); diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 5c7774396e10..6345878ae1e7 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode) int old_dpms; /* PCH platforms and VLV only support on/off. 
*/ - if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON) + if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; if (mode == connector->dpms) @@ -221,20 +221,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; u32 adpa; - if (HAS_PCH_SPLIT(dev)) - adpa = ADPA_HOTPLUG_BITS; - else - adpa = 0; - + adpa = ADPA_HOTPLUG_BITS; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) adpa |= ADPA_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) adpa |= ADPA_VSYNC_ACTIVE_HIGH; /* For CPT allow 3 pipe config, for others just use A or B */ - if (HAS_PCH_LPT(dev)) - ; /* Those bits don't exist here */ - else if (HAS_PCH_CPT(dev)) + if (HAS_PCH_CPT(dev)) adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); else if (intel_crtc->pipe == 0) adpa |= ADPA_PIPE_A_SELECT; @@ -407,16 +401,12 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid; - int ret; edid = intel_crt_get_edid(connector, adapter); if (!edid) return 0; - ret = intel_connector_update_modes(connector, edid); - kfree(edid); - - return ret; + return intel_connector_update_modes(connector, edid); } static bool intel_crt_detect_ddc(struct drm_connector *connector) @@ -654,22 +644,10 @@ static int intel_crt_set_property(struct drm_connector *connector, static void intel_crt_reset(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crt *crt = intel_attached_crt(connector); - if (HAS_PCH_SPLIT(dev)) { - u32 adpa; - - adpa = I915_READ(PCH_ADPA); - adpa &= ~ADPA_CRT_HOTPLUG_MASK; - adpa |= ADPA_HOTPLUG_BITS; - I915_WRITE(PCH_ADPA, adpa); - POSTING_READ(PCH_ADPA); - - DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); + if (HAS_PCH_SPLIT(dev)) crt->force_hotplug_required = 1; - } - } /* @@ -751,7 +729,7 @@ void intel_crt_init(struct drm_device *dev) crt->base.type = INTEL_OUTPUT_ANALOG; crt->base.cloneable = true; - if (IS_I830(dev)) + if (IS_HASWELL(dev) || IS_I830(dev)) crt->base.crtc_mask = (1 << 0); else crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); @@ -771,10 +749,7 @@ void intel_crt_init(struct drm_device *dev) crt->base.disable = intel_disable_crt; crt->base.enable = intel_enable_crt; - if (IS_HASWELL(dev)) - crt->base.get_hw_state = intel_ddi_get_hw_state; - else - crt->base.get_hw_state = intel_crt_get_hw_state; + crt->base.get_hw_state = intel_crt_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state; drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); @@ -791,6 +766,18 @@ void intel_crt_init(struct drm_device *dev) * Configure the automatic hotplug detection stuff */ crt->force_hotplug_required = 0; + if (HAS_PCH_SPLIT(dev)) { + u32 adpa; + + adpa = I915_READ(PCH_ADPA); + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + adpa |= ADPA_HOTPLUG_BITS; + I915_WRITE(PCH_ADPA, adpa); + POSTING_READ(PCH_ADPA); + + DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); + crt->force_hotplug_required = 1; + } dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; } diff --git a/trunk/drivers/gpu/drm/i915/intel_ddi.c b/trunk/drivers/gpu/drm/i915/intel_ddi.c index 852012b6fc5b..bfe375466a0e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ddi.c +++ b/trunk/drivers/gpu/drm/i915/intel_ddi.c @@ -58,26 +58,6 @@ static const u32 hsw_ddi_translations_fdi[] = { 0x00FFFFFF, 0x00040006 /* HDMI parameters */ }; -static enum port intel_ddi_get_encoder_port(struct intel_encoder 
*intel_encoder) -{ - struct drm_encoder *encoder = &intel_encoder->base; - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || - type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { - struct intel_digital_port *intel_dig_port = - enc_to_dig_port(encoder); - return intel_dig_port->port; - - } else if (type == INTEL_OUTPUT_ANALOG) { - return PORT_E; - - } else { - DRM_ERROR("Invalid DDI encoder type %d\n", type); - BUG(); - } -} - /* On Haswell, DDI port buffers must be programmed with correct values * in advance. The buffer values are different for FDI and DP modes, * but the HDMI/DVI fields are shared among those. So we program the DDI @@ -153,34 +133,25 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - u32 temp, i, rx_ctl_val; + int pipe = intel_crtc->pipe; + u32 reg, temp, i; - /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the - * mode set "sequence for CRT port" document: - * - TP1 to TP2 time with the default value - * - FDI delay to 90h - */ - I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) | - FDI_RX_PWRDN_LANE0_VAL(2) | - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - - /* Enable the PCH Receiver FDI PLL */ - rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE | - ((intel_crtc->fdi_lanes - 1) << 19); - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); - POSTING_READ(_FDI_RXA_CTL); - udelay(220); - - /* Switch from Rawclk to PCDclk */ - rx_ctl_val |= FDI_PCDCLK; - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); - - /* Configure Port Clock Select */ - I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); - - /* Start the training iterating through available voltages and emphasis, - * testing each value twice. 
*/ - for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) { + /* Configure CPU PLL, wait for warmup */ + I915_WRITE(SPLL_CTL, + SPLL_PLL_ENABLE | + SPLL_PLL_FREQ_1350MHz | + SPLL_PLL_SCC); + + /* Use SPLL to drive the output when in FDI mode */ + I915_WRITE(PORT_CLK_SEL(PORT_E), + PORT_CLK_SEL_SPLL); + I915_WRITE(PIPE_CLK_SEL(pipe), + PIPE_CLK_SEL_PORT(PORT_E)); + + udelay(20); + + /* Start the training iterating through available voltages and emphasis */ + for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) { /* Configure DP_TP_CTL with auto-training */ I915_WRITE(DP_TP_CTL(PORT_E), DP_TP_CTL_FDI_AUTOTRAIN | @@ -189,63 +160,103 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) DP_TP_CTL_ENABLE); /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ + temp = I915_READ(DDI_BUF_CTL(PORT_E)); + temp = (temp & ~DDI_BUF_EMP_MASK); I915_WRITE(DDI_BUF_CTL(PORT_E), - DDI_BUF_CTL_ENABLE | - ((intel_crtc->fdi_lanes - 1) << 1) | - hsw_ddi_buf_ctl_values[i / 2]); - POSTING_READ(DDI_BUF_CTL(PORT_E)); + temp | + DDI_BUF_CTL_ENABLE | + DDI_PORT_WIDTH_X2 | + hsw_ddi_buf_ctl_values[i]); udelay(600); - /* Program PCH FDI Receiver TU */ - I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64)); - - /* Enable PCH FDI Receiver with auto-training */ - rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); - POSTING_READ(_FDI_RXA_CTL); - - /* Wait for FDI receiver lane calibration */ - udelay(30); - - /* Unset FDI_RX_MISC pwrdn lanes */ - temp = I915_READ(_FDI_RXA_MISC); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - I915_WRITE(_FDI_RXA_MISC, temp); - POSTING_READ(_FDI_RXA_MISC); - - /* Wait for FDI auto training time */ - udelay(5); + /* We need to program FDI_RX_MISC with the default TP1 to TP2 + * values before enabling the receiver, and configure the delay + * for the FDI timing generator to 90h. Luckily, all the other + * bits are supposed to be zeroed, so we can write those values + * directly. 
+ */ + I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | + FDI_RX_FDI_DELAY_90); + + /* Enable CPU FDI Receiver with auto-training */ + reg = FDI_RX_CTL(pipe); + I915_WRITE(reg, + I915_READ(reg) | + FDI_LINK_TRAIN_AUTO | + FDI_RX_ENABLE | + FDI_LINK_TRAIN_PATTERN_1_CPT | + FDI_RX_ENHANCE_FRAME_ENABLE | + FDI_PORT_WIDTH_2X_LPT | + FDI_RX_PLL_ENABLE); + POSTING_READ(reg); + udelay(100); temp = I915_READ(DP_TP_STATUS(PORT_E)); if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { - DRM_DEBUG_KMS("FDI link training done on step %d\n", i); + DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i); /* Enable normal pixel sending for FDI */ I915_WRITE(DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | - DP_TP_CTL_LINK_TRAIN_NORMAL | - DP_TP_CTL_ENHANCED_FRAME_ENABLE | - DP_TP_CTL_ENABLE); + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_LINK_TRAIN_NORMAL | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_ENABLE); + + /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */ + temp = I915_READ(DDI_FUNC_CTL(pipe)); + temp &= ~PIPE_DDI_PORT_MASK; + temp |= PIPE_DDI_SELECT_PORT(PORT_E) | + PIPE_DDI_MODE_SELECT_FDI | + PIPE_DDI_FUNC_ENABLE | + PIPE_DDI_PORT_WIDTH_X2; + I915_WRITE(DDI_FUNC_CTL(pipe), + temp); + break; + } else { + DRM_ERROR("Error training BUF_CTL %d\n", i); - return; + /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */ + I915_WRITE(DP_TP_CTL(PORT_E), + I915_READ(DP_TP_CTL(PORT_E)) & + ~DP_TP_CTL_ENABLE); + I915_WRITE(FDI_RX_CTL(pipe), + I915_READ(FDI_RX_CTL(pipe)) & + ~FDI_RX_PLL_ENABLE); + continue; } + } - /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ - I915_WRITE(DP_TP_CTL(PORT_E), - I915_READ(DP_TP_CTL(PORT_E)) & ~DP_TP_CTL_ENABLE); + DRM_DEBUG_KMS("FDI train done.\n"); +} - rx_ctl_val &= ~FDI_RX_ENABLE; - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); +/* For DDI connections, it is possible to support different outputs over the + * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by + * the time the output is detected what exactly is on the other end of it. This + * function aims at providing support for this detection and proper output + * configuration. 
+ */ +void intel_ddi_init(struct drm_device *dev, enum port port) +{ + /* For now, we don't do any proper output detection and assume that we + * handle HDMI only */ - /* Reset FDI_RX_MISC pwrdn lanes */ - temp = I915_READ(_FDI_RXA_MISC); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - I915_WRITE(_FDI_RXA_MISC, temp); + switch(port){ + case PORT_A: + /* We don't handle eDP and DP yet */ + DRM_DEBUG_DRIVER("Found digital output on DDI port A\n"); + break; + /* Assume that the ports B, C and D are working in HDMI mode for now */ + case PORT_B: + case PORT_C: + case PORT_D: + intel_hdmi_init(dev, DDI_BUF_CTL(port), port); + break; + default: + DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", + port); + break; } - - DRM_ERROR("FDI link training failed!\n"); } /* WRPLL clock dividers */ @@ -634,435 +645,116 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {298000, 2, 21, 19}, }; -static void intel_ddi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +void intel_ddi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - int port = intel_ddi_get_encoder_port(intel_encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + int port = intel_hdmi->ddi_port; int pipe = intel_crtc->pipe; - int type = intel_encoder->type; - - DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", - port_name(port), pipe_name(pipe)); - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; - switch (intel_dp->lane_count) { - case 1: - intel_dp->DP |= DDI_PORT_WIDTH_X1; - break; - case 2: - intel_dp->DP |= DDI_PORT_WIDTH_X2; - break; - case 4: - intel_dp->DP |= DDI_PORT_WIDTH_X4; - break; - default: - intel_dp->DP |= DDI_PORT_WIDTH_X4; - WARN(1, "Unexpected DP lane count %d\n", - intel_dp->lane_count); - break; - } - - if (intel_dp->has_audio) { - DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n", - pipe_name(intel_crtc->pipe)); - - /* write eld */ - DRM_DEBUG_DRIVER("DP audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); - } - - intel_dp_init_link_config(intel_dp); - - } else if (type == INTEL_OUTPUT_HDMI) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - - if (intel_hdmi->has_audio) { - /* Proper support for digital audio needs a new logic - * and a new set of registers, so we leave it for future - * patch bombing. 
- */ - DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", - pipe_name(intel_crtc->pipe)); - - /* write eld */ - DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); - } - - intel_hdmi->set_infoframes(encoder, adjusted_mode); - } -} - -static struct intel_encoder * -intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder, *ret = NULL; - int num_encoders = 0; - - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - ret = intel_encoder; - num_encoders++; - } - - if (num_encoders != 1) - WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders, - intel_crtc->pipe); - - BUG_ON(ret == NULL); - return ret; -} - -void intel_ddi_put_crtc_pll(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - uint32_t val; - - switch (intel_crtc->ddi_pll_sel) { - case PORT_CLK_SEL_SPLL: - plls->spll_refcount--; - if (plls->spll_refcount == 0) { - DRM_DEBUG_KMS("Disabling SPLL\n"); - val = I915_READ(SPLL_CTL); - WARN_ON(!(val & SPLL_PLL_ENABLE)); - I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); - POSTING_READ(SPLL_CTL); - } - break; - case PORT_CLK_SEL_WRPLL1: - plls->wrpll1_refcount--; - if (plls->wrpll1_refcount == 0) { - DRM_DEBUG_KMS("Disabling WRPLL 1\n"); - val = I915_READ(WRPLL_CTL1); - WARN_ON(!(val & WRPLL_PLL_ENABLE)); - I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL1); - } - break; - case PORT_CLK_SEL_WRPLL2: - plls->wrpll2_refcount--; - if (plls->wrpll2_refcount == 0) { - DRM_DEBUG_KMS("Disabling WRPLL 2\n"); - val = I915_READ(WRPLL_CTL2); - WARN_ON(!(val & WRPLL_PLL_ENABLE)); - I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL2); - } - break; - } - - WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n"); - WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n"); - WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n"); - - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; -} + int p, n2, r2; + u32 temp, i; -static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2) -{ - u32 i; + /* On Haswell, we need to enable the clocks and prepare DDI function to + * work in HDMI mode for this pipe. 
+ */ + DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) - if (clock <= wrpll_tmds_clock_table[i].clock) + if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock) break; if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) i--; - *p = wrpll_tmds_clock_table[i].p; - *n2 = wrpll_tmds_clock_table[i].n2; - *r2 = wrpll_tmds_clock_table[i].r2; - - if (wrpll_tmds_clock_table[i].clock != clock) - DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n", - wrpll_tmds_clock_table[i].clock, clock); - - DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", - clock, *p, *n2, *r2); -} - -bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; - int type = intel_encoder->type; - enum pipe pipe = intel_crtc->pipe; - uint32_t reg, val; - - /* TODO: reuse PLLs when possible (compare values) */ - - intel_ddi_put_crtc_pll(crtc); - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - switch (intel_dp->link_bw) { - case DP_LINK_BW_1_62: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; - break; - case DP_LINK_BW_2_7: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; - break; - case DP_LINK_BW_5_4: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; - break; - default: - DRM_ERROR("Link bandwidth %d unsupported\n", - intel_dp->link_bw); - return false; - } - - /* We don't need to turn any PLL on because we'll use LCPLL. 
*/ - return true; - - } else if (type == INTEL_OUTPUT_HDMI) { - int p, n2, r2; - - if (plls->wrpll1_refcount == 0) { - DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", - pipe_name(pipe)); - plls->wrpll1_refcount++; - reg = WRPLL_CTL1; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; - } else if (plls->wrpll2_refcount == 0) { - DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", - pipe_name(pipe)); - plls->wrpll2_refcount++; - reg = WRPLL_CTL2; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; - } else { - DRM_ERROR("No WRPLLs available!\n"); - return false; - } - - WARN(I915_READ(reg) & WRPLL_PLL_ENABLE, - "WRPLL already enabled\n"); - - intel_ddi_calculate_wrpll(clock, &p, &n2, &r2); - - val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | - WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | - WRPLL_DIVIDER_POST(p); + p = wrpll_tmds_clock_table[i].p; + n2 = wrpll_tmds_clock_table[i].n2; + r2 = wrpll_tmds_clock_table[i].r2; - } else if (type == INTEL_OUTPUT_ANALOG) { - if (plls->spll_refcount == 0) { - DRM_DEBUG_KMS("Using SPLL on pipe %c\n", - pipe_name(pipe)); - plls->spll_refcount++; - reg = SPLL_CTL; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; - } + if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock) + DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n", + wrpll_tmds_clock_table[i].clock, crtc->mode.clock); - WARN(I915_READ(reg) & SPLL_PLL_ENABLE, - "SPLL already enabled\n"); + DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", + crtc->mode.clock, p, n2, r2); - val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; + /* Enable LCPLL if disabled */ + temp = I915_READ(LCPLL_CTL); + if (temp & LCPLL_PLL_DISABLE) + I915_WRITE(LCPLL_CTL, + temp & ~LCPLL_PLL_DISABLE); - } else { - WARN(1, "Invalid DDI encoder type %d\n", type); - return false; - } + /* Configure WR PLL 1, program the correct divider values for + * the desired frequency and wait for warmup */ + I915_WRITE(WRPLL_CTL1, + WRPLL_PLL_ENABLE | + WRPLL_PLL_SELECT_LCPLL_2700 | + WRPLL_DIVIDER_REFERENCE(r2) | + WRPLL_DIVIDER_FEEDBACK(n2) | + WRPLL_DIVIDER_POST(p)); - I915_WRITE(reg, val); udelay(20); - return true; -} + /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use + * this port for connection. + */ + I915_WRITE(PORT_CLK_SEL(port), + PORT_CLK_SEL_WRPLL1); + I915_WRITE(PIPE_CLK_SEL(pipe), + PIPE_CLK_SEL_PORT(port)); -void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - int type = intel_encoder->type; - uint32_t temp; + udelay(20); - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + if (intel_hdmi->has_audio) { + /* Proper support for digital audio needs a new logic and a new set + * of registers, so we leave it for future patch bombing. 
+ */ + DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", + pipe_name(intel_crtc->pipe)); - temp = TRANS_MSA_SYNC_CLK; - switch (intel_crtc->bpp) { - case 18: - temp |= TRANS_MSA_6_BPC; - break; - case 24: - temp |= TRANS_MSA_8_BPC; - break; - case 30: - temp |= TRANS_MSA_10_BPC; - break; - case 36: - temp |= TRANS_MSA_12_BPC; - break; - default: - temp |= TRANS_MSA_8_BPC; - WARN(1, "%d bpp unsupported by DDI function\n", - intel_crtc->bpp); - } - I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); + /* write eld */ + DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); + intel_write_eld(encoder, adjusted_mode); } -} -void intel_ddi_enable_pipe_func(struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - enum pipe pipe = intel_crtc->pipe; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - uint32_t temp; - - /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ - temp = TRANS_DDI_FUNC_ENABLE; - temp |= TRANS_DDI_SELECT_PORT(port); + /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ + temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port); switch (intel_crtc->bpp) { case 18: - temp |= TRANS_DDI_BPC_6; + temp |= PIPE_DDI_BPC_6; break; case 24: - temp |= TRANS_DDI_BPC_8; + temp |= PIPE_DDI_BPC_8; break; case 30: - temp |= TRANS_DDI_BPC_10; + temp |= PIPE_DDI_BPC_10; break; case 36: - temp |= TRANS_DDI_BPC_12; + temp |= PIPE_DDI_BPC_12; break; default: - WARN(1, "%d bpp unsupported by transcoder DDI function\n", + WARN(1, "%d bpp unsupported by pipe DDI function\n", intel_crtc->bpp); } - if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) - temp |= TRANS_DDI_PVSYNC; - if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) - temp |= TRANS_DDI_PHSYNC; - - if (cpu_transcoder == TRANSCODER_EDP) { - switch (pipe) { - case PIPE_A: - temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; - break; - case PIPE_B: - temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; - break; - case PIPE_C: - temp |= TRANS_DDI_EDP_INPUT_C_ONOFF; - break; - default: - BUG(); - break; - } - } - - if (type == INTEL_OUTPUT_HDMI) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - - if (intel_hdmi->has_hdmi_sink) - temp |= TRANS_DDI_MODE_SELECT_HDMI; - else - temp |= TRANS_DDI_MODE_SELECT_DVI; - - } else if (type == INTEL_OUTPUT_ANALOG) { - temp |= TRANS_DDI_MODE_SELECT_FDI; - temp |= (intel_crtc->fdi_lanes - 1) << 1; - - } else if (type == INTEL_OUTPUT_DISPLAYPORT || - type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - temp |= TRANS_DDI_MODE_SELECT_DP_SST; - - switch (intel_dp->lane_count) { - case 1: - temp |= TRANS_DDI_PORT_WIDTH_X1; - break; - case 2: - temp |= TRANS_DDI_PORT_WIDTH_X2; - break; - case 4: - temp |= TRANS_DDI_PORT_WIDTH_X4; - break; - default: - temp |= TRANS_DDI_PORT_WIDTH_X4; - WARN(1, "Unsupported lane count %d\n", - intel_dp->lane_count); - } - - } else { - WARN(1, "Invalid encoder type %d for pipe %d\n", - intel_encoder->type, pipe); - } - - I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); -} - -void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); - uint32_t val = I915_READ(reg); - - val &= ~(TRANS_DDI_FUNC_ENABLE | 
TRANS_DDI_PORT_MASK); - val |= TRANS_DDI_PORT_NONE; - I915_WRITE(reg, val); -} - -bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) -{ - struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder = intel_connector->encoder; - int type = intel_connector->base.connector_type; - enum port port = intel_ddi_get_encoder_port(intel_encoder); - enum pipe pipe = 0; - enum transcoder cpu_transcoder; - uint32_t tmp; - - if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) - return false; - - if (port == PORT_A) - cpu_transcoder = TRANSCODER_EDP; + if (intel_hdmi->has_hdmi_sink) + temp |= PIPE_DDI_MODE_SELECT_HDMI; else - cpu_transcoder = pipe; + temp |= PIPE_DDI_MODE_SELECT_DVI; - tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + temp |= PIPE_DDI_PVSYNC; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + temp |= PIPE_DDI_PHSYNC; - switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { - case TRANS_DDI_MODE_SELECT_HDMI: - case TRANS_DDI_MODE_SELECT_DVI: - return (type == DRM_MODE_CONNECTOR_HDMIA); + I915_WRITE(DDI_FUNC_CTL(pipe), temp); - case TRANS_DDI_MODE_SELECT_DP_SST: - if (type == DRM_MODE_CONNECTOR_eDP) - return true; - case TRANS_DDI_MODE_SELECT_DP_MST: - return (type == DRM_MODE_CONNECTOR_DisplayPort); - - case TRANS_DDI_MODE_SELECT_FDI: - return (type == DRM_MODE_CONNECTOR_VGA); - - default: - return false; - } + intel_hdmi->set_infoframes(encoder, adjusted_mode); } bool intel_ddi_get_hw_state(struct intel_encoder *encoder, @@ -1070,432 +762,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_ddi_get_encoder_port(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 tmp; int i; - tmp = I915_READ(DDI_BUF_CTL(port)); + tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port)); if (!(tmp & DDI_BUF_CTL_ENABLE)) return false; - if (port == PORT_A) { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); + for_each_pipe(i) { + tmp = I915_READ(DDI_FUNC_CTL(i)); - switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { - case TRANS_DDI_EDP_INPUT_A_ON: - case TRANS_DDI_EDP_INPUT_A_ONOFF: - *pipe = PIPE_A; - break; - case TRANS_DDI_EDP_INPUT_B_ONOFF: - *pipe = PIPE_B; - break; - case TRANS_DDI_EDP_INPUT_C_ONOFF: - *pipe = PIPE_C; - break; - } - - return true; - } else { - for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(i)); - - if ((tmp & TRANS_DDI_PORT_MASK) - == TRANS_DDI_SELECT_PORT(port)) { - *pipe = i; - return true; - } + if ((tmp & PIPE_DDI_PORT_MASK) + == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) { + *pipe = i; + return true; } } - DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port); + DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port); return true; } -static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - uint32_t temp, ret; - enum port port; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - int i; - - if (cpu_transcoder == TRANSCODER_EDP) { - port = PORT_A; - } else { - temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - temp &= TRANS_DDI_PORT_MASK; - - for (i = PORT_B; i <= PORT_E; i++) - if (temp == TRANS_DDI_SELECT_PORT(i)) - port = i; - } - - ret = I915_READ(PORT_CLK_SEL(port)); - - DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 
0x%08x\n", - pipe_name(pipe), port_name(port), ret); - - return ret; -} - -void intel_ddi_setup_hw_pll_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe; - struct intel_crtc *intel_crtc; - - for_each_pipe(pipe) { - intel_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - if (!intel_crtc->active) - continue; - - intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, - pipe); - - switch (intel_crtc->ddi_pll_sel) { - case PORT_CLK_SEL_SPLL: - dev_priv->ddi_plls.spll_refcount++; - break; - case PORT_CLK_SEL_WRPLL1: - dev_priv->ddi_plls.wrpll1_refcount++; - break; - case PORT_CLK_SEL_WRPLL2: - dev_priv->ddi_plls.wrpll2_refcount++; - break; - } - } -} - -void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) -{ - struct drm_crtc *crtc = &intel_crtc->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - enum port port = intel_ddi_get_encoder_port(intel_encoder); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - - if (cpu_transcoder != TRANSCODER_EDP) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_PORT(port)); -} - -void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) -{ - struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - - if (cpu_transcoder != TRANSCODER_EDP) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_DISABLED); -} - -static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) -{ - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_crtc *crtc = encoder->crtc; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - ironlake_edp_panel_vdd_on(intel_dp); - ironlake_edp_panel_on(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, true); - } - - WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); - I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel); - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); - intel_dp_start_link_train(intel_dp); - intel_dp_complete_link_train(intel_dp); - } -} - -static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, - enum port port) -{ - uint32_t reg = DDI_BUF_CTL(port); - int i; - - for (i = 0; i < 8; i++) { - udelay(1); - if (I915_READ(reg) & DDI_BUF_IS_IDLE) - return; - } - DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); -} - -static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) -{ - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - uint32_t val; - bool wait = false; - - val = I915_READ(DDI_BUF_CTL(port)); - if (val & DDI_BUF_CTL_ENABLE) { - val &= ~DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), val); - wait = true; - } - - val = I915_READ(DP_TP_CTL(port)); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); - - if (wait) - 
intel_wait_ddi_buf_idle(dev_priv, port); - - if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - ironlake_edp_panel_vdd_on(intel_dp); - ironlake_edp_panel_off(intel_dp); - } - - I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); -} - -static void intel_enable_ddi(struct intel_encoder *intel_encoder) +void intel_enable_ddi(struct intel_encoder *encoder) { - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_HDMI) { - /* In HDMI/DVI mode, the port width, and swing/emphasis values - * are ignored so nothing special needs to be done besides - * enabling the port. - */ - I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE); - } else if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - ironlake_edp_backlight_on(intel_dp); - } -} - -static void intel_disable_ddi(struct intel_encoder *intel_encoder) -{ - struct drm_encoder *encoder = &intel_encoder->base; - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - ironlake_edp_backlight_off(intel_dp); - } -} - -int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) -{ - if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) - return 450; - else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == - LCPLL_CLK_FREQ_450) - return 450; - else if (IS_ULT(dev_priv->dev)) - return 338; - else - return 540; -} + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + int port = intel_hdmi->ddi_port; + u32 temp; -void intel_ddi_pll_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t val = I915_READ(LCPLL_CTL); + temp = I915_READ(DDI_BUF_CTL(port)); + temp |= DDI_BUF_CTL_ENABLE; - /* The LCPLL register should be turned on by the BIOS. For now let's - * just check its state and print errors in case something is wrong. - * Don't even try to turn it on. + /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, + * and swing/emphasis values are ignored so nothing special needs + * to be done besides enabling the port. 
*/ - - DRM_DEBUG_KMS("CDCLK running at %dMHz\n", - intel_ddi_get_cdclk_freq(dev_priv)); - - if (val & LCPLL_CD_SOURCE_FCLK) - DRM_ERROR("CDCLK source is not LCPLL\n"); - - if (val & LCPLL_PLL_DISABLE) - DRM_ERROR("LCPLL is disabled\n"); -} - -void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) -{ - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; - enum port port = intel_dig_port->port; - bool wait; - uint32_t val; - - if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { - val = I915_READ(DDI_BUF_CTL(port)); - if (val & DDI_BUF_CTL_ENABLE) { - val &= ~DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), val); - wait = true; - } - - val = I915_READ(DP_TP_CTL(port)); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); - - if (wait) - intel_wait_ddi_buf_idle(dev_priv, port); - } - - val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | - DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; - if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) - val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); - - intel_dp->DP |= DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); - POSTING_READ(DDI_BUF_CTL(port)); - - udelay(600); + I915_WRITE(DDI_BUF_CTL(port), temp); } -void intel_ddi_fdi_disable(struct drm_crtc *crtc) +void intel_disable_ddi(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - uint32_t val; - - intel_ddi_post_disable(intel_encoder); - - val = I915_READ(_FDI_RXA_CTL); - val &= ~FDI_RX_ENABLE; - I915_WRITE(_FDI_RXA_CTL, val); - - val = I915_READ(_FDI_RXA_MISC); - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - I915_WRITE(_FDI_RXA_MISC, val); - - val = I915_READ(_FDI_RXA_CTL); - val &= ~FDI_PCDCLK; - I915_WRITE(_FDI_RXA_CTL, val); - - val = I915_READ(_FDI_RXA_CTL); - val &= ~FDI_RX_PLL_ENABLE; - I915_WRITE(_FDI_RXA_CTL, val); -} - -static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) -{ - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) - intel_dp_check_link_status(intel_dp); -} - -static void intel_ddi_destroy(struct drm_encoder *encoder) -{ - /* HDMI has nothing special to destroy, so we can go with this. 
*/ - intel_dp_encoder_destroy(encoder); -} - -static bool intel_ddi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - int type = intel_encoder->type; - - WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n"); - - if (type == INTEL_OUTPUT_HDMI) - return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode); - else - return intel_dp_mode_fixup(encoder, mode, adjusted_mode); -} - -static const struct drm_encoder_funcs intel_ddi_funcs = { - .destroy = intel_ddi_destroy, -}; - -static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { - .mode_fixup = intel_ddi_mode_fixup, - .mode_set = intel_ddi_mode_set, - .disable = intel_encoder_noop, -}; - -void intel_ddi_init(struct drm_device *dev, enum port port) -{ - struct intel_digital_port *intel_dig_port; - struct intel_encoder *intel_encoder; - struct drm_encoder *encoder; - struct intel_connector *hdmi_connector = NULL; - struct intel_connector *dp_connector = NULL; - - intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); - if (!intel_dig_port) - return; - - dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); - if (!dp_connector) { - kfree(intel_dig_port); - return; - } - - if (port != PORT_A) { - hdmi_connector = kzalloc(sizeof(struct intel_connector), - GFP_KERNEL); - if (!hdmi_connector) { - kfree(dp_connector); - kfree(intel_dig_port); - return; - } - } - - intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; - - drm_encoder_init(dev, encoder, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs); - - intel_encoder->enable = intel_enable_ddi; - intel_encoder->pre_enable = intel_ddi_pre_enable; - intel_encoder->disable = intel_disable_ddi; - intel_encoder->post_disable = intel_ddi_post_disable; - intel_encoder->get_hw_state = intel_ddi_get_hw_state; - - intel_dig_port->port = port; - if (hdmi_connector) - intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); - else - intel_dig_port->hdmi.sdvox_reg = 0; - intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + int port = intel_hdmi->ddi_port; + u32 temp; - intel_encoder->type = INTEL_OUTPUT_UNKNOWN; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; - intel_encoder->hot_plug = intel_ddi_hot_plug; + temp = I915_READ(DDI_BUF_CTL(port)); + temp &= ~DDI_BUF_CTL_ENABLE; - if (hdmi_connector) - intel_hdmi_init_connector(intel_dig_port, hdmi_connector); - intel_dp_init_connector(intel_dig_port, dp_connector); + I915_WRITE(DDI_BUF_CTL(port), temp); } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 3f7f62d370cb..b426d44a2b05 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -41,6 +41,8 @@ #include #include +#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + bool intel_pipe_has_type(struct drm_crtc *crtc, int type); static void intel_increase_pllclock(struct drm_crtc *crtc); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); @@ -78,16 +80,6 @@ struct intel_limit { /* FDI */ #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ -int -intel_pch_rawclk(struct drm_device 
*dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - WARN_ON(!HAS_PCH_SPLIT(dev)); - - return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; -} - static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, @@ -388,7 +380,7 @@ static const intel_limit_t intel_limits_vlv_dac = { static const intel_limit_t intel_limits_vlv_hdmi = { .dot = { .min = 20000, .max = 165000 }, - .vco = { .min = 4000000, .max = 5994000}, + .vco = { .min = 5994000, .max = 4000000 }, .n = { .min = 1, .max = 7 }, .m = { .min = 60, .max = 300 }, /* guess */ .m1 = { .min = 2, .max = 3 }, @@ -401,10 +393,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = { }; static const intel_limit_t intel_limits_vlv_dp = { - .dot = { .min = 25000, .max = 270000 }, - .vco = { .min = 4000000, .max = 6000000 }, + .dot = { .min = 162000, .max = 270000 }, + .vco = { .min = 5994000, .max = 4000000 }, .n = { .min = 1, .max = 7 }, - .m = { .min = 22, .max = 450 }, + .m = { .min = 60, .max = 300 }, /* guess */ .m1 = { .min = 2, .max = 3 }, .m2 = { .min = 11, .max = 156 }, .p = { .min = 10, .max = 30 }, @@ -539,7 +531,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, limit = &intel_limits_ironlake_single_lvds; } } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + HAS_eDP) limit = &intel_limits_ironlake_display_port; else limit = &intel_limits_ironlake_dac; @@ -935,15 +927,6 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, return true; } -enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - return intel_crtc->cpu_transcoder; -} - static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1016,11 +999,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); if (INTEL_INFO(dev)->gen >= 4) { - int reg = PIPECONF(cpu_transcoder); + int reg = PIPECONF(pipe); /* Wait for the Pipe State to go off */ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, @@ -1122,14 +1103,12 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, int reg; u32 val; bool cur_state; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); if (IS_HASWELL(dev_priv->dev)) { /* On Haswell, DDI is used instead of FDI_TX_CTL */ - reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); + reg = DDI_FUNC_CTL(pipe); val = I915_READ(reg); - cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); + cur_state = !!(val & PIPE_DDI_FUNC_ENABLE); } else { reg = FDI_TX_CTL(pipe); val = I915_READ(reg); @@ -1149,9 +1128,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv, u32 val; bool cur_state; - reg = FDI_RX_CTL(pipe); - val = I915_READ(reg); - cur_state = !!(val & FDI_RX_ENABLE); + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n"); + return; + } else { + reg = FDI_RX_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & FDI_RX_ENABLE); + } WARN(cur_state != state, "FDI RX state assertion failure (expected %s, current %s)\n", 
state_string(state), state_string(cur_state)); @@ -1184,6 +1168,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, int reg; u32 val; + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n"); + return; + } reg = FDI_RX_CTL(pipe); val = I915_READ(reg); WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); @@ -1224,14 +1212,12 @@ void assert_pipe(struct drm_i915_private *dev_priv, int reg; u32 val; bool cur_state; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); /* if we need the pipe A quirk it must be always on */ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) state = true; - reg = PIPECONF(cpu_transcoder); + reg = PIPECONF(pipe); val = I915_READ(reg); cur_state = !!(val & PIPECONF_ENABLE); WARN(cur_state != state, @@ -1568,14 +1554,14 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) } /** - * ironlake_enable_pch_pll - enable PCH PLL + * intel_enable_pch_pll - enable PCH PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to enable * * The PCH PLL needs to be enabled before the PCH transcoder, since it * drives the transcoder clock. */ -static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) +static void intel_enable_pch_pll(struct intel_crtc *intel_crtc) { struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; struct intel_pch_pll *pll; @@ -1659,12 +1645,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) pll->on = false; } -static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void intel_enable_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + int reg; + u32 val, pipeconf_val; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - uint32_t reg, val, pipeconf_val; /* PCH only available on ILK+ */ BUG_ON(dev_priv->info->gen < 5); @@ -1678,15 +1664,10 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, assert_fdi_tx_enabled(dev_priv, pipe); assert_fdi_rx_enabled(dev_priv, pipe); - if (HAS_PCH_CPT(dev)) { - /* Workaround: Set the timing override bit before enabling the - * pch transcoder. */ - reg = TRANS_CHICKEN2(pipe); - val = I915_READ(reg); - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(reg, val); + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n"); + return; } - reg = TRANSCONF(pipe); val = I915_READ(reg); pipeconf_val = I915_READ(PIPECONF(pipe)); @@ -1715,42 +1696,11 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, DRM_ERROR("failed to enable transcoder %d\n", pipe); } -static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - u32 val, pipeconf_val; - - /* PCH only available on ILK+ */ - BUG_ON(dev_priv->info->gen < 5); - - /* FDI must be feeding us bits for PCH ports */ - assert_fdi_tx_enabled(dev_priv, cpu_transcoder); - assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); - - /* Workaround: set timing override bit. 
*/ - val = I915_READ(_TRANSA_CHICKEN2); - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(_TRANSA_CHICKEN2, val); - - val = TRANS_ENABLE; - pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); - - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == - PIPECONF_INTERLACED_ILK) - val |= TRANS_INTERLACED; - else - val |= TRANS_PROGRESSIVE; - - I915_WRITE(TRANSCONF(TRANSCODER_A), val); - if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) - DRM_ERROR("Failed to enable PCH transcoder\n"); -} - -static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void intel_disable_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; - uint32_t reg, val; + int reg; + u32 val; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); @@ -1766,31 +1716,6 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, /* wait for PCH transcoder off, transcoder state */ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) DRM_ERROR("failed to disable transcoder %d\n", pipe); - - if (!HAS_PCH_IBX(dev)) { - /* Workaround: Clear the timing override chicken bit again. */ - reg = TRANS_CHICKEN2(pipe); - val = I915_READ(reg); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(reg, val); - } -} - -static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = I915_READ(_TRANSACONF); - val &= ~TRANS_ENABLE; - I915_WRITE(_TRANSACONF, val); - /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) - DRM_ERROR("Failed to disable PCH transcoder\n"); - - /* Workaround: clear timing override bit. */ - val = I915_READ(_TRANSA_CHICKEN2); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(_TRANSA_CHICKEN2, val); } /** @@ -1810,17 +1735,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool pch_port) { - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - enum transcoder pch_transcoder; int reg; u32 val; - if (IS_HASWELL(dev_priv->dev)) - pch_transcoder = TRANSCODER_A; - else - pch_transcoder = pipe; - /* * A pipe without a PLL won't actually be able to drive bits from * a plane. 
On ILK+ the pipe PLLs are integrated, so we don't @@ -1831,13 +1748,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, else { if (pch_port) { /* if driving the PCH, we need FDI enabled */ - assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); - assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder); + assert_fdi_rx_pll_enabled(dev_priv, pipe); + assert_fdi_tx_pll_enabled(dev_priv, pipe); } /* FIXME: assert CPU port conditions for SNB+ */ } - reg = PIPECONF(cpu_transcoder); + reg = PIPECONF(pipe); val = I915_READ(reg); if (val & PIPECONF_ENABLE) return; @@ -1861,8 +1778,6 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, static void intel_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); int reg; u32 val; @@ -1876,7 +1791,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) return; - reg = PIPECONF(cpu_transcoder); + reg = PIPECONF(pipe); val = I915_READ(reg); if ((val & PIPECONF_ENABLE) == 0) return; @@ -1892,10 +1807,8 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, void intel_flush_display_plane(struct drm_i915_private *dev_priv, enum plane plane) { - if (dev_priv->info->gen >= 4) - I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); - else - I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); + I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); + I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); } /** @@ -2013,9 +1926,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel * is assumed to be a power-of-two. 
*/ -unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, - unsigned int bpp, - unsigned int pitch) +static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y, + unsigned int bpp, + unsigned int pitch) { int tile_rows, tiles; @@ -2056,38 +1969,24 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - switch (fb->pixel_format) { - case DRM_FORMAT_C8: + switch (fb->bits_per_pixel) { + case 8: dspcntr |= DISPPLANE_8BPP; break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_ARGB1555: - dspcntr |= DISPPLANE_BGRX555; - break; - case DRM_FORMAT_RGB565: - dspcntr |= DISPPLANE_BGRX565; - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - dspcntr |= DISPPLANE_BGRX888; - break; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - dspcntr |= DISPPLANE_RGBX888; - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - dspcntr |= DISPPLANE_BGRX101010; + case 16: + if (fb->depth == 15) + dspcntr |= DISPPLANE_15_16BPP; + else + dspcntr |= DISPPLANE_16BPP; break; - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: - dspcntr |= DISPPLANE_RGBX101010; + case 24: + case 32: + dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break; default: - DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); + DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); return -EINVAL; } - if (INTEL_INFO(dev)->gen >= 4) { if (obj->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; @@ -2101,9 +2000,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, if (INTEL_INFO(dev)->gen >= 4) { intel_crtc->dspaddr_offset = - intel_gen4_compute_offset_xtiled(&x, &y, - fb->bits_per_pixel / 8, - fb->pitches[0]); + gen4_compute_dspaddr_offset_xtiled(&x, &y, + fb->bits_per_pixel / 8, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; } else { intel_crtc->dspaddr_offset = linear_offset; @@ -2154,31 +2053,27 @@ static int ironlake_update_plane(struct drm_crtc *crtc, dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - switch (fb->pixel_format) { - case DRM_FORMAT_C8: + switch (fb->bits_per_pixel) { + case 8: dspcntr |= DISPPLANE_8BPP; break; - case DRM_FORMAT_RGB565: - dspcntr |= DISPPLANE_BGRX565; - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - dspcntr |= DISPPLANE_BGRX888; - break; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - dspcntr |= DISPPLANE_RGBX888; - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - dspcntr |= DISPPLANE_BGRX101010; + case 16: + if (fb->depth != 16) + return -EINVAL; + + dspcntr |= DISPPLANE_16BPP; break; - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: - dspcntr |= DISPPLANE_RGBX101010; + case 24: + case 32: + if (fb->depth == 24) + dspcntr |= DISPPLANE_32BPP_NO_ALPHA; + else if (fb->depth == 30) + dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; + else + return -EINVAL; break; default: - DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); + DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); return -EINVAL; } @@ -2194,9 +2089,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc, linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); intel_crtc->dspaddr_offset = - intel_gen4_compute_offset_xtiled(&x, &y, - fb->bits_per_pixel / 8, - fb->pitches[0]); + gen4_compute_dspaddr_offset_xtiled(&x, &y, + 
fb->bits_per_pixel / 8, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", @@ -2204,12 +2099,8 @@ static int ironlake_update_plane(struct drm_crtc *crtc, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_MODIFY_DISPBASE(DSPSURF(plane), obj->gtt_offset + intel_crtc->dspaddr_offset); - if (IS_HASWELL(dev)) { - I915_WRITE(DSPOFFSET(plane), (y << 16) | x); - } else { - I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); - I915_WRITE(DSPLINOFF(plane), linear_offset); - } + I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); + I915_WRITE(DSPLINOFF(plane), linear_offset); POSTING_READ(reg); return 0; @@ -2257,39 +2148,13 @@ intel_finish_fb(struct drm_framebuffer *old_fb) return ret; } -static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_master_private *master_priv; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - if (!dev->primary->master) - return; - - master_priv = dev->primary->master->driver_priv; - if (!master_priv->sarea_priv) - return; - - switch (intel_crtc->pipe) { - case 0: - master_priv->sarea_priv->pipeA_x = x; - master_priv->sarea_priv->pipeA_y = y; - break; - case 1: - master_priv->sarea_priv->pipeB_x = x; - master_priv->sarea_priv->pipeB_y = y; - break; - default: - break; - } -} - static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_framebuffer *old_fb; int ret; @@ -2341,7 +2206,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); - intel_crtc_update_sarea_pos(crtc, x, y); + if (!dev->primary->master) + return 0; + + master_priv = dev->primary->master->driver_priv; + if (!master_priv->sarea_priv) + return 0; + + if (intel_crtc->pipe) { + master_priv->sarea_priv->pipeB_x = x; + master_priv->sarea_priv->pipeB_y = y; + } else { + master_priv->sarea_priv->pipeA_x = x; + master_priv->sarea_priv->pipeA_y = y; + } return 0; } @@ -2436,29 +2314,6 @@ static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) POSTING_READ(SOUTH_CHICKEN1); } -static void ivb_modeset_global_resources(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *pipe_B_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); - struct intel_crtc *pipe_C_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); - uint32_t temp; - - /* When everything is off disable fdi C so that we could enable fdi B - * with all lanes. XXX: This misses the case where a pipe is not using - * any pch resources and so doesn't need any fdi lanes. */ - if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - - temp = I915_READ(SOUTH_CHICKEN1); - temp &= ~FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("disabling fdi C rx\n"); - I915_WRITE(SOUTH_CHICKEN1, temp); - } -} - /* The FDI link training functions for ILK/Ibexpeak. 
*/ static void ironlake_fdi_link_train(struct drm_crtc *crtc) { @@ -2502,9 +2357,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) udelay(150); /* Ironlake workaround, enable clock pointer after FDI enable*/ - I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); - I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | - FDI_RX_PHASE_SYNC_POINTER_EN); + if (HAS_PCH_IBX(dev)) { + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | + FDI_RX_PHASE_SYNC_POINTER_EN); + } reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { @@ -2593,9 +2450,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; I915_WRITE(reg, temp | FDI_TX_ENABLE); - I915_WRITE(FDI_RX_MISC(pipe), - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { @@ -2610,7 +2464,8 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); - cpt_phase_pointer_enable(dev, pipe); + if (HAS_PCH_CPT(dev)) + cpt_phase_pointer_enable(dev, pipe); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); @@ -2715,9 +2570,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); - DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", - I915_READ(FDI_RX_IIR(pipe))); - /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); @@ -2730,9 +2582,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) temp |= FDI_COMPOSITE_SYNC; I915_WRITE(reg, temp | FDI_TX_ENABLE); - I915_WRITE(FDI_RX_MISC(pipe), - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_AUTO; @@ -2744,7 +2593,8 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); - cpt_phase_pointer_enable(dev, pipe); + if (HAS_PCH_CPT(dev)) + cpt_phase_pointer_enable(dev, pipe); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); @@ -2763,7 +2613,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) if (temp & FDI_RX_BIT_LOCK || (I915_READ(reg) & FDI_RX_BIT_LOCK)) { I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); + DRM_DEBUG_KMS("FDI train 1 done.\n"); break; } } @@ -2804,7 +2654,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) if (temp & FDI_RX_SYMBOL_LOCK) { I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); + DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } @@ -2821,6 +2671,9 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) int pipe = intel_crtc->pipe; u32 reg, temp; + /* Write the TU size bits so error detection works */ + I915_WRITE(FDI_RX_TUSIZE1(pipe), + I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ reg = FDI_RX_CTL(pipe); @@ -2921,6 +2774,9 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) /* Ironlake workaround, disable clock pointer after downing FDI */ if (HAS_PCH_IBX(dev)) { I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + I915_WRITE(FDI_RX_CHICKEN(pipe), + I915_READ(FDI_RX_CHICKEN(pipe) & + ~FDI_RX_PHASE_SYNC_POINTER_EN)); } else if (HAS_PCH_CPT(dev)) { cpt_phase_pointer_disable(dev, pipe); } @@ -2983,7 +2839,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) mutex_unlock(&dev->struct_mutex); } -static bool 
ironlake_crtc_driving_pch(struct drm_crtc *crtc) +static bool intel_crtc_driving_pch(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct intel_encoder *intel_encoder; @@ -2993,6 +2849,23 @@ static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) * must be driven by its own crtc; no sharing is possible. */ for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + + /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell + * CPU handles all others */ + if (IS_HASWELL(dev)) { + /* It is still unclear how this will work on PPT, so throw up a warning */ + WARN_ON(!HAS_PCH_LPT(dev)); + + if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { + DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); + return true; + } else { + DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", + intel_encoder->type); + return false; + } + } + switch (intel_encoder->type) { case INTEL_OUTPUT_EDP: if (!intel_encoder_is_pch_edp(&intel_encoder->base)) @@ -3004,11 +2877,6 @@ static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) return true; } -static bool haswell_crtc_driving_pch(struct drm_crtc *crtc) -{ - return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG); -} - /* Program iCLKIP clock to the desired frequency */ static void lpt_program_iclkip(struct drm_crtc *crtc) { @@ -3118,24 +2986,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) assert_transcoder_disabled(dev_priv, pipe); - /* Write the TU size bits before fdi link training, so that error - * detection works. */ - I915_WRITE(FDI_RX_TUSIZE1(pipe), - I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); - /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc); - /* XXX: pch pll's can be enabled any time before we enable the PCH - * transcoder, and we actually should do this to not upset any PCH - * transcoder that already use the clock when we share it. - * - * Note that enable_pch_pll tries to do the right thing, but get_pch_pll - * unconditionally resets the pll - we need that to have the right LVDS - * enable sequence. */ - ironlake_enable_pch_pll(intel_crtc); + intel_enable_pch_pll(intel_crtc); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_LPT(dev)) { + DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n"); + lpt_program_iclkip(crtc); + } else if (HAS_PCH_CPT(dev)) { u32 sel; temp = I915_READ(PCH_DPLL_SEL); @@ -3172,7 +3031,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); - intel_fdi_normal_train(crtc); + if (!IS_HASWELL(dev)) + intel_fdi_normal_train(crtc); /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && @@ -3204,37 +3064,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) temp |= TRANS_DP_PORT_SEL_D; break; default: - BUG(); + DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); + temp |= TRANS_DP_PORT_SEL_B; + break; } I915_WRITE(reg, temp); } - ironlake_enable_pch_transcoder(dev_priv, pipe); -} - -static void lpt_pch_enable(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - - assert_transcoder_disabled(dev_priv, TRANSCODER_A); - - lpt_program_iclkip(crtc); - - /* Set transcoder timing. 
*/ - I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder))); - I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder))); - I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder))); - - I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder))); - I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder))); - I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder))); - I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder))); - - lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); + intel_enable_transcoder(dev_priv, pipe); } static void intel_put_pch_pll(struct intel_crtc *intel_crtc) @@ -3327,12 +3165,16 @@ static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u3 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - int dslreg = PIPEDSL(pipe); + int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); u32 temp; temp = I915_READ(dslreg); udelay(500); if (wait_for(I915_READ(dslreg) != temp, 5)) { + /* Without this, mode sets may fail silently on FDI */ + I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); + udelay(250); + I915_WRITE(tc2reg, 0); if (wait_for(I915_READ(dslreg) != temp, 5)) DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); } @@ -3363,12 +3205,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); } - is_pch_port = ironlake_crtc_driving_pch(crtc); + is_pch_port = intel_crtc_driving_pch(crtc); if (is_pch_port) { - /* Note: FDI PLL enabling _must_ be done before we enable the - * cpu pipes, hence this is separate from all the other fdi/pch - * enabling. */ ironlake_fdi_pll_enable(intel_crtc); } else { assert_fdi_tx_disabled(dev_priv, pipe); @@ -3381,17 +3220,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && - (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { /* Force use of hard-coded filter coefficients * as some pre-programmed values are broken, * e.g. x201. 
*/ - if (IS_IVYBRIDGE(dev)) - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | - PF_PIPE_SEL_IVB(pipe)); - else - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); } @@ -3431,83 +3265,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_wait_for_vblank(dev, intel_crtc->pipe); } -static void haswell_crtc_enable(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - bool is_pch_port; - - WARN_ON(!crtc->enabled); - - if (intel_crtc->active) - return; - - intel_crtc->active = true; - intel_update_watermarks(dev); - - is_pch_port = haswell_crtc_driving_pch(crtc); - - if (is_pch_port) - dev_priv->display.fdi_link_train(crtc); - - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_enable) - encoder->pre_enable(encoder); - - intel_ddi_enable_pipe_clock(intel_crtc); - - /* Enable panel fitting for eDP */ - if (dev_priv->pch_pf_size && - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { - /* Force use of hard-coded filter coefficients - * as some pre-programmed values are broken, - * e.g. x201. - */ - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | - PF_PIPE_SEL_IVB(pipe)); - I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); - I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); - } - - /* - * On ILK+ LUT must be loaded before the pipe is running but with - * clocks enabled - */ - intel_crtc_load_lut(crtc); - - intel_ddi_set_pipe_settings(crtc); - intel_ddi_enable_pipe_func(crtc); - - intel_enable_pipe(dev_priv, pipe, is_pch_port); - intel_enable_plane(dev_priv, plane, pipe); - - if (is_pch_port) - lpt_pch_enable(crtc); - - mutex_lock(&dev->struct_mutex); - intel_update_fbc(dev); - mutex_unlock(&dev->struct_mutex); - - intel_crtc_update_cursor(crtc, true); - - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); - - /* - * There seems to be a race in PCH platform hw (at least on some - * outputs) where an enabled pipe still completes any pageflip right - * away (as if the pipe is off) instead of waiting for vblank. As soon - * as the first vblank happend, everything works as expected. Hence just - * wait for one vblank before returning to avoid strange things - * happening. 
- */ - intel_wait_for_vblank(dev, intel_crtc->pipe); -} - static void ironlake_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -3546,7 +3303,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) ironlake_fdi_disable(crtc); - ironlake_disable_pch_transcoder(dev_priv, pipe); + intel_disable_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ @@ -3588,78 +3345,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) mutex_unlock(&dev->struct_mutex); } -static void haswell_crtc_disable(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - bool is_pch_port; - - if (!intel_crtc->active) - return; - - is_pch_port = haswell_crtc_driving_pch(crtc); - - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->disable(encoder); - - intel_crtc_wait_for_pending_flips(crtc); - drm_vblank_off(dev, pipe); - intel_crtc_update_cursor(crtc, false); - - intel_disable_plane(dev_priv, plane, pipe); - - if (dev_priv->cfb_plane == plane) - intel_disable_fbc(dev); - - intel_disable_pipe(dev_priv, pipe); - - intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); - - /* Disable PF */ - I915_WRITE(PF_CTL(pipe), 0); - I915_WRITE(PF_WIN_SZ(pipe), 0); - - intel_ddi_disable_pipe_clock(intel_crtc); - - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_disable) - encoder->post_disable(encoder); - - if (is_pch_port) { - lpt_disable_pch_transcoder(dev_priv); - intel_ddi_fdi_disable(crtc); - } - - intel_crtc->active = false; - intel_update_watermarks(dev); - - mutex_lock(&dev->struct_mutex); - intel_update_fbc(dev); - mutex_unlock(&dev->struct_mutex); -} - -static void ironlake_crtc_off(struct drm_crtc *crtc) +static void ironlake_crtc_off(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_put_pch_pll(intel_crtc); } -static void haswell_crtc_off(struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - /* Stop saying we're using TRANSCODER_EDP because some other CRTC might - * start using it. */ - intel_crtc->cpu_transcoder = intel_crtc->pipe; - - intel_ddi_put_crtc_pll(crtc); -} - static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) { if (!enable && intel_crtc->overlay) { @@ -4150,6 +3841,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, } } + if (intel_encoder->type == INTEL_OUTPUT_EDP) { + /* Use VBT settings if we have an eDP panel */ + unsigned int edp_bpc = dev_priv->edp.bpp / 3; + + if (edp_bpc && edp_bpc < display_bpc) { + DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); + display_bpc = edp_bpc; + } + continue; + } + /* * HDMI is either 12 or 8, so if the display lets 10bpc sneak * through, clamp it down. (Note: >12bpc will be caught below.) 
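The eDP branch added to intel_choose_pipe_bpp_dither above divides the VBT's panel bpp by three to get bits per channel and only ever lowers the pipe's bpc, never raises it. A user-space sketch of just that clamp (the helper name and sample values are made up for illustration):

#include <stdio.h>

/* Clamp the pipe's bits-per-channel to what the eDP panel reports.
 * vbt_edp_bpp is bits per pixel over all three channels, so /3 gives
 * bits per channel; a value of 0 means "unknown" and is ignored. */
static unsigned int clamp_bpc_to_edp(unsigned int display_bpc,
                                     unsigned int vbt_edp_bpp)
{
        unsigned int edp_bpc = vbt_edp_bpp / 3;

        if (edp_bpc && edp_bpc < display_bpc)
                return edp_bpc;
        return display_bpc;
}

int main(void)
{
        /* An 18-bpp (6 bpc) panel pulls an 8-bpc request down to 6. */
        printf("%u\n", clamp_bpc_to_edp(8, 18)); /* -> 6 */
        /* A bpp of 0 in the VBT leaves the request alone. */
        printf("%u\n", clamp_bpc_to_edp(8, 0));  /* -> 8 */
        return 0;
}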
@@ -4359,7 +4061,7 @@ static void vlv_update_pll(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, intel_clock_t *clock, intel_clock_t *reduced_clock, - int num_connectors) + int refclk, int num_connectors) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4367,19 +4069,9 @@ static void vlv_update_pll(struct drm_crtc *crtc, int pipe = intel_crtc->pipe; u32 dpll, mdiv, pdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; - bool is_sdvo; - u32 temp; - - is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); - - dpll = DPLL_VGA_MODE_DIS; - dpll |= DPLL_EXT_BUFFER_ENABLE_VLV; - dpll |= DPLL_REFA_CLK_ENABLE_VLV; - dpll |= DPLL_INTEGRATED_CLOCK_VLV; + bool is_hdmi; - I915_WRITE(DPLL(pipe), dpll); - POSTING_READ(DPLL(pipe)); + is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); bestn = clock->n; bestm1 = clock->m1; @@ -4387,10 +4079,12 @@ static void vlv_update_pll(struct drm_crtc *crtc, bestp1 = clock->p1; bestp2 = clock->p2; - /* - * In Valleyview PLL and program lane counter registers are exposed - * through DPIO interface - */ + /* Enable DPIO clock input */ + dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | + DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; + I915_WRITE(DPLL(pipe), dpll); + POSTING_READ(DPLL(pipe)); + mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); mdiv |= ((bestn << DPIO_N_SHIFT)); @@ -4401,13 +4095,12 @@ static void vlv_update_pll(struct drm_crtc *crtc, intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); - pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) | + pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) | (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | - (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) | - (5 << DPIO_CLK_BIAS_CTL_SHIFT); + (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT); intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); - intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b); + intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051); dpll |= DPLL_VCO_ENABLE; I915_WRITE(DPLL(pipe), dpll); @@ -4415,44 +4108,19 @@ static void vlv_update_pll(struct drm_crtc *crtc, if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) DRM_ERROR("DPLL %d failed to lock\n", pipe); - intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) - intel_dp_set_m_n(crtc, mode, adjusted_mode); - - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. 
*/ - POSTING_READ(DPLL(pipe)); - udelay(150); + if (is_hdmi) { + u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode); - temp = 0; - if (is_sdvo) { - temp = intel_mode_get_pixel_multiplier(adjusted_mode); if (temp > 1) temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; else temp = 0; - } - I915_WRITE(DPLL_MD(pipe), temp); - POSTING_READ(DPLL_MD(pipe)); - /* Now program lane control registers */ - if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) - || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) - { - temp = 0x1000C4; - if(pipe == 1) - temp |= (1 << 21); - intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp); - } - if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP)) - { - temp = 0x1000C4; - if(pipe == 1) - temp |= (1 << 21); - intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); + I915_WRITE(DPLL_MD(pipe), temp); + POSTING_READ(DPLL_MD(pipe)); } + + intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */ } static void i9xx_update_pll(struct drm_crtc *crtc, @@ -4468,8 +4136,6 @@ static void i9xx_update_pll(struct drm_crtc *crtc, u32 dpll; bool is_sdvo; - i9xx_update_pll_dividers(crtc, clock, reduced_clock); - is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); @@ -4570,7 +4236,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc, static void i8xx_update_pll(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, - intel_clock_t *clock, intel_clock_t *reduced_clock, + intel_clock_t *clock, int num_connectors) { struct drm_device *dev = crtc->dev; @@ -4579,8 +4245,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc, int pipe = intel_crtc->pipe; u32 dpll; - i9xx_update_pll_dividers(crtc, clock, reduced_clock); - dpll = DPLL_VGA_MODE_DIS; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { @@ -4630,64 +4294,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc, I915_WRITE(DPLL(pipe), dpll); } -static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe = intel_crtc->pipe; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - uint32_t vsyncshift; - - if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { - /* the chip adds 2 halflines automatically */ - adjusted_mode->crtc_vtotal -= 1; - adjusted_mode->crtc_vblank_end -= 1; - vsyncshift = adjusted_mode->crtc_hsync_start - - adjusted_mode->crtc_htotal / 2; - } else { - vsyncshift = 0; - } - - if (INTEL_INFO(dev)->gen > 3) - I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); - - I915_WRITE(HTOTAL(cpu_transcoder), - (adjusted_mode->crtc_hdisplay - 1) | - ((adjusted_mode->crtc_htotal - 1) << 16)); - I915_WRITE(HBLANK(cpu_transcoder), - (adjusted_mode->crtc_hblank_start - 1) | - ((adjusted_mode->crtc_hblank_end - 1) << 16)); - I915_WRITE(HSYNC(cpu_transcoder), - (adjusted_mode->crtc_hsync_start - 1) | - ((adjusted_mode->crtc_hsync_end - 1) << 16)); - - I915_WRITE(VTOTAL(cpu_transcoder), - (adjusted_mode->crtc_vdisplay - 1) | - ((adjusted_mode->crtc_vtotal - 1) << 16)); - I915_WRITE(VBLANK(cpu_transcoder), - (adjusted_mode->crtc_vblank_start - 1) | - ((adjusted_mode->crtc_vblank_end - 1) << 16)); - I915_WRITE(VSYNC(cpu_transcoder), - (adjusted_mode->crtc_vsync_start - 1) | - ((adjusted_mode->crtc_vsync_end - 1) << 16)); - - /* Workaround: when the EDP input selection is B, the VTOTAL_B must be - * programmed with the VTOTAL_EDP value. 
Same for VTOTAL_C. This is - * documented on the DDI_FUNC_CTL register description, EDP Input Select - * bits. */ - if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && - (pipe == PIPE_B || pipe == PIPE_C)) - I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); - - /* pipesrc controls the size that is scaled from, which should - * always be the user's requested size. - */ - I915_WRITE(PIPESRC(pipe), - ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); -} - static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -4701,7 +4307,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, int plane = intel_crtc->plane; int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; - u32 dspcntr, pipeconf; + u32 dspcntr, pipeconf, vsyncshift; bool ok, has_reduced_clock = false, is_sdvo = false; bool is_lvds = false, is_tv = false, is_dp = false; struct intel_encoder *encoder; @@ -4765,14 +4371,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, if (is_sdvo && is_tv) i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); + i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? + &reduced_clock : NULL); + if (IS_GEN2(dev)) - i8xx_update_pll(crtc, adjusted_mode, &clock, - has_reduced_clock ? &reduced_clock : NULL, - num_connectors); + i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); else if (IS_VALLEYVIEW(dev)) - vlv_update_pll(crtc, mode, adjusted_mode, &clock, - has_reduced_clock ? &reduced_clock : NULL, - num_connectors); + vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL, + refclk, num_connectors); else i9xx_update_pll(crtc, mode, adjusted_mode, &clock, has_reduced_clock ? &reduced_clock : NULL, @@ -4813,14 +4419,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, } } - if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { - if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { - pipeconf |= PIPECONF_BPP_6 | - PIPECONF_ENABLE | - I965_PIPECONF_ACTIVE; - } - } - DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 
'A' : 'B'); drm_mode_debug_printmodeline(mode); @@ -4836,12 +4434,40 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, pipeconf &= ~PIPECONF_INTERLACE_MASK; if (!IS_GEN2(dev) && - adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; - else + /* the chip adds 2 halflines automatically */ + adjusted_mode->crtc_vtotal -= 1; + adjusted_mode->crtc_vblank_end -= 1; + vsyncshift = adjusted_mode->crtc_hsync_start + - adjusted_mode->crtc_htotal/2; + } else { pipeconf |= PIPECONF_PROGRESSIVE; + vsyncshift = 0; + } + + if (!IS_GEN3(dev)) + I915_WRITE(VSYNCSHIFT(pipe), vsyncshift); + + I915_WRITE(HTOTAL(pipe), + (adjusted_mode->crtc_hdisplay - 1) | + ((adjusted_mode->crtc_htotal - 1) << 16)); + I915_WRITE(HBLANK(pipe), + (adjusted_mode->crtc_hblank_start - 1) | + ((adjusted_mode->crtc_hblank_end - 1) << 16)); + I915_WRITE(HSYNC(pipe), + (adjusted_mode->crtc_hsync_start - 1) | + ((adjusted_mode->crtc_hsync_end - 1) << 16)); - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + I915_WRITE(VTOTAL(pipe), + (adjusted_mode->crtc_vdisplay - 1) | + ((adjusted_mode->crtc_vtotal - 1) << 16)); + I915_WRITE(VBLANK(pipe), + (adjusted_mode->crtc_vblank_start - 1) | + ((adjusted_mode->crtc_vblank_end - 1) << 16)); + I915_WRITE(VSYNC(pipe), + (adjusted_mode->crtc_vsync_start - 1) | + ((adjusted_mode->crtc_vsync_end - 1) << 16)); /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. @@ -4850,6 +4476,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); I915_WRITE(DSPPOS(plane), 0); + I915_WRITE(PIPESRC(pipe), + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); I915_WRITE(PIPECONF(pipe), pipeconf); POSTING_READ(PIPECONF(pipe)); @@ -5040,8 +4668,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc, val |= PIPE_12BPC; break; default: - /* Case prevented by intel_choose_pipe_bpp_dither. 
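The HTOTAL/HBLANK/HSYNC and VTOTAL/VBLANK/VSYNC writes restored above all use the same encoding: the first timing value minus one in the low 16 bits and the second minus one in the high 16 bits. A minimal sketch of that packing; the helper name and the 1920/2200 figures are made up, and the real hardware field widths may be narrower:

#include <stdio.h>
#include <stdint.h>

/* Pack a display timing pair the way the HTOTAL/VTOTAL-style registers
 * expect it: (first - 1) in bits 15:0, (second - 1) in bits 31:16. */
static uint32_t pack_timing(unsigned int first, unsigned int second)
{
        return (uint32_t)(first - 1) | ((uint32_t)(second - 1) << 16);
}

int main(void)
{
        /* e.g. 1920 active pixels out of a 2200-pixel total line */
        uint32_t htotal = pack_timing(1920, 2200);

        printf("HTOTAL = 0x%08x (low=%u high=%u)\n",
               htotal, htotal & 0xffff, htotal >> 16);
        return 0;
}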
*/ - BUG(); + val |= PIPE_8BPC; + break; } val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); @@ -5058,31 +4686,6 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc, POSTING_READ(PIPECONF(pipe)); } -static void haswell_set_pipeconf(struct drm_crtc *crtc, - struct drm_display_mode *adjusted_mode, - bool dither) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - uint32_t val; - - val = I915_READ(PIPECONF(cpu_transcoder)); - - val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); - if (dither) - val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - - val &= ~PIPECONF_INTERLACE_MASK_HSW; - if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACED_ILK; - else - val |= PIPECONF_PROGRESSIVE; - - I915_WRITE(PIPECONF(cpu_transcoder), val); - POSTING_READ(PIPECONF(cpu_transcoder)); -} - static bool ironlake_compute_clocks(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, intel_clock_t *clock, @@ -5146,115 +4749,74 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, return true; } -static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t temp; - - temp = I915_READ(SOUTH_CHICKEN1); - if (temp & FDI_BC_BIFURCATION_SELECT) - return; - - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - - temp |= FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("enabling fdi C rx\n"); - I915_WRITE(SOUTH_CHICKEN1, temp); - POSTING_READ(SOUTH_CHICKEN1); -} - -static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc) -{ - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *pipe_B_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); - - DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n", - intel_crtc->pipe, intel_crtc->fdi_lanes); - if (intel_crtc->fdi_lanes > 4) { - DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n", - intel_crtc->pipe, intel_crtc->fdi_lanes); - /* Clamp lanes to avoid programming the hw with bogus values. */ - intel_crtc->fdi_lanes = 4; - - return false; - } - - if (dev_priv->num_pipe == 2) - return true; - - switch (intel_crtc->pipe) { - case PIPE_A: - return true; - case PIPE_B: - if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled && - intel_crtc->fdi_lanes > 2) { - DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", - intel_crtc->pipe, intel_crtc->fdi_lanes); - /* Clamp lanes to avoid programming the hw with bogus values. */ - intel_crtc->fdi_lanes = 2; - - return false; - } - - if (intel_crtc->fdi_lanes > 2) - WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); - else - cpt_enable_fdi_bc_bifurcation(dev); - - return true; - case PIPE_C: - if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) { - if (intel_crtc->fdi_lanes > 2) { - DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", - intel_crtc->pipe, intel_crtc->fdi_lanes); - /* Clamp lanes to avoid programming the hw with bogus values. 
*/ - intel_crtc->fdi_lanes = 2; - - return false; - } - } else { - DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); - return false; - } - - cpt_enable_fdi_bc_bifurcation(dev); - - return true; - default: - BUG(); - } -} - -static void ironlake_set_m_n(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static int ironlake_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - struct intel_encoder *intel_encoder, *edp_encoder = NULL; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + int num_connectors = 0; + intel_clock_t clock, reduced_clock; + u32 dpll, fp = 0, fp2 = 0; + bool ok, has_reduced_clock = false, is_sdvo = false; + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + struct intel_encoder *encoder, *edp_encoder = NULL; + int ret; struct fdi_m_n m_n = {0}; - int target_clock, pixel_multiplier, lane, link_bw; - bool is_dp = false, is_cpu_edp = false; + u32 temp; + int target_clock, pixel_multiplier, lane, link_bw, factor; + unsigned int pipe_bpp; + bool dither; + bool is_cpu_edp = false, is_pch_edp = false; - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - switch (intel_encoder->type) { + for_each_encoder_on_crtc(dev, crtc, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + case INTEL_OUTPUT_HDMI: + is_sdvo = true; + if (encoder->needs_tv_clock) + is_tv = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; + break; + case INTEL_OUTPUT_ANALOG: + is_crt = true; + break; case INTEL_OUTPUT_DISPLAYPORT: is_dp = true; break; case INTEL_OUTPUT_EDP: is_dp = true; - if (!intel_encoder_is_pch_edp(&intel_encoder->base)) + if (intel_encoder_is_pch_edp(&encoder->base)) + is_pch_edp = true; + else is_cpu_edp = true; - edp_encoder = intel_encoder; + edp_encoder = encoder; break; } + + num_connectors++; + } + + ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, + &has_reduced_clock, &reduced_clock); + if (!ok) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; } + /* Ensure that the cursor is valid for the new mode before changing... 
*/ + intel_crtc_update_cursor(crtc, true); + /* FDI link */ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); lane = 0; @@ -5281,6 +4843,20 @@ static void ironlake_set_m_n(struct drm_crtc *crtc, else target_clock = adjusted_mode->clock; + /* determine panel color depth */ + dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, + adjusted_mode); + if (is_lvds && dev_priv->lvds_dither) + dither = true; + + if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 && + pipe_bpp != 36) { + WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", + pipe_bpp); + pipe_bpp = 24; + } + intel_crtc->bpp = pipe_bpp; + if (!lane) { /* * Account for spread spectrum to avoid @@ -5298,51 +4874,10 @@ static void ironlake_set_m_n(struct drm_crtc *crtc, ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n); - I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); - I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); - I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); -} - -static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, - struct drm_display_mode *adjusted_mode, - intel_clock_t *clock, u32 fp) -{ - struct drm_crtc *crtc = &intel_crtc->base; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder; - uint32_t dpll; - int factor, pixel_multiplier, num_connectors = 0; - bool is_lvds = false, is_sdvo = false, is_tv = false; - bool is_dp = false, is_cpu_edp = false; - - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - switch (intel_encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_SDVO: - case INTEL_OUTPUT_HDMI: - is_sdvo = true; - if (intel_encoder->needs_tv_clock) - is_tv = true; - break; - case INTEL_OUTPUT_TVOUT: - is_tv = true; - break; - case INTEL_OUTPUT_DISPLAYPORT: - is_dp = true; - break; - case INTEL_OUTPUT_EDP: - is_dp = true; - if (!intel_encoder_is_pch_edp(&intel_encoder->base)) - is_cpu_edp = true; - break; - } - - num_connectors++; - } + fp = clock.n << 16 | clock.m1 << 8 | clock.m2; + if (has_reduced_clock) + fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | + reduced_clock.m2; /* Enable autotuning of the PLL clock (if permissible) */ factor = 21; @@ -5354,7 +4889,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, } else if (is_sdvo && is_tv) factor = 20; - if (clock->m < factor * clock->n) + if (clock.m < factor * clock.n) fp |= FP_CB_TUNE; dpll = 0; @@ -5364,119 +4899,55 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, else dpll |= DPLLB_MODE_DAC_SERIAL; if (is_sdvo) { - pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); if (pixel_multiplier > 1) { dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } - dpll |= DPLL_DVO_HIGH_SPEED; - } - if (is_dp && !is_cpu_edp) - dpll |= DPLL_DVO_HIGH_SPEED; - - /* compute bitmask from p1 value */ - dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; - /* also FPA1 */ - dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; - - switch (clock->p2) { - case 5: - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; - break; - case 7: - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; - break; - case 10: - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; - break; - case 14: - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; - break; - } - - if (is_sdvo && is_tv) - dpll |= 
PLL_REF_INPUT_TVCLKINBC; - else if (is_tv) - /* XXX: just matching BIOS for now */ - /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ - dpll |= 3; - else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) - dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; - else - dpll |= PLL_REF_INPUT_DREFCLK; - - return dpll; -} - -static int ironlake_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - int num_connectors = 0; - intel_clock_t clock, reduced_clock; - u32 dpll, fp = 0, fp2 = 0; - bool ok, has_reduced_clock = false; - bool is_lvds = false, is_dp = false, is_cpu_edp = false; - struct intel_encoder *encoder; - u32 temp; - int ret; - bool dither, fdi_config_ok; - - for_each_encoder_on_crtc(dev, crtc, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_DISPLAYPORT: - is_dp = true; - break; - case INTEL_OUTPUT_EDP: - is_dp = true; - if (!intel_encoder_is_pch_edp(&encoder->base)) - is_cpu_edp = true; - break; - } - - num_connectors++; + dpll |= DPLL_DVO_HIGH_SPEED; } + if (is_dp && !is_cpu_edp) + dpll |= DPLL_DVO_HIGH_SPEED; - WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), - "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); + /* compute bitmask from p1 value */ + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + /* also FPA1 */ + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; - ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, - &has_reduced_clock, &reduced_clock); - if (!ok) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); - return -EINVAL; + switch (clock.p2) { + case 5: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; + break; + case 7: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; + break; + case 10: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; + break; + case 14: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; } - /* Ensure that the cursor is valid for the new mode before changing... */ - intel_crtc_update_cursor(crtc, true); - - /* determine panel color depth */ - dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, - adjusted_mode); - if (is_lvds && dev_priv->lvds_dither) - dither = true; - - fp = clock.n << 16 | clock.m1 << 8 | clock.m2; - if (has_reduced_clock) - fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | - reduced_clock.m2; - - dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp); + if (is_sdvo && is_tv) + dpll |= PLL_REF_INPUT_TVCLKINBC; + else if (is_tv) + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); drm_mode_debug_printmodeline(mode); - /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
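In the mode-set hunk above, the feedback dividers are packed as clock.n << 16 | clock.m1 << 8 | clock.m2, and P1 is programmed as a one-hot bit, (1 << (p1 - 1)), shifted into both post-divider fields. A sketch of that packing; the shift macros and divider values below are placeholders, not the real i915 register layout:

#include <stdio.h>
#include <stdint.h>

/* Illustrative shift values -- stand-ins, not the i915 register layout. */
#define P1_POST_DIV_SHIFT_A     16
#define P1_POST_DIV_SHIFT_B     0

struct pll_clock { int n, m1, m2, p1, p2; };

/* FP register: N, M1, M2 packed into one word, as in the hunk above. */
static uint32_t pack_fp(const struct pll_clock *c)
{
        return (uint32_t)c->n << 16 | (uint32_t)c->m1 << 8 | (uint32_t)c->m2;
}

/* P1 is programmed as a one-hot mask: divider value k sets bit (k - 1),
 * written into both post-divider fields. */
static uint32_t pack_p1(const struct pll_clock *c)
{
        return (1u << (c->p1 - 1)) << P1_POST_DIV_SHIFT_A |
               (1u << (c->p1 - 1)) << P1_POST_DIV_SHIFT_B;
}

int main(void)
{
        struct pll_clock c = { .n = 3, .m1 = 12, .m2 = 9, .p1 = 2, .p2 = 10 };

        printf("FP = 0x%08x\n", pack_fp(&c));  /* 0x00030c09 */
        printf("P1 = 0x%08x\n", pack_p1(&c));  /* bit 1 in both fields */
        return 0;
}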
*/ - if (!is_cpu_edp) { + /* CPU eDP is the only output that doesn't need a PCH PLL of its own on + * pre-Haswell/LPT generation */ + if (HAS_PCH_LPT(dev)) { + DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n", + pipe); + } else if (!is_cpu_edp) { struct intel_pch_pll *pll; pll = intel_get_pch_pll(intel_crtc, dpll, fp); @@ -5562,13 +5033,47 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, } } - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + /* the chip adds 2 halflines automatically */ + adjusted_mode->crtc_vtotal -= 1; + adjusted_mode->crtc_vblank_end -= 1; + I915_WRITE(VSYNCSHIFT(pipe), + adjusted_mode->crtc_hsync_start + - adjusted_mode->crtc_htotal/2); + } else { + I915_WRITE(VSYNCSHIFT(pipe), 0); + } + + I915_WRITE(HTOTAL(pipe), + (adjusted_mode->crtc_hdisplay - 1) | + ((adjusted_mode->crtc_htotal - 1) << 16)); + I915_WRITE(HBLANK(pipe), + (adjusted_mode->crtc_hblank_start - 1) | + ((adjusted_mode->crtc_hblank_end - 1) << 16)); + I915_WRITE(HSYNC(pipe), + (adjusted_mode->crtc_hsync_start - 1) | + ((adjusted_mode->crtc_hsync_end - 1) << 16)); + + I915_WRITE(VTOTAL(pipe), + (adjusted_mode->crtc_vdisplay - 1) | + ((adjusted_mode->crtc_vtotal - 1) << 16)); + I915_WRITE(VBLANK(pipe), + (adjusted_mode->crtc_vblank_start - 1) | + ((adjusted_mode->crtc_vblank_end - 1) << 16)); + I915_WRITE(VSYNC(pipe), + (adjusted_mode->crtc_vsync_start - 1) | + ((adjusted_mode->crtc_vsync_end - 1) << 16)); - /* Note, this also computes intel_crtc->fdi_lanes which is used below in - * ironlake_check_fdi_lanes. */ - ironlake_set_m_n(crtc, mode, adjusted_mode); + /* pipesrc controls the size that is scaled from, which should + * always be the user's requested size. + */ + I915_WRITE(PIPESRC(pipe), + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); - fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); + I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); + I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); + I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); + I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); if (is_cpu_edp) ironlake_set_pll_edp(crtc, adjusted_mode->clock); @@ -5587,217 +5092,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, intel_update_linetime_watermarks(dev, pipe, adjusted_mode); - return fdi_config_ok ? ret : -EINVAL; -} - -static int haswell_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - int num_connectors = 0; - intel_clock_t clock, reduced_clock; - u32 dpll = 0, fp = 0, fp2 = 0; - bool ok, has_reduced_clock = false; - bool is_lvds = false, is_dp = false, is_cpu_edp = false; - struct intel_encoder *encoder; - u32 temp; - int ret; - bool dither; - - for_each_encoder_on_crtc(dev, crtc, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_DISPLAYPORT: - is_dp = true; - break; - case INTEL_OUTPUT_EDP: - is_dp = true; - if (!intel_encoder_is_pch_edp(&encoder->base)) - is_cpu_edp = true; - break; - } - - num_connectors++; - } - - if (is_cpu_edp) - intel_crtc->cpu_transcoder = TRANSCODER_EDP; - else - intel_crtc->cpu_transcoder = pipe; - - /* We are not sure yet this won't happen. 
*/ - WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", - INTEL_PCH_TYPE(dev)); - - WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", - num_connectors, pipe_name(pipe)); - - WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & - (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); - - WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); - - if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) - return -EINVAL; - - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, - &has_reduced_clock, - &reduced_clock); - if (!ok) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); - return -EINVAL; - } - } - - /* Ensure that the cursor is valid for the new mode before changing... */ - intel_crtc_update_cursor(crtc, true); - - /* determine panel color depth */ - dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, - adjusted_mode); - if (is_lvds && dev_priv->lvds_dither) - dither = true; - - DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); - drm_mode_debug_printmodeline(mode); - - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - fp = clock.n << 16 | clock.m1 << 8 | clock.m2; - if (has_reduced_clock) - fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | - reduced_clock.m2; - - dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, - fp); - - /* CPU eDP is the only output that doesn't need a PCH PLL of its - * own on pre-Haswell/LPT generation */ - if (!is_cpu_edp) { - struct intel_pch_pll *pll; - - pll = intel_get_pch_pll(intel_crtc, dpll, fp); - if (pll == NULL) { - DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", - pipe); - return -EINVAL; - } - } else - intel_put_pch_pll(intel_crtc); - - /* The LVDS pin pair needs to be on before the DPLLs are - * enabled. This is an exception to the general rule that - * mode_set doesn't turn things on. - */ - if (is_lvds) { - temp = I915_READ(PCH_LVDS); - temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; - if (HAS_PCH_CPT(dev)) { - temp &= ~PORT_TRANS_SEL_MASK; - temp |= PORT_TRANS_SEL_CPT(pipe); - } else { - if (pipe == 1) - temp |= LVDS_PIPEB_SELECT; - else - temp &= ~LVDS_PIPEB_SELECT; - } - - /* set the corresponsding LVDS_BORDER bit */ - temp |= dev_priv->lvds_border_bits; - /* Set the B0-B3 data pairs corresponding to whether - * we're going to set the DPLLs for dual-channel mode or - * not. - */ - if (clock.p2 == 7) - temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; - else - temp &= ~(LVDS_B0B3_POWER_UP | - LVDS_CLKB_POWER_UP); - - /* It would be nice to set 24 vs 18-bit mode - * (LVDS_A3_POWER_UP) appropriately here, but we need to - * look more thoroughly into how panels behave in the - * two modes. - */ - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); - if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) - temp |= LVDS_HSYNC_POLARITY; - if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) - temp |= LVDS_VSYNC_POLARITY; - I915_WRITE(PCH_LVDS, temp); - } - } - - if (is_dp && !is_cpu_edp) { - intel_dp_set_m_n(crtc, mode, adjusted_mode); - } else { - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - /* For non-DP output, clear any trans DP clock recovery - * setting.*/ - I915_WRITE(TRANSDATA_M1(pipe), 0); - I915_WRITE(TRANSDATA_N1(pipe), 0); - I915_WRITE(TRANSDPLINK_M1(pipe), 0); - I915_WRITE(TRANSDPLINK_N1(pipe), 0); - } - } - - intel_crtc->lowfreq_avail = false; - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - if (intel_crtc->pch_pll) { - I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); - - /* Wait for the clocks to stabilize. 
*/ - POSTING_READ(intel_crtc->pch_pll->pll_reg); - udelay(150); - - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); - } - - if (intel_crtc->pch_pll) { - if (is_lvds && has_reduced_clock && i915_powersave) { - I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); - intel_crtc->lowfreq_avail = true; - } else { - I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); - } - } - } - - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); - - if (!is_dp || is_cpu_edp) - ironlake_set_m_n(crtc, mode, adjusted_mode); - - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - if (is_cpu_edp) - ironlake_set_pll_edp(crtc, adjusted_mode->clock); - - haswell_set_pipeconf(crtc, adjusted_mode, dither); - - /* Set up the display plane register */ - I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); - POSTING_READ(DSPCNTR(plane)); - - ret = intel_pipe_set_base(crtc, x, y, fb); - - intel_update_watermarks(dev); - - intel_update_linetime_watermarks(dev, pipe, adjusted_mode); - return ret; } @@ -5809,8 +5103,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_encoder_helper_funcs *encoder_funcs; - struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int ret; @@ -5821,19 +5113,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, x, y, fb); drm_vblank_post_modeset(dev, pipe); - if (ret != 0) - return ret; - - for_each_encoder_on_crtc(dev, crtc, encoder) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", - encoder->base.base.id, - drm_get_encoder_name(&encoder->base), - mode->base.id, mode->name); - encoder_funcs = encoder->base.helper_private; - encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); - } - - return 0; + return ret; } static bool intel_eld_uptodate(struct drm_connector *connector, @@ -6469,7 +5749,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, int depth, int bpp) { struct drm_i915_gem_object *obj; - struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + struct drm_mode_fb_cmd2 mode_cmd; obj = i915_gem_alloc_object(dev, intel_framebuffer_size_for_mode(mode, bpp)); @@ -6599,19 +5879,24 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); - return false; + goto fail; } if (!intel_set_mode(crtc, mode, 0, 0, fb)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); - return false; + goto fail; } /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); + return true; +fail: + connector->encoder = NULL; + encoder->crtc = NULL; + return false; } void intel_release_load_detect_pipe(struct drm_connector *connector, @@ -6736,12 +6021,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; + int pipe = intel_crtc->pipe; struct drm_display_mode *mode; - int htot = I915_READ(HTOTAL(cpu_transcoder)); - int hsync = I915_READ(HSYNC(cpu_transcoder)); - int vtot = I915_READ(VTOTAL(cpu_transcoder)); - int vsync = 
I915_READ(VSYNC(cpu_transcoder)); + int htot = I915_READ(HTOTAL(pipe)); + int hsync = I915_READ(HSYNC(pipe)); + int vtot = I915_READ(VTOTAL(pipe)); + int vsync = I915_READ(VSYNC(pipe)); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -6898,19 +6183,14 @@ static void intel_unpin_work_fn(struct work_struct *__work) { struct intel_unpin_work *work = container_of(__work, struct intel_unpin_work, work); - struct drm_device *dev = work->crtc->dev; - mutex_lock(&dev->struct_mutex); + mutex_lock(&work->dev->struct_mutex); intel_unpin_fb_obj(work->old_fb_obj); drm_gem_object_unreference(&work->pending_flip_obj->base); drm_gem_object_unreference(&work->old_fb_obj->base); - intel_update_fbc(dev); - mutex_unlock(&dev->struct_mutex); - - BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); - atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); - + intel_update_fbc(work->dev); + mutex_unlock(&work->dev->struct_mutex); kfree(work); } @@ -6921,6 +6201,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; struct drm_i915_gem_object *obj; + struct drm_pending_vblank_event *e; + struct timeval tvbl; unsigned long flags; /* Ignore early vblank irqs */ @@ -6936,8 +6218,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev, intel_crtc->unpin_work = NULL; - if (work->event) - drm_send_vblank_event(dev, intel_crtc->pipe, work->event); + if (work->event) { + e = work->event; + e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); + + e->event.tv_sec = tvbl.tv_sec; + e->event.tv_usec = tvbl.tv_usec; + + list_add_tail(&e->base.link, + &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } drm_vblank_put(dev, intel_crtc->pipe); @@ -6947,9 +6238,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev, atomic_clear_mask(1 << intel_crtc->plane, &obj->pending_flip.counter); - wake_up(&dev_priv->pending_flip_queue); - queue_work(dev_priv->wq, &work->work); + wake_up(&dev_priv->pending_flip_queue); + schedule_work(&work->work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); } @@ -7250,7 +6541,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return -ENOMEM; work->event = event; - work->crtc = crtc; + work->dev = crtc->dev; intel_fb = to_intel_framebuffer(crtc->fb); work->old_fb_obj = intel_fb->obj; INIT_WORK(&work->work, intel_unpin_work_fn); @@ -7275,9 +6566,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - if (atomic_read(&intel_crtc->unpin_work_count) >= 2) - flush_workqueue(dev_priv->wq); - ret = i915_mutex_lock_interruptible(dev); if (ret) goto cleanup; @@ -7296,7 +6584,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, * the flip occurs and the object is no longer visible. 
*/ atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); - atomic_inc(&intel_crtc->unpin_work_count); ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); if (ret) @@ -7311,7 +6598,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_pending: - atomic_dec(&intel_crtc->unpin_work_count); atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); @@ -7607,7 +6893,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) dev->mode_config.dpms_property; connector->dpms = DRM_MODE_DPMS_ON; - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, dpms_property, DRM_MODE_DPMS_ON); @@ -7729,6 +7015,8 @@ bool intel_set_mode(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; + struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_encoder *encoder; struct intel_crtc *intel_crtc; unsigned disable_pipes, prepare_pipes, modeset_pipes; bool ret = true; @@ -7773,9 +7061,6 @@ bool intel_set_mode(struct drm_crtc *crtc, * update the the output configuration. */ intel_modeset_update_state(dev, prepare_pipes); - if (dev_priv->display.modeset_global_resources) - dev_priv->display.modeset_global_resources(dev); - /* Set up the DPLL and any encoders state that needs to adjust or depend * on the DPLL. */ @@ -7785,6 +7070,18 @@ bool intel_set_mode(struct drm_crtc *crtc, x, y, fb); if (!ret) goto done; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + + if (encoder->crtc != &intel_crtc->base) + continue; + + DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", + encoder->base.id, drm_get_encoder_name(encoder), + mode->base.id, mode->name); + encoder_funcs = encoder->helper_private; + encoder_funcs->mode_set(encoder, mode, adjusted_mode); + } } /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ @@ -8123,12 +7420,6 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { .page_flip = intel_crtc_page_flip, }; -static void intel_cpu_pll_init(struct drm_device *dev) -{ - if (IS_HASWELL(dev)) - intel_ddi_pll_init(dev); -} - static void intel_pch_pll_init(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -8168,7 +7459,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) /* Swap pipes & planes for FBC on pre-965 */ intel_crtc->pipe = pipe; intel_crtc->plane = pipe; - intel_crtc->cpu_transcoder = pipe; if (IS_MOBILE(dev) && IS_GEN3(dev)) { DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); intel_crtc->plane = !pipe; @@ -8261,9 +7551,17 @@ static void intel_setup_outputs(struct drm_device *dev) I915_WRITE(PFIT_CONTROL, 0); } - if (!(IS_HASWELL(dev) && - (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) - intel_crt_init(dev); + if (HAS_PCH_SPLIT(dev)) { + dpd_is_edp = intel_dpd_is_edp(dev); + + if (has_edp_a(dev)) + intel_dp_init(dev, DP_A, PORT_A); + + if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_D, PORT_D); + } + + intel_crt_init(dev); if (IS_HASWELL(dev)) { int found; @@ -8286,10 +7584,6 @@ static void intel_setup_outputs(struct drm_device *dev) intel_ddi_init(dev, PORT_D); } else if (HAS_PCH_SPLIT(dev)) { int found; - dpd_is_edp = intel_dpd_is_edp(dev); - - if (has_edp_a(dev)) - intel_dp_init(dev, DP_A, PORT_A); if (I915_READ(HDMIB) & PORT_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ @@ -8309,15 +7603,11 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_C) & DP_DETECTED) intel_dp_init(dev, PCH_DP_C, PORT_C); - if (I915_READ(PCH_DP_D) & DP_DETECTED) + if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev)) { int found; - /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ - if (I915_READ(DP_C) & DP_DETECTED) - intel_dp_init(dev, DP_C, PORT_C); - if (I915_READ(SDVOB) & PORT_DETECTED) { /* SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, SDVOB, true); @@ -8330,6 +7620,9 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(SDVOC) & PORT_DETECTED) intel_hdmi_init(dev, SDVOC, PORT_C); + /* Shares lanes with HDMI on SDVOC */ + if (I915_READ(DP_C) & DP_DETECTED) + intel_dp_init(dev, DP_C, PORT_C); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; @@ -8385,8 +7678,6 @@ static void intel_setup_outputs(struct drm_device *dev) if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) ironlake_init_pch_refclk(dev); - - drm_helper_move_panel_connectors_to_head(dev); } static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) @@ -8427,51 +7718,27 @@ int intel_framebuffer_init(struct drm_device *dev, if (mode_cmd->pitches[0] & 63) return -EINVAL; - /* FIXME <= Gen4 stride limits are bit unclear */ - if (mode_cmd->pitches[0] > 32768) - return -EINVAL; - - if (obj->tiling_mode != I915_TILING_NONE && - mode_cmd->pitches[0] != obj->stride) - return -EINVAL; - - /* Reject formats not supported by any plane early. 
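The framebuffer checks being rewritten here key off DRM fourcc pixel-format codes (the switch that follows lists the accepted ones); each code is four ASCII bytes packed little-endian. A small sketch of how such codes are built and matched -- the packing mirrors the drm_fourcc.h convention, but the macro names and the trimmed allow-list are purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* DRM pixel formats are fourcc codes: four ASCII bytes, little-endian. */
#define FOURCC(a, b, c, d) \
        ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define FMT_RGB565   FOURCC('R', 'G', '1', '6')
#define FMT_XRGB8888 FOURCC('X', 'R', '2', '4')

/* Minimal "is this format on the allow-list" check, shaped like the
 * switch in the surrounding hunk but with the list trimmed. */
static int format_supported(uint32_t fourcc)
{
        switch (fourcc) {
        case FMT_RGB565:
        case FMT_XRGB8888:
                return 1;
        default:
                return 0;
        }
}

int main(void)
{
        printf("XR24 = 0x%08x supported=%d\n",
               FMT_XRGB8888, format_supported(FMT_XRGB8888));
        printf("bogus supported=%d\n", format_supported(0xdeadbeef));
        return 0;
}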
*/ switch (mode_cmd->pixel_format) { - case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_ARGB1555: - if (INTEL_INFO(dev)->gen > 3) - return -EINVAL; - break; case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: - if (INTEL_INFO(dev)->gen < 4) - return -EINVAL; + /* RGB formats are common across chipsets */ break; case DRM_FORMAT_YUYV: case DRM_FORMAT_UYVY: case DRM_FORMAT_YVYU: case DRM_FORMAT_VYUY: - if (INTEL_INFO(dev)->gen < 6) - return -EINVAL; break; default: - DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); + DRM_DEBUG_KMS("unsupported pixel format %u\n", + mode_cmd->pixel_format); return -EINVAL; } - /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ - if (mode_cmd->offsets[0] != 0) - return -EINVAL; - ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { DRM_ERROR("framebuffer init failed %d\n", ret); @@ -8509,13 +7776,7 @@ static void intel_init_display(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* We always want a DPMS function */ - if (IS_HASWELL(dev)) { - dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; - dev_priv->display.crtc_enable = haswell_crtc_enable; - dev_priv->display.crtc_disable = haswell_crtc_disable; - dev_priv->display.off = haswell_crtc_off; - dev_priv->display.update_plane = ironlake_update_plane; - } else if (HAS_PCH_SPLIT(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = ironlake_crtc_disable; @@ -8566,8 +7827,6 @@ static void intel_init_display(struct drm_device *dev) /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; dev_priv->display.write_eld = ironlake_write_eld; - dev_priv->display.modeset_global_resources = - ivb_modeset_global_resources; } else if (IS_HASWELL(dev)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; dev_priv->display.write_eld = haswell_write_eld; @@ -8799,7 +8058,6 @@ void intel_modeset_init(struct drm_device *dev) DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); } - intel_cpu_pll_init(dev); intel_pch_pll_init(dev); /* Just disable it once at startup */ @@ -8869,7 +8127,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) u32 reg; /* Clear any frame start delays used for debugging left by the BIOS */ - reg = PIPECONF(crtc->cpu_transcoder); + reg = PIPECONF(crtc->pipe); I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); /* We need to sanitize the plane -> pipe mapping first because this will @@ -8997,35 +8255,10 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) struct intel_encoder *encoder; struct intel_connector *connector; - if (IS_HASWELL(dev)) { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); - - if (tmp & TRANS_DDI_FUNC_ENABLE) { - switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { - case TRANS_DDI_EDP_INPUT_A_ON: - case TRANS_DDI_EDP_INPUT_A_ONOFF: - pipe = PIPE_A; - break; - case TRANS_DDI_EDP_INPUT_B_ONOFF: - pipe = PIPE_B; - break; - case TRANS_DDI_EDP_INPUT_C_ONOFF: - pipe = PIPE_C; - break; - } - - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - crtc->cpu_transcoder = TRANSCODER_EDP; - - 
DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", - pipe_name(pipe)); - } - } - for_each_pipe(pipe) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); + tmp = I915_READ(PIPECONF(pipe)); if (tmp & PIPECONF_ENABLE) crtc->active = true; else @@ -9038,9 +8271,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) crtc->active ? "enabled" : "disabled"); } - if (IS_HASWELL(dev)) - intel_ddi_setup_hw_pll_state(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { pipe = 0; @@ -9090,8 +8320,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) intel_modeset_update_staged_output_state(dev); intel_modeset_check_state(dev); - - drm_mode_config_reset(dev); } void intel_modeset_gem_init(struct drm_device *dev) @@ -9219,7 +8447,6 @@ intel_display_capture_error_state(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_display_error_state *error; - enum transcoder cpu_transcoder; int i; error = kmalloc(sizeof(*error), GFP_ATOMIC); @@ -9227,8 +8454,6 @@ intel_display_capture_error_state(struct drm_device *dev) return NULL; for_each_pipe(i) { - cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); - error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); error->cursor[i].base = I915_READ(CURBASE(i)); @@ -9243,14 +8468,14 @@ intel_display_capture_error_state(struct drm_device *dev) error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } - error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); + error->pipe[i].conf = I915_READ(PIPECONF(i)); error->pipe[i].source = I915_READ(PIPESRC(i)); - error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); - error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); - error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); - error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); - error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); - error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); + error->pipe[i].htotal = I915_READ(HTOTAL(i)); + error->pipe[i].hblank = I915_READ(HBLANK(i)); + error->pipe[i].hsync = I915_READ(HSYNC(i)); + error->pipe[i].vtotal = I915_READ(VTOTAL(i)); + error->pipe[i].vblank = I915_READ(VBLANK(i)); + error->pipe[i].vsync = I915_READ(VSYNC(i)); } return error; diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index 1b63d55318a0..368ed8ef1600 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -36,6 +36,8 @@ #include #include "i915_drv.h" +#define DP_RECEIVER_CAP_SIZE 0xf +#define DP_LINK_STATUS_SIZE 6 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) /** @@ -47,9 +49,7 @@ */ static bool is_edp(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - - return intel_dig_port->base.type == INTEL_OUTPUT_EDP; + return intel_dp->base.type == INTEL_OUTPUT_EDP; } /** @@ -76,16 +76,15 @@ static bool is_cpu_edp(struct intel_dp *intel_dp) return is_edp(intel_dp) && !is_pch_edp(intel_dp); } -static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) +static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - - return intel_dig_port->base.base.dev; + return container_of(encoder, struct intel_dp, base.base); } static struct intel_dp *intel_attached_dp(struct drm_connector *connector) { - return 
enc_to_intel_dp(&intel_attached_encoder(connector)->base); + return container_of(intel_attached_encoder(connector), + struct intel_dp, base); } /** @@ -107,31 +106,48 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) return is_pch_edp(intel_dp); } +static void intel_dp_start_link_train(struct intel_dp *intel_dp); +static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); void intel_edp_link_config(struct intel_encoder *intel_encoder, int *lane_num, int *link_bw) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); *lane_num = intel_dp->lane_count; - *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + if (intel_dp->link_bw == DP_LINK_BW_1_62) + *link_bw = 162000; + else if (intel_dp->link_bw == DP_LINK_BW_2_7) + *link_bw = 270000; } int intel_edp_target_clock(struct intel_encoder *intel_encoder, struct drm_display_mode *mode) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); - if (intel_connector->panel.fixed_mode) - return intel_connector->panel.fixed_mode->clock; + if (intel_dp->panel_fixed_mode) + return intel_dp->panel_fixed_mode->clock; else return mode->clock; } +static int +intel_dp_max_lane_count(struct intel_dp *intel_dp) +{ + int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; + switch (max_lane_count) { + case 1: case 2: case 4: + break; + default: + max_lane_count = 4; + } + return max_lane_count; +} + static int intel_dp_max_link_bw(struct intel_dp *intel_dp) { @@ -192,7 +208,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp, bool adjust_mode) { int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); - int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); + int max_lanes = intel_dp_max_lane_count(intel_dp); int max_rate, mode_rate; mode_rate = intel_dp_link_required(mode->clock, 24); @@ -218,14 +234,12 @@ intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; - if (is_edp(intel_dp) && fixed_mode) { - if (mode->hdisplay > fixed_mode->hdisplay) + if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { + if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) + if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) return MODE_PANEL; } @@ -271,10 +285,6 @@ intel_hrawclk(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t clkcfg; - /* There is no CLKCFG reg in Valleyview. 
VLV hrawclk is 200 MHz */ - if (IS_VALLEYVIEW(dev)) - return 200; - clkcfg = I915_READ(CLKCFG); switch (clkcfg & CLKCFG_FSB_MASK) { case CLKCFG_FSB_400: @@ -300,7 +310,7 @@ intel_hrawclk(struct drm_device *dev) static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; @@ -308,7 +318,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; @@ -317,7 +327,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) static void intel_dp_check_edp(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; if (!is_edp(intel_dp)) @@ -336,8 +346,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *recv, int recv_size) { uint32_t output_reg = intel_dp->output_reg; - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_data = ch_ctl + 4; @@ -347,29 +356,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint32_t aux_clock_divider; int try, precharge; - if (IS_HASWELL(dev)) { - switch (intel_dig_port->port) { - case PORT_A: - ch_ctl = DPA_AUX_CH_CTL; - ch_data = DPA_AUX_CH_DATA1; - break; - case PORT_B: - ch_ctl = PCH_DPB_AUX_CH_CTL; - ch_data = PCH_DPB_AUX_CH_DATA1; - break; - case PORT_C: - ch_ctl = PCH_DPC_AUX_CH_CTL; - ch_data = PCH_DPC_AUX_CH_DATA1; - break; - case PORT_D: - ch_ctl = PCH_DPD_AUX_CH_CTL; - ch_data = PCH_DPD_AUX_CH_DATA1; - break; - default: - BUG(); - } - } - intel_dp_check_edp(intel_dp); /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the @@ -379,16 +365,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, * clock divider. 
*/ if (is_cpu_edp(intel_dp)) { - if (IS_HASWELL(dev)) - aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; - else if (IS_VALLEYVIEW(dev)) - aux_clock_divider = 100; - else if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev) || IS_GEN7(dev)) aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ else aux_clock_divider = 225; /* eDP input clock at 450Mhz */ } else if (HAS_PCH_SPLIT(dev)) - aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ else aux_clock_divider = intel_hrawclk(dev) / 2; @@ -660,6 +642,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, return -EREMOTEIO; } +static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); +static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); + static int intel_dp_i2c_init(struct intel_dp *intel_dp, struct intel_connector *intel_connector, const char *name) @@ -685,25 +670,22 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, return ret; } -bool +static bool intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_connector *intel_connector = intel_dp->attached_connector; int lane_count, clock; - int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + int max_lane_count = intel_dp_max_lane_count(intel_dp); int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; int bpp, mode_rate; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { - intel_fixed_panel_mode(intel_connector->panel.fixed_mode, - adjusted_mode); - intel_pch_panel_fitting(dev, - intel_connector->panel.fitting_mode, + if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { + intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); + intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); } @@ -780,23 +762,21 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; - struct intel_encoder *intel_encoder; - struct intel_dp *intel_dp; + struct intel_encoder *encoder; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int lane_count = 4; struct intel_dp_m_n m_n; int pipe = intel_crtc->pipe; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; /* * Find the lane count in the intel_encoder private */ - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - intel_dp = enc_to_intel_dp(&intel_encoder->base); + for_each_encoder_on_crtc(dev, crtc, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || + intel_dp->base.type == INTEL_OUTPUT_EDP) { lane_count = intel_dp->lane_count; break; @@ -811,46 +791,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp_compute_m_n(intel_crtc->bpp, lane_count, mode->clock, adjusted_mode->clock, &m_n); - if (IS_HASWELL(dev)) { - I915_WRITE(PIPE_DATA_M1(cpu_transcoder), - TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); - I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); - I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); - } 
else if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(TRANSDATA_M1(pipe), + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); - } else if (IS_VALLEYVIEW(dev)) { - I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); - I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); - I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); } else { I915_WRITE(PIPE_GMCH_DATA_M(pipe), - TU_SIZE(m_n.tu) | m_n.gmch_m); + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); } } -void intel_dp_init_link_config(struct intel_dp *intel_dp) -{ - memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); - intel_dp->link_configuration[0] = intel_dp->link_bw; - intel_dp->link_configuration[1] = intel_dp->lane_count; - intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; - /* - * Check for DPCD version > 1.1 and enhanced framing support - */ - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && - (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { - intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - } -} - static void intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -858,7 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_crtc *crtc = encoder->crtc; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); /* @@ -903,12 +860,21 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_write_eld(encoder, adjusted_mode); } - - intel_dp_init_link_config(intel_dp); + memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + intel_dp->link_configuration[0] = intel_dp->link_bw; + intel_dp->link_configuration[1] = intel_dp->lane_count; + intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; + /* + * Check for DPCD version > 1.1 and enhanced framing support + */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && + (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { + intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + } /* Split out the IBX/CPU vs CPT settings */ - if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { + if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) @@ -965,7 +931,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", @@ -1012,9 +978,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) return control; } -void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) +static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) { - 
struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; @@ -1053,7 +1019,7 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; @@ -1075,14 +1041,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work) { struct intel_dp *intel_dp = container_of(to_delayed_work(__work), struct intel_dp, panel_vdd_work); - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; mutex_lock(&dev->mode_config.mutex); ironlake_panel_vdd_off_sync(intel_dp); mutex_unlock(&dev->mode_config.mutex); } -void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) +static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { if (!is_edp(intel_dp)) return; @@ -1105,9 +1071,9 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) } } -void ironlake_edp_panel_on(struct intel_dp *intel_dp) +static void ironlake_edp_panel_on(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; @@ -1147,9 +1113,9 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp) } } -void ironlake_edp_panel_off(struct intel_dp *intel_dp) +static void ironlake_edp_panel_off(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; @@ -1172,12 +1138,10 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp) ironlake_wait_panel_off(intel_dp); } -void ironlake_edp_backlight_on(struct intel_dp *intel_dp) +static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; u32 pp; if (!is_edp(intel_dp)) @@ -1195,21 +1159,17 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp) pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - - intel_panel_enable_backlight(dev, pipe); } -void ironlake_edp_backlight_off(struct intel_dp *intel_dp) +static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (!is_edp(intel_dp)) return; - intel_panel_disable_backlight(dev); - DRM_DEBUG_KMS("\n"); pp = ironlake_get_pp_control(dev_priv); pp &= ~EDP_BLC_ENABLE; @@ -1220,9 +1180,8 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp) static void ironlake_edp_pll_on(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - struct drm_device *dev = crtc->dev; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; @@ 
-1246,9 +1205,8 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp) static void ironlake_edp_pll_off(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - struct drm_device *dev = crtc->dev; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; @@ -1270,7 +1228,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp) } /* If the sink supports it, try to set the power state appropriately */ -void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) +static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) { int ret, i; @@ -1340,11 +1298,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, return true; } } - - DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", - intel_dp->output_reg); } + DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg); + return true; } @@ -1439,6 +1396,38 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ DP_LINK_STATUS_SIZE); } +static uint8_t +intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static uint8_t +intel_get_adjust_request_voltage(uint8_t adjust_request[2], + int lane) +{ + int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + uint8_t l = adjust_request[lane>>1]; + + return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static uint8_t +intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], + int lane) +{ + int s = ((lane & 1) ? + DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + uint8_t l = adjust_request[lane>>1]; + + return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + + #if 0 static char *voltage_names[] = { "0.4V", "0.6V", "0.8V", "1.2V" @@ -1459,7 +1448,7 @@ static char *link_train_names[] = { static uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) return DP_TRAIN_VOLTAGE_SWING_800; @@ -1472,21 +1461,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) static uint8_t intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; - if (IS_HASWELL(dev)) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_400: - return DP_TRAIN_PRE_EMPHASIS_9_5; - case DP_TRAIN_VOLTAGE_SWING_600: - return DP_TRAIN_PRE_EMPHASIS_6; - case DP_TRAIN_VOLTAGE_SWING_800: - return DP_TRAIN_PRE_EMPHASIS_3_5; - case DP_TRAIN_VOLTAGE_SWING_1200: - default: - return DP_TRAIN_PRE_EMPHASIS_0; - } - } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { + if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: return DP_TRAIN_PRE_EMPHASIS_6; @@ -1517,12 +1494,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST uint8_t v = 0; uint8_t p = 0; int lane; + uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); uint8_t voltage_max; uint8_t preemph_max; for (lane = 0; lane < intel_dp->lane_count; lane++) { - uint8_t this_v = 
drm_dp_get_adjust_request_voltage(link_status, lane); - uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); + uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); if (this_v > v) v = this_v; @@ -1639,38 +1617,52 @@ intel_gen7_edp_signal_levels(uint8_t train_set) } } -/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ -static uint32_t -intel_dp_signal_levels_hsw(uint8_t train_set) +static uint8_t +intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) { - int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | - DP_TRAIN_PRE_EMPHASIS_MASK); - switch (signal_levels) { - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_400MV_0DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_400MV_3_5DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: - return DDI_BUF_EMP_400MV_6DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: - return DDI_BUF_EMP_400MV_9_5DB_HSW; + int s = (lane & 1) * 4; + uint8_t l = link_status[lane>>1]; - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_600MV_0DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_600MV_3_5DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: - return DDI_BUF_EMP_600MV_6DB_HSW; + return (l >> s) & 0xf; +} - case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_800MV_0DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_800MV_3_5DB_HSW; - default: - DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" - "0x%x\n", signal_levels); - return DDI_BUF_EMP_400MV_0DB_HSW; +/* Check for clock recovery is done on all channels */ +static bool +intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) +{ + int lane; + uint8_t lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = intel_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +/* Check to see if channel eq is done on all channels */ +#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ + DP_LANE_CHANNEL_EQ_DONE|\ + DP_LANE_SYMBOL_LOCKED) +static bool +intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) +{ + uint8_t lane_align; + uint8_t lane_status; + int lane; + + lane_align = intel_dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < intel_dp->lane_count; lane++) { + lane_status = intel_get_lane_status(link_status, lane); + if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) + return false; } + return true; } static bool @@ -1678,49 +1670,11 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_dig_port->port; int ret; - uint32_t temp; - - if (IS_HASWELL(dev)) { - temp = I915_READ(DP_TP_CTL(port)); - - if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) - temp |= DP_TP_CTL_SCRAMBLE_DISABLE; - else - temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; - - 
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { - case DP_TRAINING_PATTERN_DISABLE: - temp |= DP_TP_CTL_LINK_TRAIN_IDLE; - I915_WRITE(DP_TP_CTL(port), temp); - - if (wait_for((I915_READ(DP_TP_STATUS(port)) & - DP_TP_STATUS_IDLE_DONE), 1)) - DRM_ERROR("Timed out waiting for DP idle patterns\n"); - temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; - temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; - - break; - case DP_TRAINING_PATTERN_1: - temp |= DP_TP_CTL_LINK_TRAIN_PAT1; - break; - case DP_TRAINING_PATTERN_2: - temp |= DP_TP_CTL_LINK_TRAIN_PAT2; - break; - case DP_TRAINING_PATTERN_3: - temp |= DP_TP_CTL_LINK_TRAIN_PAT3; - break; - } - I915_WRITE(DP_TP_CTL(port), temp); - - } else if (HAS_PCH_CPT(dev) && - (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { @@ -1780,20 +1734,16 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, } /* Enable corresponding port and start training pattern 1 */ -void +static void intel_dp_start_link_train(struct intel_dp *intel_dp) { - struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; - struct drm_device *dev = encoder->dev; + struct drm_device *dev = intel_dp->base.base.dev; int i; uint8_t voltage; bool clock_recovery = false; int voltage_tries, loop_tries; uint32_t DP = intel_dp->DP; - if (IS_HASWELL(dev)) - intel_ddi_prepare_link_retrain(encoder); - /* Write the link configuration data */ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, intel_dp->link_configuration, @@ -1811,11 +1761,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) uint8_t link_status[DP_LINK_STATUS_SIZE]; uint32_t signal_levels; - if (IS_HASWELL(dev)) { - signal_levels = intel_dp_signal_levels_hsw( - intel_dp->train_set[0]); - DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; - } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { + + if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { @@ -1823,24 +1770,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); + DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", - signal_levels); - /* Set training pattern 1 */ if (!intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) break; + /* Set training pattern 1 */ - drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); + udelay(100); if (!intel_dp_get_link_status(intel_dp, link_status)) { DRM_ERROR("failed to get link status\n"); break; } - if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { DRM_DEBUG_KMS("clock recovery OK\n"); clock_recovery = true; break; @@ -1879,10 +1825,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) intel_dp->DP = DP; } -void +static void intel_dp_complete_link_train(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; bool channel_eq = false; int tries, cr_tries; uint32_t DP = intel_dp->DP; @@ 
-1902,10 +1848,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) break; } - if (IS_HASWELL(dev)) { - signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); - DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; - } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { + if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { @@ -1922,18 +1865,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP_LINK_SCRAMBLING_DISABLE)) break; - drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); + udelay(400); if (!intel_dp_get_link_status(intel_dp, link_status)) break; /* Make sure clock is still ok */ - if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { intel_dp_start_link_train(intel_dp); cr_tries++; continue; } - if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { + if (intel_channel_eq_ok(intel_dp, link_status)) { channel_eq = true; break; } @@ -1952,38 +1895,16 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) ++tries; } - if (channel_eq) - DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); - intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); } static void intel_dp_link_down(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; - /* - * DDI code has a strict mode set sequence and we should try to respect - * it, otherwise we might hang the machine in many different ways. So we - * really should be disabling the port only on a complete crtc_disable - * sequence. This function is just called under two conditions on DDI - * code: - * - Link train failed while doing crtc_enable, and on this case we - * really should respect the mode set sequence and wait for a - * crtc_disable. - * - Someone turned the monitor off and intel_dp_check_link_status - * called us. We don't need to disable the whole port on this case, so - * when someone turns the monitor on again, - * intel_ddi_prepare_link_retrain will take care of redoing the link - * train. - */ - if (IS_HASWELL(dev)) - return; - if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) return; @@ -2002,7 +1923,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (HAS_PCH_IBX(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; + struct drm_crtc *crtc = intel_dp->base.base.crtc; /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the @@ -2103,7 +2024,7 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { /* NAK by default */ - intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); + intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); } /* @@ -2115,17 +2036,16 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp) * 4. 
Check link status on receipt of hot-plug interrupt */ -void +static void intel_dp_check_link_status(struct intel_dp *intel_dp) { - struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; u8 sink_irq_vector; u8 link_status[DP_LINK_STATUS_SIZE]; - if (!intel_encoder->connectors_active) + if (!intel_dp->base.connectors_active) return; - if (WARN_ON(!intel_encoder->base.crtc)) + if (WARN_ON(!intel_dp->base.base.crtc)) return; /* Try to read receiver status if the link appears to be up */ @@ -2154,9 +2074,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); } - if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { + if (!intel_channel_eq_ok(intel_dp, link_status)) { DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", - drm_get_encoder_name(&intel_encoder->base)); + drm_get_encoder_name(&intel_dp->base.base)); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); } @@ -2205,12 +2125,11 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) static enum drm_connector_status ironlake_dp_detect(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); enum drm_connector_status status; /* Can't disconnect eDP, but you can close the lid... */ if (is_edp(intel_dp)) { - status = intel_panel_detect(dev); + status = intel_panel_detect(intel_dp->base.base.dev); if (status == connector_status_unknown) status = connector_status_connected; return status; @@ -2222,7 +2141,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp) static enum drm_connector_status g4x_dp_detect(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t bit; @@ -2249,45 +2168,44 @@ g4x_dp_detect(struct intel_dp *intel_dp) static struct edid * intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { - struct intel_connector *intel_connector = to_intel_connector(connector); - - /* use cached edid if we have one */ - if (intel_connector->edid) { - struct edid *edid; - int size; + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct edid *edid; + int size; - /* invalid edid */ - if (IS_ERR(intel_connector->edid)) + if (is_edp(intel_dp)) { + if (!intel_dp->edid) return NULL; - size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; + size = (intel_dp->edid->extensions + 1) * EDID_LENGTH; edid = kmalloc(size, GFP_KERNEL); if (!edid) return NULL; - memcpy(edid, intel_connector->edid, size); + memcpy(edid, intel_dp->edid, size); return edid; } - return drm_get_edid(connector, adapter); + edid = drm_get_edid(connector, adapter); + return edid; } static int intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { - struct intel_connector *intel_connector = to_intel_connector(connector); - - /* use cached edid if we have one */ - if (intel_connector->edid) { - /* invalid edid */ - if (IS_ERR(intel_connector->edid)) - return 0; + struct intel_dp *intel_dp = intel_attached_dp(connector); + int ret; - return intel_connector_update_modes(connector, - intel_connector->edid); + if (is_edp(intel_dp)) { + drm_mode_connector_update_edid_property(connector, + intel_dp->edid); + ret = drm_add_edid_modes(connector, intel_dp->edid); + drm_edid_to_eld(connector, + intel_dp->edid); + return intel_dp->edid_mode_count; } - return intel_ddc_get_modes(connector, adapter); + ret = intel_ddc_get_modes(connector, adapter); + 
return ret; } @@ -2301,12 +2219,9 @@ static enum drm_connector_status intel_dp_detect(struct drm_connector *connector, bool force) { struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct intel_encoder *intel_encoder = &intel_dig_port->base; - struct drm_device *dev = connector->dev; + struct drm_device *dev = intel_dp->base.base.dev; enum drm_connector_status status; struct edid *edid = NULL; - char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; intel_dp->has_audio = false; @@ -2315,9 +2230,10 @@ intel_dp_detect(struct drm_connector *connector, bool force) else status = g4x_dp_detect(intel_dp); - hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), - 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); - DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); + DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", + intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], + intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], + intel_dp->dpcd[6], intel_dp->dpcd[7]); if (status != connector_status_connected) return status; @@ -2334,31 +2250,49 @@ intel_dp_detect(struct drm_connector *connector, bool force) } } - if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; return connector_status_connected; } static int intel_dp_get_modes(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_device *dev = connector->dev; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; /* We should parse the EDID data and find out if it has an audio sink */ ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); - if (ret) + if (ret) { + if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) { + struct drm_display_mode *newmode; + list_for_each_entry(newmode, &connector->probed_modes, + head) { + if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) { + intel_dp->panel_fixed_mode = + drm_mode_duplicate(dev, newmode); + break; + } + } + } return ret; + } - /* if eDP has no EDID, fall back to fixed mode */ - if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { - struct drm_display_mode *mode; - mode = drm_mode_duplicate(dev, - intel_connector->panel.fixed_mode); - if (mode) { + /* if eDP has no EDID, try to use fixed panel mode from VBT */ + if (is_edp(intel_dp)) { + /* initialize panel mode from VBT if available for eDP */ + if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { + intel_dp->panel_fixed_mode = + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (intel_dp->panel_fixed_mode) { + intel_dp->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; + } + } + if (intel_dp->panel_fixed_mode) { + struct drm_display_mode *mode; + mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); drm_mode_probed_add(connector, mode); return 1; } @@ -2388,12 +2322,10 @@ intel_dp_set_property(struct drm_connector *connector, uint64_t val) { struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_encoder *intel_encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = intel_attached_dp(connector); int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret 
= drm_connector_property_set_value(connector, property, val); if (ret) return ret; @@ -2426,27 +2358,11 @@ intel_dp_set_property(struct drm_connector *connector, goto done; } - if (is_edp(intel_dp) && - property == connector->dev->mode_config.scaling_mode_property) { - if (val == DRM_MODE_SCALE_NONE) { - DRM_DEBUG_KMS("no scaling not supported\n"); - return -EINVAL; - } - - if (intel_connector->panel.fitting_mode == val) { - /* the eDP scaling property is not changed */ - return 0; - } - intel_connector->panel.fitting_mode = val; - - goto done; - } - return -EINVAL; done: - if (intel_encoder->base.crtc) { - struct drm_crtc *crtc = intel_encoder->base.crtc; + if (intel_dp->base.base.crtc) { + struct drm_crtc *crtc = intel_dp->base.base.crtc; intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } @@ -2459,33 +2375,27 @@ intel_dp_destroy(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_connector *intel_connector = to_intel_connector(connector); - if (!IS_ERR_OR_NULL(intel_connector->edid)) - kfree(intel_connector->edid); - - if (is_edp(intel_dp)) { + if (is_edp(intel_dp)) intel_panel_destroy_backlight(dev); - intel_panel_fini(&intel_connector->panel); - } drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } -void intel_dp_encoder_destroy(struct drm_encoder *encoder) +static void intel_dp_encoder_destroy(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); i2c_del_adapter(&intel_dp->adapter); drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { + kfree(intel_dp->edid); cancel_delayed_work_sync(&intel_dp->panel_vdd_work); ironlake_panel_vdd_off_sync(intel_dp); } - kfree(intel_dig_port); + kfree(intel_dp); } static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { @@ -2515,7 +2425,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { static void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); intel_dp_check_link_status(intel_dp); } @@ -2525,14 +2435,13 @@ int intel_trans_dp_port_sel(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct intel_encoder *intel_encoder; - struct intel_dp *intel_dp; + struct intel_encoder *encoder; - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - intel_dp = enc_to_intel_dp(&intel_encoder->base); + for_each_encoder_on_crtc(dev, crtc, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || + intel_dp->base.type == INTEL_OUTPUT_EDP) return intel_dp->output_reg; } @@ -2562,191 +2471,78 @@ bool intel_dpd_is_edp(struct drm_device *dev) static void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); - intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); - - if (is_edp(intel_dp)) { - drm_mode_create_scaling_mode_property(connector->dev); - drm_object_attach_property( - &connector->base, - connector->dev->mode_config.scaling_mode_property, - 
DRM_MODE_SCALE_ASPECT); - intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; - } -} - -static void -intel_dp_init_panel_power_sequencer(struct drm_device *dev, - struct intel_dp *intel_dp) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct edp_power_seq cur, vbt, spec, final; - u32 pp_on, pp_off, pp_div, pp; - - /* Workaround: Need to write PP_CONTROL with the unlock key as - * the very first thing. */ - pp = ironlake_get_pp_control(dev_priv); - I915_WRITE(PCH_PP_CONTROL, pp); - - pp_on = I915_READ(PCH_PP_ON_DELAYS); - pp_off = I915_READ(PCH_PP_OFF_DELAYS); - pp_div = I915_READ(PCH_PP_DIVISOR); - - /* Pull timing values out of registers */ - cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> - PANEL_POWER_UP_DELAY_SHIFT; - - cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> - PANEL_LIGHT_ON_DELAY_SHIFT; - - cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> - PANEL_LIGHT_OFF_DELAY_SHIFT; - - cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> - PANEL_POWER_DOWN_DELAY_SHIFT; - - cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> - PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; - - DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); - - vbt = dev_priv->edp.pps; - - /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of - * our hw here, which are all in 100usec. */ - spec.t1_t3 = 210 * 10; - spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ - spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ - spec.t10 = 500 * 10; - /* This one is special and actually in units of 100ms, but zero - * based in the hw (so we need to add 100 ms). But the sw vbt - * table multiplies it with 1000 to make it in units of 100usec, - * too. */ - spec.t11_t12 = (510 + 100) * 10; - - DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); - - /* Use the max of the register settings and vbt. If both are - * unset, fall back to the spec limits. */ -#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ - spec.field : \ - max(cur.field, vbt.field)) - assign_final(t1_t3); - assign_final(t8); - assign_final(t9); - assign_final(t10); - assign_final(t11_t12); -#undef assign_final - -#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) - intel_dp->panel_power_up_delay = get_delay(t1_t3); - intel_dp->backlight_on_delay = get_delay(t8); - intel_dp->backlight_off_delay = get_delay(t9); - intel_dp->panel_power_down_delay = get_delay(t10); - intel_dp->panel_power_cycle_delay = get_delay(t11_t12); -#undef get_delay - - /* And finally store the new values in the power sequencer. */ - pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | - (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); - pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | - (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); - /* Compute the divisor for the pp clock, simply match the Bspec - * formula. */ - pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) - << PP_REFERENCE_DIVIDER_SHIFT; - pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) - << PANEL_POWER_CYCLE_DELAY_SHIFT); - - /* Haswell doesn't have any port selection bits for the panel - * power sequencer any more. 
*/ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - if (is_cpu_edp(intel_dp)) - pp_on |= PANEL_POWER_PORT_DP_A; - else - pp_on |= PANEL_POWER_PORT_DP_D; - } - - I915_WRITE(PCH_PP_ON_DELAYS, pp_on); - I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); - I915_WRITE(PCH_PP_DIVISOR, pp_div); - - - DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", - intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, - intel_dp->panel_power_cycle_delay); - - DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", - intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); - - DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", - I915_READ(PCH_PP_ON_DELAYS), - I915_READ(PCH_PP_OFF_DELAYS), - I915_READ(PCH_PP_DIVISOR)); } void -intel_dp_init_connector(struct intel_digital_port *intel_dig_port, - struct intel_connector *intel_connector) +intel_dp_init(struct drm_device *dev, int output_reg, enum port port) { - struct drm_connector *connector = &intel_connector->base; - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct intel_encoder *intel_encoder = &intel_dig_port->base; - struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_display_mode *fixed_mode = NULL; - enum port port = intel_dig_port->port; + struct drm_connector *connector; + struct intel_dp *intel_dp; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; const char *name = NULL; int type; + intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); + if (!intel_dp) + return; + + intel_dp->output_reg = output_reg; + intel_dp->port = port; /* Preserve the current hw state. */ intel_dp->DP = I915_READ(intel_dp->output_reg); - intel_dp->attached_connector = intel_connector; - if (HAS_PCH_SPLIT(dev) && port == PORT_D) + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_dp); + return; + } + intel_encoder = &intel_dp->base; + + if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) if (intel_dpd_is_edp(dev)) intel_dp->is_pch_edp = true; - /* - * FIXME : We need to initialize built-in panels before external panels. - * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup - */ - if (IS_VALLEYVIEW(dev) && port == PORT_C) { - type = DRM_MODE_CONNECTOR_eDP; - intel_encoder->type = INTEL_OUTPUT_EDP; - } else if (port == PORT_A || is_pch_edp(intel_dp)) { + if (output_reg == DP_A || is_pch_edp(intel_dp)) { type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; } else { - /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for - * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't - * rewrite it. 
- */ type = DRM_MODE_CONNECTOR_DisplayPort; + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; } + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); connector->polled = DRM_CONNECTOR_POLL_HPD; - connector->interlace_allowed = true; - connector->doublescan_allowed = 0; + + intel_encoder->cloneable = false; INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, ironlake_panel_vdd_work); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + + connector->interlace_allowed = true; + connector->doublescan_allowed = 0; + + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); + intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); - if (IS_HASWELL(dev)) - intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; - else - intel_connector->get_hw_state = intel_connector_get_hw_state; - + intel_encoder->enable = intel_enable_dp; + intel_encoder->pre_enable = intel_pre_enable_dp; + intel_encoder->disable = intel_disable_dp; + intel_encoder->post_disable = intel_post_disable_dp; + intel_encoder->get_hw_state = intel_dp_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; /* Set up the DDC bus. */ switch (port) { @@ -2770,15 +2566,66 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, break; } - if (is_edp(intel_dp)) - intel_dp_init_panel_power_sequencer(dev, intel_dp); + /* Cache some DPCD data in the eDP case */ + if (is_edp(intel_dp)) { + struct edp_power_seq cur, vbt; + u32 pp_on, pp_off, pp_div; + + pp_on = I915_READ(PCH_PP_ON_DELAYS); + pp_off = I915_READ(PCH_PP_OFF_DELAYS); + pp_div = I915_READ(PCH_PP_DIVISOR); + + if (!pp_on || !pp_off || !pp_div) { + DRM_INFO("bad panel power sequencing delays, disabling panel\n"); + intel_dp_encoder_destroy(&intel_dp->base.base); + intel_dp_destroy(&intel_connector->base); + return; + } + + /* Pull timing values out of registers */ + cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> + PANEL_POWER_UP_DELAY_SHIFT; + + cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> + PANEL_LIGHT_ON_DELAY_SHIFT; + + cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> + PANEL_LIGHT_OFF_DELAY_SHIFT; + + cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> + PANEL_POWER_DOWN_DELAY_SHIFT; + + cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> + PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; + + DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); + + vbt = dev_priv->edp.pps; + + DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); + +#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) + + intel_dp->panel_power_up_delay = get_delay(t1_t3); + intel_dp->backlight_on_delay = get_delay(t8); + intel_dp->backlight_off_delay = get_delay(t9); + intel_dp->panel_power_down_delay = get_delay(t10); + intel_dp->panel_power_cycle_delay = get_delay(t11_t12); + + DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", + intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, + intel_dp->panel_power_cycle_delay); + + DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", + intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + } intel_dp_i2c_init(intel_dp, intel_connector, name); - /* Cache DPCD and 
EDID for edp. */ if (is_edp(intel_dp)) { bool ret; - struct drm_display_mode *scan; struct edid *edid; ironlake_edp_panel_vdd_on(intel_dp); @@ -2793,47 +2640,29 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, } else { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); - intel_dp_encoder_destroy(&intel_encoder->base); - intel_dp_destroy(connector); + intel_dp_encoder_destroy(&intel_dp->base.base); + intel_dp_destroy(&intel_connector->base); return; } ironlake_edp_panel_vdd_on(intel_dp); edid = drm_get_edid(connector, &intel_dp->adapter); if (edid) { - if (drm_add_edid_modes(connector, edid)) { - drm_mode_connector_update_edid_property(connector, edid); - drm_edid_to_eld(connector, edid); - } else { - kfree(edid); - edid = ERR_PTR(-EINVAL); - } - } else { - edid = ERR_PTR(-ENOENT); - } - intel_connector->edid = edid; - - /* prefer fixed mode from EDID if available */ - list_for_each_entry(scan, &connector->probed_modes, head) { - if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { - fixed_mode = drm_mode_duplicate(dev, scan); - break; - } + drm_mode_connector_update_edid_property(connector, + edid); + intel_dp->edid_mode_count = + drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); + intel_dp->edid = edid; } - - /* fallback to VBT if available for eDP */ - if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) { - fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - if (fixed_mode) - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - } - ironlake_edp_panel_vdd_off(intel_dp, false); } + intel_encoder->hot_plug = intel_dp_hot_plug; + if (is_edp(intel_dp)) { - intel_panel_init(&intel_connector->panel, fixed_mode); - intel_panel_setup_backlight(connector); + dev_priv->int_edp_connector = connector; + intel_panel_setup_backlight(dev); } intel_dp_add_properties(intel_dp, connector); @@ -2847,45 +2676,3 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } } - -void -intel_dp_init(struct drm_device *dev, int output_reg, enum port port) -{ - struct intel_digital_port *intel_dig_port; - struct intel_encoder *intel_encoder; - struct drm_encoder *encoder; - struct intel_connector *intel_connector; - - intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); - if (!intel_dig_port) - return; - - intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); - if (!intel_connector) { - kfree(intel_dig_port); - return; - } - - intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; - - drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); - - intel_encoder->enable = intel_enable_dp; - intel_encoder->pre_enable = intel_pre_enable_dp; - intel_encoder->disable = intel_disable_dp; - intel_encoder->post_disable = intel_post_disable_dp; - intel_encoder->get_hw_state = intel_dp_get_hw_state; - - intel_dig_port->port = port; - intel_dig_port->dp.output_reg = output_reg; - - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; - intel_encoder->hot_plug = intel_dp_hot_plug; - - intel_dp_init_connector(intel_dig_port, intel_connector); -} diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index 522061ca0685..fe7142502f43 100644 --- 
a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -94,7 +94,6 @@ #define INTEL_OUTPUT_HDMI 6 #define INTEL_OUTPUT_DISPLAYPORT 7 #define INTEL_OUTPUT_EDP 8 -#define INTEL_OUTPUT_UNKNOWN 9 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -164,11 +163,6 @@ struct intel_encoder { int crtc_mask; }; -struct intel_panel { - struct drm_display_mode *fixed_mode; - int fitting_mode; -}; - struct intel_connector { struct drm_connector base; /* @@ -185,19 +179,12 @@ struct intel_connector { /* Reads out the current hw, returning true if the connector is enabled * and active (i.e. dpms ON state). */ bool (*get_hw_state)(struct intel_connector *); - - /* Panel info for eDP and LVDS */ - struct intel_panel panel; - - /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ - struct edid *edid; }; struct intel_crtc { struct drm_crtc base; enum pipe pipe; enum plane plane; - enum transcoder cpu_transcoder; u8 lut_r[256], lut_g[256], lut_b[256]; /* * Whether the crtc and the connected output pipeline is active. Implies @@ -211,8 +198,6 @@ struct intel_crtc { struct intel_unpin_work *unpin_work; int fdi_lanes; - atomic_t unpin_work_count; - /* Display surface base address adjustement for pageflips. Note that on * gen4+ this only adjusts up to a tile, offsets within a tile are * handled in the hw itself (with the TILEOFF register). */ @@ -227,14 +212,12 @@ struct intel_crtc { /* We can share PLLs across outputs if the timings match */ struct intel_pch_pll *pch_pll; - uint32_t ddi_pll_sel; }; struct intel_plane { struct drm_plane base; enum pipe pipe; struct drm_i915_gem_object *obj; - bool can_scale; int max_downscale; u32 lut_r[1024], lut_g[1024], lut_b[1024]; void (*update_plane)(struct drm_plane *plane, @@ -334,8 +317,10 @@ struct dip_infoframe { } __attribute__((packed)); struct intel_hdmi { + struct intel_encoder base; u32 sdvox_reg; int ddc_bus; + int ddi_port; uint32_t color_range; bool has_hdmi_sink; bool has_audio; @@ -346,15 +331,18 @@ struct intel_hdmi { struct drm_display_mode *adjusted_mode); }; +#define DP_RECEIVER_CAP_SIZE 0xf #define DP_MAX_DOWNSTREAM_PORTS 0x10 #define DP_LINK_CONFIGURATION_SIZE 9 struct intel_dp { + struct intel_encoder base; uint32_t output_reg; uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; enum hdmi_force_audio force_audio; + enum port port; uint32_t color_range; uint8_t link_bw; uint8_t lane_count; @@ -369,16 +357,11 @@ struct intel_dp { int panel_power_cycle_delay; int backlight_on_delay; int backlight_off_delay; + struct drm_display_mode *panel_fixed_mode; /* for eDP */ struct delayed_work panel_vdd_work; bool want_panel_vdd; - struct intel_connector *attached_connector; -}; - -struct intel_digital_port { - struct intel_encoder base; - enum port port; - struct intel_dp dp; - struct intel_hdmi hdmi; + struct edid *edid; /* cached EDID for eDP */ + int edid_mode_count; }; static inline struct drm_crtc * @@ -397,7 +380,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane) struct intel_unpin_work { struct work_struct work; - struct drm_crtc *crtc; + struct drm_device *dev; struct drm_i915_gem_object *old_fb_obj; struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; @@ -412,8 +395,6 @@ struct intel_fbc_work { int interval; }; -int intel_pch_rawclk(struct drm_device *dev); - int intel_connector_update_modes(struct drm_connector *connector, struct edid *edid); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter 
*adapter); @@ -424,12 +405,7 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port); -extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, - struct intel_connector *intel_connector); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); -extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); @@ -442,27 +418,10 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); extern bool intel_lvds_init(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); -extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, - struct intel_connector *intel_connector); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern void intel_dp_init_link_config(struct intel_dp *intel_dp); -extern void intel_dp_start_link_train(struct intel_dp *intel_dp); -extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); -extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); -extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); -extern void intel_dp_check_link_status(struct intel_dp *intel_dp); -extern bool intel_dp_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); extern bool intel_dpd_is_edp(struct drm_device *dev); -extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp); -extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp); -extern void ironlake_edp_panel_on(struct intel_dp *intel_dp); -extern void ironlake_edp_panel_off(struct intel_dp *intel_dp); -extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); -extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); extern void intel_edp_link_config(struct intel_encoder *, int *, int *); extern int intel_edp_target_clock(struct intel_encoder *, struct drm_display_mode *mode); @@ -472,10 +431,6 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, enum plane plane); /* intel_panel.c */ -extern int intel_panel_init(struct intel_panel *panel, - struct drm_display_mode *fixed_mode); -extern void intel_panel_fini(struct intel_panel *panel); - extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); extern void intel_pch_panel_fitting(struct drm_device *dev, @@ -484,7 +439,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); -extern int intel_panel_setup_backlight(struct drm_connector *connector); +extern int intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe); extern void intel_panel_disable_backlight(struct drm_device *dev); @@ -518,31 +473,6 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector return to_intel_connector(connector)->encoder; } 
-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) -{ - struct intel_digital_port *intel_dig_port = - container_of(encoder, struct intel_digital_port, base.base); - return &intel_dig_port->dp; -} - -static inline struct intel_digital_port * -enc_to_dig_port(struct drm_encoder *encoder) -{ - return container_of(encoder, struct intel_digital_port, base.base); -} - -static inline struct intel_digital_port * -dp_to_dig_port(struct intel_dp *intel_dp) -{ - return container_of(intel_dp, struct intel_digital_port, dp); -} - -static inline struct intel_digital_port * -hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) -{ - return container_of(intel_hdmi, struct intel_digital_port, hdmi); -} - extern void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder); extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); @@ -551,9 +481,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern enum transcoder -intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe); extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); @@ -623,10 +550,6 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, struct drm_display_mode *mode); -extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, - unsigned int bpp, - unsigned int pitch); - extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, @@ -650,22 +573,12 @@ extern void intel_disable_gt_powersave(struct drm_device *dev); extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); extern void ironlake_teardown_rc6(struct drm_device *dev); +extern void intel_enable_ddi(struct intel_encoder *encoder); +extern void intel_disable_ddi(struct intel_encoder *encoder); extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); -extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv); -extern void intel_ddi_pll_init(struct drm_device *dev); -extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc); -extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder); -extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); -extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); -extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev); -extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock); -extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); -extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); -extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); -extern bool -intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); -extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); +extern void intel_ddi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); #endif /* __INTEL_DRV_H__ */ diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c index 5c279b48df97..9ba0aaed7ee8 100644 --- 
a/trunk/drivers/gpu/drm/i915/intel_hdmi.c +++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c @@ -36,15 +36,10 @@ #include #include "i915_drv.h" -static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) -{ - return hdmi_to_dig_port(intel_hdmi)->base.base.dev; -} - static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { - struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); + struct drm_device *dev = intel_hdmi->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t enabled_bits; @@ -56,14 +51,13 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = - container_of(encoder, struct intel_digital_port, base.base); - return &intel_dig_port->hdmi; + return container_of(encoder, struct intel_hdmi, base.base); } static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) { - return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); + return container_of(intel_attached_encoder(connector), + struct intel_hdmi, base); } void intel_dip_infoframe_csum(struct dip_infoframe *frame) @@ -760,16 +754,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector, return MODE_OK; } -bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { return true; } static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) { - struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); + struct drm_device *dev = intel_hdmi->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t bit; @@ -792,9 +786,6 @@ static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - struct intel_digital_port *intel_dig_port = - hdmi_to_dig_port(intel_hdmi); - struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_i915_private *dev_priv = connector->dev->dev_private; struct edid *edid; enum drm_connector_status status = connector_status_disconnected; @@ -823,7 +814,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) intel_hdmi->has_audio = (intel_hdmi->force_audio == HDMI_AUDIO_ON); - intel_encoder->type = INTEL_OUTPUT_HDMI; } return status; @@ -869,12 +859,10 @@ intel_hdmi_set_property(struct drm_connector *connector, uint64_t val) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - struct intel_digital_port *intel_dig_port = - hdmi_to_dig_port(intel_hdmi); struct drm_i915_private *dev_priv = connector->dev->dev_private; int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; @@ -910,8 +898,8 @@ intel_hdmi_set_property(struct drm_connector *connector, return -EINVAL; done: - if (intel_dig_port->base.base.crtc) { - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; + if (intel_hdmi->base.base.crtc) { + struct drm_crtc *crtc = intel_hdmi->base.base.crtc; intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } @@ -926,6 +914,12 @@ static void intel_hdmi_destroy(struct drm_connector *connector) kfree(connector); } +static const struct drm_encoder_helper_funcs 
intel_hdmi_helper_funcs_hsw = { + .mode_fixup = intel_hdmi_mode_fixup, + .mode_set = intel_ddi_mode_set, + .disable = intel_encoder_noop, +}; + static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { .mode_fixup = intel_hdmi_mode_fixup, .mode_set = intel_hdmi_mode_set, @@ -957,24 +951,43 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_attach_broadcast_rgb_property(connector); } -void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, - struct intel_connector *intel_connector) +void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) { - struct drm_connector *connector = &intel_connector->base; - struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; - struct intel_encoder *intel_encoder = &intel_dig_port->base; - struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_dig_port->port; + struct drm_connector *connector; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; + struct intel_hdmi *intel_hdmi; + intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); + if (!intel_hdmi) + return; + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_hdmi); + return; + } + + intel_encoder = &intel_hdmi->base; + drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, + DRM_MODE_ENCODER_TMDS); + + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); + intel_encoder->type = INTEL_OUTPUT_HDMI; + connector->polled = DRM_CONNECTOR_POLL_HPD; connector->interlace_allowed = 1; connector->doublescan_allowed = 0; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + + intel_encoder->cloneable = false; + intel_hdmi->ddi_port = port; switch (port) { case PORT_B: intel_hdmi->ddc_bus = GMBUS_PORT_DPB; @@ -994,6 +1007,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, BUG(); } + intel_hdmi->sdvox_reg = sdvox_reg; + if (!HAS_PCH_SPLIT(dev)) { intel_hdmi->write_infoframe = g4x_write_infoframe; intel_hdmi->set_infoframes = g4x_set_infoframes; @@ -1011,10 +1026,21 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi->set_infoframes = cpt_set_infoframes; } - if (IS_HASWELL(dev)) - intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; - else - intel_connector->get_hw_state = intel_connector_get_hw_state; + if (IS_HASWELL(dev)) { + intel_encoder->enable = intel_enable_ddi; + intel_encoder->disable = intel_disable_ddi; + intel_encoder->get_hw_state = intel_ddi_get_hw_state; + drm_encoder_helper_add(&intel_encoder->base, + &intel_hdmi_helper_funcs_hsw); + } else { + intel_encoder->enable = intel_enable_hdmi; + intel_encoder->disable = intel_disable_hdmi; + intel_encoder->get_hw_state = intel_hdmi_get_hw_state; + drm_encoder_helper_add(&intel_encoder->base, + &intel_hdmi_helper_funcs); + } + intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_hdmi_add_properties(intel_hdmi, connector); @@ -1030,42 +1056,3 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } } - -void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) -{ - struct intel_digital_port *intel_dig_port; - struct intel_encoder *intel_encoder; - struct 
drm_encoder *encoder; - struct intel_connector *intel_connector; - - intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); - if (!intel_dig_port) - return; - - intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); - if (!intel_connector) { - kfree(intel_dig_port); - return; - } - - intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; - - drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); - - intel_encoder->enable = intel_enable_hdmi; - intel_encoder->disable = intel_disable_hdmi; - intel_encoder->get_hw_state = intel_hdmi_get_hw_state; - - intel_encoder->type = INTEL_OUTPUT_HDMI; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; - - intel_dig_port->port = port; - intel_dig_port->hdmi.sdvox_reg = sdvox_reg; - intel_dig_port->dp.output_reg = 0; - - intel_hdmi_init_connector(intel_dig_port, intel_connector); -} diff --git a/trunk/drivers/gpu/drm/i915/intel_i2c.c b/trunk/drivers/gpu/drm/i915/intel_i2c.c index 3ef5af15b812..c2c6dbc0971c 100644 --- a/trunk/drivers/gpu/drm/i915/intel_i2c.c +++ b/trunk/drivers/gpu/drm/i915/intel_i2c.c @@ -432,7 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter, I915_WRITE(GMBUS0 + reg_offset, 0); /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ - bus->force_bit = 1; + bus->force_bit = true; ret = i2c_bit_algo.master_xfer(adapter, msgs, num); out: @@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev) /* gmbus seems to be broken on i830 */ if (IS_I830(dev)) - bus->force_bit = 1; + bus->force_bit = true; intel_gpio_setup(bus, port); @@ -532,10 +532,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - bus->force_bit += force_bit ? 1 : -1; - DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", - force_bit ? 
"en" : "dis", adapter->name, - bus->force_bit); + bus->force_bit = force_bit; } void intel_teardown_gmbus(struct drm_device *dev) diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index 81502e8be26b..edba93b3474b 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -40,30 +40,28 @@ #include /* Private structure for the integrated LVDS support */ -struct intel_lvds_connector { - struct intel_connector base; - - struct notifier_block lid_notifier; -}; - -struct intel_lvds_encoder { +struct intel_lvds { struct intel_encoder base; + struct edid *edid; + + int fitting_mode; u32 pfit_control; u32 pfit_pgm_ratios; bool pfit_dirty; - struct intel_lvds_connector *attached_connector; + struct drm_display_mode *fixed_mode; }; -static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) +static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) { - return container_of(encoder, struct intel_lvds_encoder, base.base); + return container_of(encoder, struct intel_lvds, base.base); } -static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector) +static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) { - return container_of(connector, struct intel_lvds_connector, base.base); + return container_of(intel_attached_encoder(connector), + struct intel_lvds, base); } static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, @@ -98,7 +96,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, static void intel_enable_lvds(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -115,7 +113,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); - if (lvds_encoder->pfit_dirty) { + if (intel_lvds->pfit_dirty) { /* * Enable automatic panel scaling so that non-native modes * fill the screen. The panel fitter should only be @@ -123,12 +121,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder) * register description and PRM. 
*/ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", - lvds_encoder->pfit_control, - lvds_encoder->pfit_pgm_ratios); + intel_lvds->pfit_control, + intel_lvds->pfit_pgm_ratios); - I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios); - I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control); - lvds_encoder->pfit_dirty = false; + I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); + intel_lvds->pfit_dirty = false; } I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); @@ -142,7 +140,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) static void intel_disable_lvds(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -162,9 +160,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder) if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) DRM_ERROR("timed out waiting for panel to power off\n"); - if (lvds_encoder->pfit_control) { + if (intel_lvds->pfit_control) { I915_WRITE(PFIT_CONTROL, 0); - lvds_encoder->pfit_dirty = true; + intel_lvds->pfit_dirty = true; } I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); @@ -174,8 +172,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder) static int intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); + struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; @@ -251,10 +249,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); - struct intel_connector *intel_connector = - &lvds_encoder->attached_connector->base; - struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); + struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; int pipe; @@ -264,7 +260,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return false; } - if (intel_encoder_check_is_cloned(&lvds_encoder->base)) + if (intel_encoder_check_is_cloned(&intel_lvds->base)) return false; /* @@ -273,12 +269,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ - intel_fixed_panel_mode(intel_connector->panel.fixed_mode, - adjusted_mode); + intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); if (HAS_PCH_SPLIT(dev)) { - intel_pch_panel_fitting(dev, - intel_connector->panel.fitting_mode, + intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, mode, adjusted_mode); return true; } @@ -304,7 +298,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, 0); - switch (intel_connector->panel.fitting_mode) { + switch (intel_lvds->fitting_mode) { case DRM_MODE_SCALE_CENTER: /* * For centered modes, we have to calculate border widths & @@ -402,11 +396,11 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; - if (pfit_control != lvds_encoder->pfit_control || - pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { - lvds_encoder->pfit_control = pfit_control; - lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; - lvds_encoder->pfit_dirty = true; + if (pfit_control != intel_lvds->pfit_control || + pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { + intel_lvds->pfit_control = pfit_control; + intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; + intel_lvds->pfit_dirty = true; } dev_priv->lvds_border_bits = border; @@ -455,15 +449,14 @@ intel_lvds_detect(struct drm_connector *connector, bool force) */ static int intel_lvds_get_modes(struct drm_connector *connector) { - struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; struct drm_display_mode *mode; - /* use cached edid if we have one */ - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) - return drm_add_edid_modes(connector, lvds_connector->base.edid); + if (intel_lvds->edid) + return drm_add_edid_modes(connector, intel_lvds->edid); - mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); + mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); if (mode == NULL) return 0; @@ -503,11 +496,10 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = { static int intel_lid_notify(struct notifier_block *nb, unsigned long val, void *unused) { - struct intel_lvds_connector *lvds_connector = - container_of(nb, struct intel_lvds_connector, lid_notifier); - struct drm_connector *connector = &lvds_connector->base.base; - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = + container_of(nb, struct drm_i915_private, lid_notifier); + struct drm_device *dev = dev_priv->dev; + struct drm_connector *connector = dev_priv->int_lvds_connector; if (dev->switch_power_state != DRM_SWITCH_POWER_ON) return NOTIFY_OK; @@ -516,7 +508,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, * check and update the status of LVDS connector after receiving * the LID nofication event. 
*/ - connector->status = connector->funcs->detect(connector, false); + if (connector) + connector->status = connector->funcs->detect(connector, + false); /* Don't force modeset on machines where it causes a GPU lockup */ if (dmi_check_system(intel_no_modeset_on_lid)) @@ -547,18 +541,13 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, */ static void intel_lvds_destroy(struct drm_connector *connector) { - struct intel_lvds_connector *lvds_connector = - to_lvds_connector(connector); - - if (lvds_connector->lid_notifier.notifier_call) - acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); - - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) - kfree(lvds_connector->base.edid); + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; - intel_panel_destroy_backlight(connector->dev); - intel_panel_fini(&lvds_connector->base.panel); + intel_panel_destroy_backlight(dev); + if (dev_priv->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&dev_priv->lid_notifier); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); @@ -568,24 +557,22 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { - struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; if (property == dev->mode_config.scaling_mode_property) { - struct drm_crtc *crtc; + struct drm_crtc *crtc = intel_lvds->base.base.crtc; if (value == DRM_MODE_SCALE_NONE) { DRM_DEBUG_KMS("no scaling not supported\n"); return -EINVAL; } - if (intel_connector->panel.fitting_mode == value) { + if (intel_lvds->fitting_mode == value) { /* the LVDS scaling property is not changed */ return 0; } - intel_connector->panel.fitting_mode = value; - - crtc = intel_attached_encoder(connector)->base.crtc; + intel_lvds->fitting_mode = value; if (crtc && crtc->enabled) { /* * If the CRTC is enabled, the display will be changed @@ -925,15 +912,12 @@ static bool intel_lvds_supported(struct drm_device *dev) bool intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds_encoder *lvds_encoder; + struct intel_lvds *intel_lvds; struct intel_encoder *intel_encoder; - struct intel_lvds_connector *lvds_connector; struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ - struct drm_display_mode *fixed_mode = NULL; - struct edid *edid; struct drm_crtc *crtc; u32 lvds; int pipe; @@ -961,25 +945,23 @@ bool intel_lvds_init(struct drm_device *dev) } } - lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); - if (!lvds_encoder) + intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); + if (!intel_lvds) { return false; + } - lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); - if (!lvds_connector) { - kfree(lvds_encoder); + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_lvds); return false; } - lvds_encoder->attached_connector = lvds_connector; - if (!HAS_PCH_SPLIT(dev)) { - lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL); + intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); } - intel_encoder = &lvds_encoder->base; + intel_encoder = &intel_lvds->base; encoder = &intel_encoder->base; - intel_connector = 
&lvds_connector->base; connector = &intel_connector->base; drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); @@ -1011,10 +993,14 @@ bool intel_lvds_init(struct drm_device *dev) /* create the scaling mode property */ drm_mode_create_scaling_mode_property(dev); - drm_object_attach_property(&connector->base, + /* + * the initial panel fitting mode will be FULL_SCREEN. + */ + + drm_connector_attach_property(&intel_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_ASPECT); - intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; + intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; /* * LVDS discovery: * 1) check for EDID on DDC @@ -1029,21 +1015,20 @@ bool intel_lvds_init(struct drm_device *dev) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); - if (edid) { - if (drm_add_edid_modes(connector, edid)) { + intel_lvds->edid = drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, + pin)); + if (intel_lvds->edid) { + if (drm_add_edid_modes(connector, + intel_lvds->edid)) { drm_mode_connector_update_edid_property(connector, - edid); + intel_lvds->edid); } else { - kfree(edid); - edid = ERR_PTR(-EINVAL); + kfree(intel_lvds->edid); + intel_lvds->edid = NULL; } - } else { - edid = ERR_PTR(-ENOENT); } - lvds_connector->base.edid = edid; - - if (IS_ERR_OR_NULL(edid)) { + if (!intel_lvds->edid) { /* Didn't get an EDID, so * Set wide sync ranges so we get all modes * handed to valid_mode for checking @@ -1056,26 +1041,22 @@ bool intel_lvds_init(struct drm_device *dev) list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { - DRM_DEBUG_KMS("using preferred mode from EDID: "); - drm_mode_debug_printmodeline(scan); - - fixed_mode = drm_mode_duplicate(dev, scan); - if (fixed_mode) { - intel_find_lvds_downclock(dev, fixed_mode, - connector); - goto out; - } + intel_lvds->fixed_mode = + drm_mode_duplicate(dev, scan); + intel_find_lvds_downclock(dev, + intel_lvds->fixed_mode, + connector); + goto out; } } /* Failed to get EDID, what about VBT? */ if (dev_priv->lfp_lvds_vbt_mode) { - DRM_DEBUG_KMS("using mode from VBT: "); - drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode); - - fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - if (fixed_mode) { - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + intel_lvds->fixed_mode = + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (intel_lvds->fixed_mode) { + intel_lvds->fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; goto out; } } @@ -1095,17 +1076,16 @@ bool intel_lvds_init(struct drm_device *dev) crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc && (lvds & LVDS_PORT_EN)) { - fixed_mode = intel_crtc_mode_get(dev, crtc); - if (fixed_mode) { - DRM_DEBUG_KMS("using current (BIOS) mode: "); - drm_mode_debug_printmodeline(fixed_mode); - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); + if (intel_lvds->fixed_mode) { + intel_lvds->fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; goto out; } } /* If we still don't have a mode after all that, give up. 
*/ - if (!fixed_mode) + if (!intel_lvds->fixed_mode) goto failed; out: @@ -1120,15 +1100,16 @@ bool intel_lvds_init(struct drm_device *dev) I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); } - lvds_connector->lid_notifier.notifier_call = intel_lid_notify; - if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { + dev_priv->lid_notifier.notifier_call = intel_lid_notify; + if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { DRM_DEBUG_KMS("lid notifier registration failed\n"); - lvds_connector->lid_notifier.notifier_call = NULL; + dev_priv->lid_notifier.notifier_call = NULL; } + /* keep the LVDS connector */ + dev_priv->int_lvds_connector = connector; drm_sysfs_connector_add(connector); - intel_panel_init(&intel_connector->panel, fixed_mode); - intel_panel_setup_backlight(connector); + intel_panel_setup_backlight(dev); return true; @@ -1136,9 +1117,7 @@ bool intel_lvds_init(struct drm_device *dev) DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); - if (fixed_mode) - drm_mode_destroy(dev, fixed_mode); - kfree(lvds_encoder); - kfree(lvds_connector); + kfree(intel_lvds); + kfree(intel_connector); return false; } diff --git a/trunk/drivers/gpu/drm/i915/intel_modes.c b/trunk/drivers/gpu/drm/i915/intel_modes.c index b00f1c83adce..cabd84bf66eb 100644 --- a/trunk/drivers/gpu/drm/i915/intel_modes.c +++ b/trunk/drivers/gpu/drm/i915/intel_modes.c @@ -45,6 +45,7 @@ int intel_connector_update_modes(struct drm_connector *connector, drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); + kfree(edid); return ret; } @@ -60,16 +61,12 @@ int intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid; - int ret; edid = drm_get_edid(connector, adapter); if (!edid) return 0; - ret = intel_connector_update_modes(connector, edid); - kfree(edid); - - return ret; + return intel_connector_update_modes(connector, edid); } static const struct drm_prop_enum_list force_audio_names[] = { @@ -97,7 +94,7 @@ intel_attach_force_audio_property(struct drm_connector *connector) dev_priv->force_audio_property = prop; } - drm_object_attach_property(&connector->base, prop, 0); + drm_connector_attach_property(connector, prop, 0); } static const struct drm_prop_enum_list broadcast_rgb_names[] = { @@ -124,5 +121,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector) dev_priv->broadcast_rgb_property = prop; } - drm_object_attach_property(&connector->base, prop, 0); + drm_connector_attach_property(connector, prop, 0); } diff --git a/trunk/drivers/gpu/drm/i915/intel_opregion.c b/trunk/drivers/gpu/drm/i915/intel_opregion.c index 7741c22c934c..5530413213d8 100644 --- a/trunk/drivers/gpu/drm/i915/intel_opregion.c +++ b/trunk/drivers/gpu/drm/i915/intel_opregion.c @@ -154,8 +154,6 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) struct opregion_asle __iomem *asle = dev_priv->opregion.asle; u32 max; - DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); - if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAILED; diff --git a/trunk/drivers/gpu/drm/i915/intel_panel.c b/trunk/drivers/gpu/drm/i915/intel_panel.c index c758ad277473..e2aacd329545 100644 --- a/trunk/drivers/gpu/drm/i915/intel_panel.c +++ b/trunk/drivers/gpu/drm/i915/intel_panel.c @@ -138,24 +138,24 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) if (HAS_PCH_SPLIT(dev_priv->dev)) { val = 
I915_READ(BLC_PWM_PCH_CTL2); - if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) { - dev_priv->regfile.saveBLC_PWM_CTL2 = val; + if (dev_priv->saveBLC_PWM_CTL2 == 0) { + dev_priv->saveBLC_PWM_CTL2 = val; } else if (val == 0) { I915_WRITE(BLC_PWM_PCH_CTL2, - dev_priv->regfile.saveBLC_PWM_CTL2); - val = dev_priv->regfile.saveBLC_PWM_CTL2; + dev_priv->saveBLC_PWM_CTL2); + val = dev_priv->saveBLC_PWM_CTL2; } } else { val = I915_READ(BLC_PWM_CTL); - if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { - dev_priv->regfile.saveBLC_PWM_CTL = val; - dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + if (dev_priv->saveBLC_PWM_CTL == 0) { + dev_priv->saveBLC_PWM_CTL = val; + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); } else if (val == 0) { I915_WRITE(BLC_PWM_CTL, - dev_priv->regfile.saveBLC_PWM_CTL); + dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_CTL2, - dev_priv->regfile.saveBLC_PWM_CTL2); - val = dev_priv->regfile.saveBLC_PWM_CTL; + dev_priv->saveBLC_PWM_CTL2); + val = dev_priv->saveBLC_PWM_CTL; } } @@ -275,7 +275,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level } tmp = I915_READ(BLC_PWM_CTL); - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_INFO(dev)->gen < 4) level <<= 1; tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; I915_WRITE(BLC_PWM_CTL, tmp | level); @@ -374,23 +374,26 @@ static void intel_panel_init_backlight(struct drm_device *dev) enum drm_connector_status intel_panel_detect(struct drm_device *dev) { +#if 0 struct drm_i915_private *dev_priv = dev->dev_private; +#endif + if (i915_panel_ignore_lid) + return i915_panel_ignore_lid > 0 ? + connector_status_connected : + connector_status_disconnected; + + /* opregion lid state on HP 2540p is wrong at boot up, + * appears to be either the BIOS or Linux ACPI fault */ +#if 0 /* Assume that the BIOS does not lie through the OpRegion... */ - if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) { + if (dev_priv->opregion.lid_state) return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 
connector_status_connected : connector_status_disconnected; - } +#endif - switch (i915_panel_ignore_lid) { - case -2: - return connector_status_connected; - case -1: - return connector_status_disconnected; - default: - return connector_status_unknown; - } + return connector_status_unknown; } #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE @@ -413,14 +416,21 @@ static const struct backlight_ops intel_panel_bl_ops = { .get_brightness = intel_panel_get_brightness, }; -int intel_panel_setup_backlight(struct drm_connector *connector) +int intel_panel_setup_backlight(struct drm_device *dev) { - struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct backlight_properties props; + struct drm_connector *connector; intel_panel_init_backlight(dev); + if (dev_priv->int_lvds_connector) + connector = dev_priv->int_lvds_connector; + else if (dev_priv->int_edp_connector) + connector = dev_priv->int_edp_connector; + else + return -ENODEV; + memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; props.max_brightness = _intel_panel_get_max_backlight(dev); @@ -450,9 +460,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev) backlight_device_unregister(dev_priv->backlight); } #else -int intel_panel_setup_backlight(struct drm_connector *connector) +int intel_panel_setup_backlight(struct drm_device *dev) { - intel_panel_init_backlight(connector->dev); + intel_panel_init_backlight(dev); return 0; } @@ -461,20 +471,3 @@ void intel_panel_destroy_backlight(struct drm_device *dev) return; } #endif - -int intel_panel_init(struct intel_panel *panel, - struct drm_display_mode *fixed_mode) -{ - panel->fixed_mode = fixed_mode; - - return 0; -} - -void intel_panel_fini(struct intel_panel *panel) -{ - struct intel_connector *intel_connector = - container_of(panel, struct intel_connector, panel); - - if (panel->fixed_mode) - drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); -} diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c index 58c2f210154a..442968f8b201 100644 --- a/trunk/drivers/gpu/drm/i915/intel_pm.c +++ b/trunk/drivers/gpu/drm/i915/intel_pm.c @@ -1468,12 +1468,9 @@ static void i9xx_update_wm(struct drm_device *dev) fifo_size = dev_priv->display.get_fifo_size(dev, 0); crtc = intel_get_crtc_for_plane(dev, 0); if (crtc->enabled && crtc->fb) { - int cpp = crtc->fb->bits_per_pixel / 8; - if (IS_GEN2(dev)) - cpp = 4; - planea_wm = intel_calculate_wm(crtc->mode.clock, - wm_info, fifo_size, cpp, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, latency_ns); enabled = crtc; } else @@ -1482,12 +1479,9 @@ static void i9xx_update_wm(struct drm_device *dev) fifo_size = dev_priv->display.get_fifo_size(dev, 1); crtc = intel_get_crtc_for_plane(dev, 1); if (crtc->enabled && crtc->fb) { - int cpp = crtc->fb->bits_per_pixel / 8; - if (IS_GEN2(dev)) - cpp = 4; - planeb_wm = intel_calculate_wm(crtc->mode.clock, - wm_info, fifo_size, cpp, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, latency_ns); if (enabled == NULL) enabled = crtc; @@ -1577,7 +1571,8 @@ static void i830_update_wm(struct drm_device *dev) planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, dev_priv->display.get_fifo_size(dev, 0), - 4, latency_ns); + crtc->fb->bits_per_pixel / 8, + latency_ns); fwater_lo = I915_READ(FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; @@ -2328,7 +2323,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val) struct drm_i915_private *dev_priv = dev->dev_private; u32 limits = gen6_rps_limits(dev_priv, &val); - 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); WARN_ON(val > dev_priv->rps.max_delay); WARN_ON(val < dev_priv->rps.min_delay); @@ -2378,15 +2373,9 @@ int intel_enable_rc6(const struct drm_device *dev) if (i915_enable_rc6 >= 0) return i915_enable_rc6; - if (INTEL_INFO(dev)->gen == 5) { -#ifdef CONFIG_INTEL_IOMMU - /* Disable rc6 on ilk if VT-d is on. */ - if (intel_iommu_gfx_mapped) - return false; -#endif - DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n"); - return INTEL_RC6_ENABLE; - } + /* Disable RC6 on Ironlake */ + if (INTEL_INFO(dev)->gen == 5) + return 0; if (IS_HASWELL(dev)) { DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); @@ -2409,12 +2398,12 @@ static void gen6_enable_rps(struct drm_device *dev) struct intel_ring_buffer *ring; u32 rp_state_cap; u32 gt_perf_status; - u32 rc6vids, pcu_mbox, rc6_mask = 0; + u32 pcu_mbox, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; - int i, ret; + int i; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); /* Here begins a magic sequence of register writes to enable * auto-downclocking. @@ -2508,16 +2497,30 @@ static void gen6_enable_rps(struct drm_device *dev) GEN6_RP_UP_BUSY_AVG | (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); - ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); - if (!ret) { - pcu_mbox = 0; - ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); - if (ret && pcu_mbox & (1<<31)) { /* OC supported */ - dev_priv->rps.max_delay = pcu_mbox & 0xff; - DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); - } - } else { - DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + + I915_WRITE(GEN6_PCODE_DATA, 0); + I915_WRITE(GEN6_PCODE_MAILBOX, + GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + + /* Check for overclock support */ + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); + pcu_mbox = I915_READ(GEN6_PCODE_DATA); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + if (pcu_mbox & (1<<31)) { /* OC supported */ + dev_priv->rps.max_delay = pcu_mbox & 0xff; + DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); } gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); @@ -2531,20 +2534,6 @@ static void gen6_enable_rps(struct drm_device *dev) /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); - rc6vids = 0; - ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - if (IS_GEN6(dev) && ret) { - DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { - DRM_DEBUG_DRIVER("You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n", - GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); - rc6vids &= 0xffff00; - rc6vids |= GEN6_ENCODE_RC6_VID(450); - ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); - if (ret) - DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); - } - gen6_gt_force_wake_put(dev_priv); } @@ -2552,11 +2541,10 @@ static void gen6_update_ring_freq(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; - int gpu_freq; - unsigned int ia_freq, max_ia_freq; + int gpu_freq, ia_freq, max_ia_freq; int scaling_factor = 180; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); max_ia_freq = cpufreq_quick_get_max(0); /* @@ -2587,11 +2575,17 @@ static void gen6_update_ring_freq(struct drm_device *dev) else ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); - ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT; - sandybridge_pcode_write(dev_priv, - GEN6_PCODE_WRITE_MIN_FREQ_TABLE, - ia_freq | gpu_freq); + I915_WRITE(GEN6_PCODE_DATA, + (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | + gpu_freq); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & + GEN6_PCODE_READY) == 0, 10)) { + DRM_ERROR("pcode write of freq table timed out\n"); + continue; + } } } @@ -2599,16 +2593,16 @@ void ironlake_teardown_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->ips.renderctx) { - i915_gem_object_unpin(dev_priv->ips.renderctx); - drm_gem_object_unreference(&dev_priv->ips.renderctx->base); - dev_priv->ips.renderctx = NULL; + if (dev_priv->renderctx) { + i915_gem_object_unpin(dev_priv->renderctx); + drm_gem_object_unreference(&dev_priv->renderctx->base); + dev_priv->renderctx = NULL; } - if (dev_priv->ips.pwrctx) { - i915_gem_object_unpin(dev_priv->ips.pwrctx); - drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); - dev_priv->ips.pwrctx = NULL; + if (dev_priv->pwrctx) { + i915_gem_object_unpin(dev_priv->pwrctx); + drm_gem_object_unreference(&dev_priv->pwrctx->base); + dev_priv->pwrctx = NULL; } } @@ -2634,14 +2628,14 @@ static int ironlake_setup_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->ips.renderctx == NULL) - dev_priv->ips.renderctx = intel_alloc_context_page(dev); - if (!dev_priv->ips.renderctx) + if (dev_priv->renderctx == NULL) + dev_priv->renderctx = intel_alloc_context_page(dev); + if (!dev_priv->renderctx) return -ENOMEM; - if (dev_priv->ips.pwrctx == NULL) - dev_priv->ips.pwrctx = intel_alloc_context_page(dev); - if (!dev_priv->ips.pwrctx) { + if (dev_priv->pwrctx == NULL) + dev_priv->pwrctx = intel_alloc_context_page(dev); + if (!dev_priv->pwrctx) { ironlake_teardown_rc6(dev); return -ENOMEM; } @@ -2679,7 +2673,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | + intel_ring_emit(ring, dev_priv->renderctx->gtt_offset | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | @@ -2701,7 +2695,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) return; } - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); + I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); } @@ -3310,72 
+3304,37 @@ static void intel_init_emon(struct drm_device *dev) void intel_disable_gt_powersave(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_IRONLAKE_M(dev)) { ironlake_disable_drps(dev); ironlake_disable_rc6(dev); } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { - cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); - mutex_lock(&dev_priv->rps.hw_lock); gen6_disable_rps(dev); - mutex_unlock(&dev_priv->rps.hw_lock); } } -static void intel_gen6_powersave_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - rps.delayed_resume_work.work); - struct drm_device *dev = dev_priv->dev; - - mutex_lock(&dev_priv->rps.hw_lock); - gen6_enable_rps(dev); - gen6_update_ring_freq(dev); - mutex_unlock(&dev_priv->rps.hw_lock); -} - void intel_enable_gt_powersave(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); ironlake_enable_rc6(dev); intel_init_emon(dev); } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { - /* - * PCU communication is slow and this doesn't need to be - * done at any specific time, so do this out of our fast path - * to make resume and init faster. - */ - schedule_delayed_work(&dev_priv->rps.delayed_resume_work, - round_jiffies_up_relative(HZ)); + gen6_enable_rps(dev); + gen6_update_ring_freq(dev); } } -static void ibx_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); -} - static void ironlake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; /* Required for FBC */ - dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | - ILK_DPFCUNIT_CLOCK_GATE_DISABLE | - ILK_DPFDUNIT_CLOCK_GATE_ENABLE; + dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | + DPFCRUNIT_CLOCK_GATE_DISABLE | + DPFDUNIT_CLOCK_GATE_DISABLE; + /* Required for CxSR */ + dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; I915_WRITE(PCH_3DCGDIS0, MARIUNIT_CLOCK_GATE_DISABLE | @@ -3383,6 +3342,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev) I915_WRITE(PCH_3DCGDIS1, VFMUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + /* * According to the spec the following bits should be set in * order to enable memory self-refresh @@ -3393,7 +3354,9 @@ static void ironlake_init_clock_gating(struct drm_device *dev) I915_WRITE(ILK_DISPLAY_CHICKEN2, (I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); - dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; + I915_WRITE(ILK_DSPCLK_GATE, + (I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE)); I915_WRITE(DISP_ARB_CTL, (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); @@ -3415,51 +3378,28 @@ static void ironlake_init_clock_gating(struct drm_device *dev) I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPFC_DIS1 | + ILK_DPFC_DIS2 | + ILK_CLK_FBC); } - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); - I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); 
I915_WRITE(_3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED << 16 | _3D_CHICKEN2_WM_READ_PIPELINED); - - /* WaDisableRenderCachePipelinedFlush */ - I915_WRITE(CACHE_MODE_0, - _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); - - ibx_init_clock_gating(dev); -} - -static void cpt_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | - DPLS_EDP_PPS_FIX_DIS); - /* WADP0ClockGatingDisable */ - for_each_pipe(pipe) { - I915_WRITE(TRANS_CHICKEN1(pipe), - TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); - } } static void gen6_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | @@ -3514,12 +3454,11 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL); - I915_WRITE(ILK_DSPCLK_GATE_D, - I915_READ(ILK_DSPCLK_GATE_D) | - ILK_DPARBUNIT_CLOCK_GATE_ENABLE | - ILK_DPFDUNIT_CLOCK_GATE_ENABLE); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE | + ILK_DPFD_CLK_GATE); - /* WaMbcDriverBootEnable */ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | GEN6_MBCTL_ENABLE_BOOT_FETCH); @@ -3534,8 +3473,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); - - cpt_init_clock_gating(dev); } static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) @@ -3550,24 +3487,13 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_FF_THREAD_MODE, reg); } -static void lpt_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* - * TODO: this bit should only be enabled when really needed, then - * disabled when not needed anymore in order to save power. - */ - if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) - I915_WRITE(SOUTH_DSPCLK_GATE_D, - I915_READ(SOUTH_DSPCLK_GATE_D) | - PCH_LP_PARTITION_LEVEL_DISABLE); -} - static void haswell_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); @@ -3578,6 +3504,12 @@ static void haswell_init_clock_gating(struct drm_device *dev) */ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. 
*/ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); @@ -3606,10 +3538,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); - /* WaMbcDriverBootEnable */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - /* XXX: This is a workaround for early silicon revisions and should be * removed later. */ @@ -3619,38 +3547,27 @@ static void haswell_init_clock_gating(struct drm_device *dev) WM_DBG_DISALLOW_SPRITE | WM_DBG_DISALLOW_MAXFIFO); - lpt_init_clock_gating(dev); } static void ivybridge_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; uint32_t snpcr; + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); - - /* WaDisableEarlyCull */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - /* WaDisableBackToBackFlipFix */ I915_WRITE(IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); - /* WaDisablePSDDualDispatchEnable */ - if (IS_IVB_GT1(dev)) - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - else - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); @@ -3659,18 +3576,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, - GEN7_WA_L3_CHICKEN_MODE); - if (IS_IVB_GT1(dev)) - I915_WRITE(GEN7_ROW_CHICKEN2, - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - else - I915_WRITE(GEN7_ROW_CHICKEN2_GT2, - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - - - /* WaForceL3Serialization */ - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); + GEN7_WA_L3_CHICKEN_MODE); /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock * gating disable must be set. 
Failure to set it results in @@ -3701,7 +3607,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) intel_flush_display_plane(dev_priv, pipe); } - /* WaMbcDriverBootEnable */ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | GEN6_MBCTL_ENABLE_BOOT_FETCH); @@ -3715,59 +3620,39 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) snpcr &= ~GEN6_MBC_SNPCR_MASK; snpcr |= GEN6_MBC_SNPCR_MED; I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); - - cpt_init_clock_gating(dev); } static void valleyview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); - - /* WaDisableEarlyCull */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - /* WaDisableBackToBackFlipFix */ I915_WRITE(IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ - I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); + I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); - /* WaForceL3Serialization */ - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); - - /* WaDisableDopClockGating */ - I915_WRITE(GEN7_ROW_CHICKEN2, - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - - /* WaForceL3Serialization */ - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); - /* This is required by WaCatErrorRejectionIssue */ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - /* WaMbcDriverBootEnable */ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | GEN6_MBCTL_ENABLE_BOOT_FETCH); @@ -3819,13 +3704,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | PLANEA_FLIPDONE_INT_EN); - - /* - * WaDisableVLVClockGating_VBIIssue - * Disable clock gating on th GCFG unit to prevent a delay - * in the reporting of vblank events. 
- */ - I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); } static void g4x_init_clock_gating(struct drm_device *dev) @@ -3844,10 +3722,6 @@ static void g4x_init_clock_gating(struct drm_device *dev) if (IS_GM45(dev)) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, dspclk_gate); - - /* WaDisableRenderCachePipelinedFlush */ - I915_WRITE(CACHE_MODE_0, - _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); } static void crestline_init_clock_gating(struct drm_device *dev) @@ -3903,11 +3777,44 @@ static void i830_init_clock_gating(struct drm_device *dev) I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); } +static void ibx_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); +} + +static void cpt_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | + DPLS_EDP_PPS_FIX_DIS); + /* Without this, mode sets may fail silently on FDI */ + for_each_pipe(pipe) + I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); +} + void intel_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->display.init_clock_gating(dev); + + if (dev_priv->display.init_pch_clock_gating) + dev_priv->display.init_pch_clock_gating(dev); } /* Starting with Haswell, we have different power wells for @@ -3933,7 +3840,7 @@ void intel_init_power_wells(struct drm_device *dev) if ((well & HSW_PWR_WELL_STATE) == 0) { I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); - if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) + if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20)) DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); } } @@ -3971,6 +3878,11 @@ void intel_init_pm(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { + if (HAS_PCH_IBX(dev)) + dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; + else if (HAS_PCH_CPT(dev)) + dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; + if (IS_GEN5(dev)) { if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) dev_priv->display.update_wm = ironlake_update_wm; @@ -4081,12 +3993,6 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) DRM_ERROR("GT thread status wait timed out\n"); } -static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE, 0); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ -} - static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { u32 forcewake_ack; @@ -4100,7 +4006,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL); + I915_WRITE_NOTRACE(FORCEWAKE, 1); POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), @@ 
-4110,12 +4016,6 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) __gen6_gt_wait_for_thread_c0(dev_priv); } -static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ -} - static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { u32 forcewake_ack; @@ -4129,7 +4029,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), @@ -4173,7 +4073,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } @@ -4211,18 +4111,13 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) return ret; } -static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); -} - static void vlv_force_wake_get(struct drm_i915_private *dev_priv) { if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), FORCEWAKE_ACK_TIMEOUT_MS)) @@ -4233,89 +4128,49 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) static void vlv_force_wake_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } -void intel_gt_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_VALLEYVIEW(dev)) { - vlv_force_wake_reset(dev_priv); - } else if (INTEL_INFO(dev)->gen >= 6) { - __gen6_gt_force_wake_reset(dev_priv); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - __gen6_gt_force_wake_mt_reset(dev_priv); - } -} - void intel_gt_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; spin_lock_init(&dev_priv->gt_lock); - intel_gt_reset(dev); - if (IS_VALLEYVIEW(dev)) { dev_priv->gt.force_wake_get = vlv_force_wake_get; dev_priv->gt.force_wake_put = vlv_force_wake_put; - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; - } else if (IS_GEN6(dev)) { + } else if (INTEL_INFO(dev)->gen >= 6) { dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; - } - INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, - intel_gen6_powersave_work); -} - -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) -{ - 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { - DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); - return -EAGAIN; - } - - I915_WRITE(GEN6_PCODE_DATA, *val); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) { - DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); - return -ETIMEDOUT; + /* IVB configs may use multi-threaded forcewake */ + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + u32 ecobus; + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. + */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + DRM_DEBUG_KMS("Using MT version of forcewake\n"); + dev_priv->gt.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->gt.force_wake_put = + __gen6_gt_force_wake_mt_put; + } + } } - - *val = I915_READ(GEN6_PCODE_DATA); - I915_WRITE(GEN6_PCODE_DATA, 0); - - return 0; } -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) -{ - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - - if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { - DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); - return -EAGAIN; - } - - I915_WRITE(GEN6_PCODE_DATA, val); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) { - DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); - return -ETIMEDOUT; - } - - I915_WRITE(GEN6_PCODE_DATA, 0); - - return 0; -} diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c index 987eb5fdaf39..ecbc5c5dbbbc 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, /* * TLB invalidate requires a post-sync write. */ - flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; + flags |= PIPE_CONTROL_QW_WRITE; } ret = intel_ring_begin(ring, 4); @@ -558,9 +558,12 @@ update_mboxes(struct intel_ring_buffer *ring, u32 seqno, u32 mmio_offset) { - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, mmio_offset); + intel_ring_emit(ring, MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_REGISTER | + MI_SEMAPHORE_UPDATE); intel_ring_emit(ring, seqno); + intel_ring_emit(ring, mmio_offset); } /** @@ -961,9 +964,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) } static int -i965_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 length, - unsigned flags) +i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) { int ret; @@ -974,7 +975,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT | - (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE_I965)); + MI_BATCH_NON_SECURE_I965); intel_ring_emit(ring, offset); intel_ring_advance(ring); @@ -983,8 +984,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, static int i830_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len, - unsigned flags) + u32 offset, u32 len) { int ret; @@ -993,7 +993,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, return ret; intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); intel_ring_emit(ring, offset + len - 8); intel_ring_emit(ring, 0); intel_ring_advance(ring); @@ -1003,8 +1003,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, static int i915_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len, - unsigned flags) + u32 offset, u32 len) { int ret; @@ -1013,7 +1012,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring, return ret; intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); - intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); intel_ring_advance(ring); return 0; @@ -1076,29 +1075,6 @@ static int init_status_page(struct intel_ring_buffer *ring) return ret; } -static int init_phys_hws_pga(struct intel_ring_buffer *ring) -{ - struct drm_i915_private *dev_priv = ring->dev->dev_private; - u32 addr; - - if (!dev_priv->status_page_dmah) { - dev_priv->status_page_dmah = - drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) - return -ENOMEM; - } - - addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(ring->dev)->gen >= 4) - addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; - I915_WRITE(HWS_PGA, addr); - - ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(ring->status_page.page_addr, 0, PAGE_SIZE); - - return 0; -} - static int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -1117,11 +1093,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, ret = init_status_page(ring); if (ret) return ret; - } else { - BUG_ON(ring->id != RCS); - ret = init_phys_hws_pga(ring); - if (ret) - return ret; } obj = i915_gem_alloc_object(dev, ring->size); @@ -1420,48 +1391,19 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, return ret; cmd = MI_FLUSH_DW; - /* - * Bspec vol 1c.5 - video engine command streamer: - * "If ENABLED, all TLBs will be invalidated once the flush - * operation is complete. This bit is only valid when the - * Post-Sync Operation field is a value of 1h or 3h." - */ if (invalidate & I915_GEM_GPU_DOMAINS) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | - MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; intel_ring_emit(ring, cmd); - intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } -static int -hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len, - unsigned flags) -{ - int ret; - - ret = intel_ring_begin(ring, 2); - if (ret) - return ret; - - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | - (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE_HSW)); - /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(ring, offset); - intel_ring_advance(ring); - - return 0; -} - static int gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len, - unsigned flags) + u32 offset, u32 len) { int ret; @@ -1469,9 +1411,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, if (ret) return ret; - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | - (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ intel_ring_emit(ring, offset); intel_ring_advance(ring); @@ -1492,17 +1432,10 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, return ret; cmd = MI_FLUSH_DW; - /* - * Bspec vol 1c.3 - blitter engine command streamer: - * "If ENABLED, all TLBs will be invalidated once the flush - * operation is complete. This bit is only valid when the - * Post-Sync Operation field is a value of 1h or 3h." - */ if (invalidate & I915_GEM_DOMAIN_RENDER) - cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | - MI_FLUSH_DW_OP_STOREDW; + cmd |= MI_INVALIDATE_TLB; intel_ring_emit(ring, cmd); - intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); @@ -1557,9 +1490,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; - if (IS_HASWELL(dev)) - ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_INFO(dev)->gen >= 6) ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; else if (INTEL_INFO(dev)->gen >= 4) ring->dispatch_execbuffer = i965_dispatch_execbuffer; @@ -1570,6 +1501,12 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; + + if (!I915_NEED_GFX_HWS(dev)) { + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); + } + return intel_init_ring_buffer(dev, ring); } @@ -1577,7 +1514,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; - int ret; ring->name = "render ring"; ring->id = RCS; @@ -1615,13 +1551,16 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; + if (!I915_NEED_GFX_HWS(dev)) + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); ring->size = size; ring->effective_size = ring->size; - if (IS_I830(ring->dev) || IS_845G(ring->dev)) + if (IS_I830(ring->dev)) ring->effective_size -= 128; ring->virtual_start = ioremap_wc(start, size); @@ -1631,12 +1570,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) return -ENOMEM; } - if (!I915_NEED_GFX_HWS(dev)) { - ret = init_phys_hws_pga(ring); - if (ret) - return ret; - } - return 0; } @@ -1685,6 +1618,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) } ring->init = init_ring_common; + return intel_init_ring_buffer(dev, ring); } diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h index 
5af65b89765f..2ea7a311a1f0 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -81,9 +81,7 @@ struct intel_ring_buffer { u32 (*get_seqno)(struct intel_ring_buffer *ring, bool lazy_coherency); int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, - u32 offset, u32 length, - unsigned flags); -#define I915_DISPATCH_SECURE 0x1 + u32 offset, u32 length); void (*cleanup)(struct intel_ring_buffer *ring); int (*sync_to)(struct intel_ring_buffer *ring, struct intel_ring_buffer *to, @@ -183,8 +181,6 @@ intel_read_status_page(struct intel_ring_buffer *ring, * The area from dword 0x20 to 0x3ff is available for driver usage. */ #define I915_GEM_HWS_INDEX 0x20 -#define I915_GEM_HWS_SCRATCH_INDEX 0x30 -#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index a4bee83df745..a6ac0b416964 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -1228,30 +1228,6 @@ static void intel_disable_sdvo(struct intel_encoder *encoder) temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) != 0) { - /* HW workaround for IBX, we need to move the port to - * transcoder A before disabling it. */ - if (HAS_PCH_IBX(encoder->base.dev)) { - struct drm_crtc *crtc = encoder->base.crtc; - int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; - - if (temp & SDVO_PIPE_B_SELECT) { - temp &= ~SDVO_PIPE_B_SELECT; - I915_WRITE(intel_sdvo->sdvo_reg, temp); - POSTING_READ(intel_sdvo->sdvo_reg); - - /* Again we need to write this twice. */ - I915_WRITE(intel_sdvo->sdvo_reg, temp); - POSTING_READ(intel_sdvo->sdvo_reg); - - /* Transcoder selection bits only update - * effectively on vblank. */ - if (crtc) - intel_wait_for_vblank(encoder->base.dev, pipe); - else - msleep(50); - } - } - intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); } } @@ -1268,20 +1244,8 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) u8 status; temp = I915_READ(intel_sdvo->sdvo_reg); - if ((temp & SDVO_ENABLE) == 0) { - /* HW workaround for IBX, we need to move the port - * to transcoder A before disabling it. */ - if (HAS_PCH_IBX(dev)) { - struct drm_crtc *crtc = encoder->base.crtc; - int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; - - /* Restore the transcoder select bit. 
*/ - if (pipe == PIPE_B) - temp |= SDVO_PIPE_B_SELECT; - } - + if ((temp & SDVO_ENABLE) == 0) intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); - } for (i = 0; i < 2; i++) intel_wait_for_vblank(dev, intel_crtc->pipe); @@ -1832,7 +1796,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector) intel_sdvo_destroy_enhance_property(connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_sdvo_connector); + kfree(connector); } static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) @@ -1864,7 +1828,7 @@ intel_sdvo_set_property(struct drm_connector *connector, uint8_t cmd; int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; @@ -1919,7 +1883,7 @@ intel_sdvo_set_property(struct drm_connector *connector, } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { temp_value = val; if (intel_sdvo_connector->left == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, intel_sdvo_connector->right, val); if (intel_sdvo_connector->left_margin == temp_value) return 0; @@ -1931,7 +1895,7 @@ intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->right == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, intel_sdvo_connector->left, val); if (intel_sdvo_connector->right_margin == temp_value) return 0; @@ -1943,7 +1907,7 @@ intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->top == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, intel_sdvo_connector->bottom, val); if (intel_sdvo_connector->top_margin == temp_value) return 0; @@ -1955,7 +1919,7 @@ intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } else if (intel_sdvo_connector->bottom == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, intel_sdvo_connector->top, val); if (intel_sdvo_connector->bottom_margin == temp_value) return 0; @@ -2108,24 +2072,17 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, else mapping = &dev_priv->sdvo_mappings[1]; - if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin)) + pin = GMBUS_PORT_DPB; + if (mapping->initialized) pin = mapping->i2c_pin; - else - pin = GMBUS_PORT_DPB; - - sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); - /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow - * our code totally fails once we start using gmbus. Hence fall back to - * bit banging for now. 
*/ - intel_gmbus_force_bit(sdvo->i2c, true); -} - -/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */ -static void -intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo) -{ - intel_gmbus_force_bit(sdvo->i2c, false); + if (intel_gmbus_is_port_valid(pin)) { + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); + intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); + intel_gmbus_force_bit(sdvo->i2c, true); + } else { + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); + } } static bool @@ -2244,7 +2201,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } - intel_sdvo->base.cloneable = true; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (intel_sdvo->is_hdmi) @@ -2275,7 +2231,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; - intel_sdvo->base.cloneable = false; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); @@ -2318,8 +2273,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; } - intel_sdvo->base.cloneable = true; - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); return true; @@ -2350,9 +2303,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; } - /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */ - intel_sdvo->base.cloneable = false; - intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; @@ -2425,6 +2375,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) return true; } +static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) +{ + struct drm_device *dev = intel_sdvo->base.base.dev; + struct drm_connector *connector, *tmp; + + list_for_each_entry_safe(connector, tmp, + &dev->mode_config.connector_list, head) { + if (intel_attached_encoder(connector) == &intel_sdvo->base) + intel_sdvo_destroy(connector); + } +} + static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type) @@ -2465,7 +2427,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; - drm_object_attach_property(&intel_sdvo_connector->base.base.base, + drm_connector_attach_property(&intel_sdvo_connector->base.base, intel_sdvo_connector->tv_format, 0); return true; @@ -2481,7 +2443,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, intel_sdvo_connector->name = \ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ if (!intel_sdvo_connector->name) return false; \ - drm_object_attach_property(&connector->base, \ + drm_connector_attach_property(connector, \ intel_sdvo_connector->name, \ intel_sdvo_connector->cur_##name); \ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ @@ -2518,7 +2480,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->left) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->left, intel_sdvo_connector->left_margin); @@ -2527,7 +2489,7 @@ 
intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->right) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->right, intel_sdvo_connector->right_margin); DRM_DEBUG_KMS("h_overscan: max %d, " @@ -2555,7 +2517,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->top) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->top, intel_sdvo_connector->top_margin); @@ -2565,7 +2527,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->bottom) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->bottom, intel_sdvo_connector->bottom_margin); DRM_DEBUG_KMS("v_overscan: max %d, " @@ -2597,7 +2559,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->dot_crawl) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->dot_crawl, intel_sdvo_connector->cur_dot_crawl); DRM_DEBUG_KMS("dot crawl: current %d\n", response); @@ -2701,8 +2663,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) intel_sdvo->is_sdvob = is_sdvob; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); - if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) - goto err_i2c_bus; + if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { + kfree(intel_sdvo); + return false; + } /* encoder type will be decided later */ intel_encoder = &intel_sdvo->base; @@ -2746,9 +2710,20 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", SDVO_NAME(intel_sdvo)); - goto err; + /* Output_setup can leave behind connectors! */ + goto err_output; } + /* + * Cloning SDVO with anything is often impossible, since the SDVO + * encoder can request a special input timing mode. And even if that's + * not the case we have evidence that cloning a plain unscaled mode with + * VGA doesn't really work. Furthermore the cloning flags are way too + * simplistic anyway to express such constraints, so just give up on + * cloning for SDVO encoders. + */ + intel_sdvo->base.cloneable = false; + /* Only enable the hotplug irq if we need it, to work around noisy * hotplug lines. */ @@ -2759,12 +2734,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) - goto err; + goto err_output; if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, &intel_sdvo->pixel_clock_min, &intel_sdvo->pixel_clock_max)) - goto err; + goto err_output; DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " @@ -2784,11 +2759,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 
'Y' : 'N'); return true; +err_output: + intel_sdvo_output_cleanup(intel_sdvo); + err: drm_encoder_cleanup(&intel_encoder->base); i2c_del_adapter(&intel_sdvo->ddc); -err_i2c_bus: - intel_sdvo_unselect_i2c_bus(intel_sdvo); kfree(intel_sdvo); return false; diff --git a/trunk/drivers/gpu/drm/i915/intel_sprite.c b/trunk/drivers/gpu/drm/i915/intel_sprite.c index 827dcd4edf1c..82f5e5c7009d 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sprite.c +++ b/trunk/drivers/gpu/drm/i915/intel_sprite.c @@ -48,8 +48,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; - unsigned long sprsurf_offset, linear_offset; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + int pixel_size; sprctl = I915_READ(SPRCTL(pipe)); @@ -62,24 +61,33 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; + pixel_size = 4; break; case DRM_FORMAT_XRGB8888: sprctl |= SPRITE_FORMAT_RGBX888; + pixel_size = 4; break; case DRM_FORMAT_YUYV: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; + pixel_size = 2; break; case DRM_FORMAT_YVYU: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; + pixel_size = 2; break; case DRM_FORMAT_UYVY: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; + pixel_size = 2; break; case DRM_FORMAT_VYUY: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; + pixel_size = 2; break; default: - BUG(); + DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); + sprctl |= SPRITE_FORMAT_RGBX888; + pixel_size = 4; + break; } if (obj->tiling_mode != I915_TILING_NONE) @@ -119,28 +127,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); - - linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); - sprsurf_offset = - intel_gen4_compute_offset_xtiled(&x, &y, - fb->bits_per_pixel / 8, - fb->pitches[0]); - linear_offset -= sprsurf_offset; - - /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET - * register */ - if (IS_HASWELL(dev)) - I915_WRITE(SPROFFSET(pipe), (y << 16) | x); - else if (obj->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) { I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); - else - I915_WRITE(SPRLINOFF(pipe), linear_offset); + } else { + unsigned long offset; + offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + I915_WRITE(SPRLINOFF(pipe), offset); + } I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); - if (intel_plane->can_scale) - I915_WRITE(SPRSCALE(pipe), sprscale); + I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); - I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); + I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); POSTING_READ(SPRSURF(pipe)); } @@ -154,8 +152,7 @@ ivb_disable_plane(struct drm_plane *plane) I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); /* Can't leave the scaler enabled... 
*/ - if (intel_plane->can_scale) - I915_WRITE(SPRSCALE(pipe), 0); + I915_WRITE(SPRSCALE(pipe), 0); /* Activate double buffered register update */ I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); POSTING_READ(SPRSURF(pipe)); @@ -228,10 +225,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); - int pipe = intel_plane->pipe; - unsigned long dvssurf_offset, linear_offset; + int pipe = intel_plane->pipe, pixel_size; u32 dvscntr, dvsscale; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); dvscntr = I915_READ(DVSCNTR(pipe)); @@ -244,24 +239,33 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; + pixel_size = 4; break; case DRM_FORMAT_XRGB8888: dvscntr |= DVS_FORMAT_RGBX888; + pixel_size = 4; break; case DRM_FORMAT_YUYV: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; + pixel_size = 2; break; case DRM_FORMAT_YVYU: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; + pixel_size = 2; break; case DRM_FORMAT_UYVY: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; + pixel_size = 2; break; case DRM_FORMAT_VYUY: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; + pixel_size = 2; break; default: - BUG(); + DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); + dvscntr |= DVS_FORMAT_RGBX888; + pixel_size = 4; + break; } if (obj->tiling_mode != I915_TILING_NONE) @@ -285,23 +289,18 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); - - linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); - dvssurf_offset = - intel_gen4_compute_offset_xtiled(&x, &y, - fb->bits_per_pixel / 8, - fb->pitches[0]); - linear_offset -= dvssurf_offset; - - if (obj->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) { I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); - else - I915_WRITE(DVSLINOFF(pipe), linear_offset); + } else { + unsigned long offset; + offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + I915_WRITE(DVSLINOFF(pipe), offset); + } I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); - I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); + I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); POSTING_READ(DVSSURF(pipe)); } @@ -423,8 +422,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj, *old_obj; int pipe = intel_plane->pipe; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); int ret = 0; int x = src_x >> 16, y = src_y >> 16; int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; @@ -439,7 +436,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, src_h = src_h >> 16; /* Pipe must be running... 
*/ - if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) + if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) return -EINVAL; if (crtc_x >= primary_w || crtc_y >= primary_h) @@ -449,15 +446,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (intel_plane->pipe != intel_crtc->pipe) return -EINVAL; - /* Sprite planes can be linear or x-tiled surfaces */ - switch (obj->tiling_mode) { - case I915_TILING_NONE: - case I915_TILING_X: - break; - default: - return -EINVAL; - } - /* * Clamp the width & height into the visible area. Note we don't * try to scale the source if part of the visible region is offscreen. @@ -484,12 +472,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (!crtc_w || !crtc_h) /* Again, nothing to display */ goto out; - /* - * We may not have a scaler, eg. HSW does not have it any more - */ - if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h)) - return -EINVAL; - /* * We can take a larger source and scale it down, but * only so much... 16x is the max on SNB. @@ -683,7 +665,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) switch (INTEL_INFO(dev)->gen) { case 5: case 6: - intel_plane->can_scale = true; intel_plane->max_downscale = 16; intel_plane->update_plane = ilk_update_plane; intel_plane->disable_plane = ilk_disable_plane; @@ -700,10 +681,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) break; case 7: - if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev)) - intel_plane->can_scale = false; - else - intel_plane->can_scale = true; intel_plane->max_downscale = 2; intel_plane->update_plane = ivb_update_plane; intel_plane->disable_plane = ivb_disable_plane; diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c index ea93520c1278..62bb048c135e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_tv.c +++ b/trunk/drivers/gpu/drm/i915/intel_tv.c @@ -1088,11 +1088,13 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, int dspcntr_reg = DSPCNTR(intel_crtc->plane); int pipeconf = I915_READ(pipeconf_reg); int dspcntr = I915_READ(dspcntr_reg); + int dspbase_reg = DSPADDR(intel_crtc->plane); int xpos = 0x0, ypos = 0x0; unsigned int xsize, ysize; /* Pipe must be off here */ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev_priv, intel_crtc->plane); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); /* Wait for vblank for the disable to take effect */ if (IS_GEN2(dev)) @@ -1121,7 +1123,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, I915_WRITE(pipeconf_reg, pipeconf); I915_WRITE(dspcntr_reg, dspcntr); - intel_flush_display_plane(dev_priv, intel_crtc->plane); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); } j = 0; @@ -1289,7 +1292,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector) } intel_tv->tv_format = tv_mode->name; - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, connector->dev->mode_config.tv_mode_property, i); } @@ -1443,7 +1446,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop int ret = 0; bool changed = false; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret < 0) goto out; @@ -1655,18 +1658,18 @@ intel_tv_init(struct drm_device *dev) ARRAY_SIZE(tv_modes), tv_format_names); - 
drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property, + drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, initial_mode); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.tv_left_margin_property, intel_tv->margin[TV_MARGIN_LEFT]); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.tv_top_margin_property, intel_tv->margin[TV_MARGIN_TOP]); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.tv_right_margin_property, intel_tv->margin[TV_MARGIN_RIGHT]); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.tv_bottom_margin_property, intel_tv->margin[TV_MARGIN_BOTTOM]); drm_sysfs_connector_add(connector); diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_main.c b/trunk/drivers/gpu/drm/mgag200/mgag200_main.c index 70dd3c5529d4..d6a1aae33701 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_main.c @@ -133,8 +133,6 @@ static int mga_vram_init(struct mga_device *mdev) { void __iomem *mem; struct apertures_struct *aper = alloc_apertures(1); - if (!aper) - return -ENOMEM; /* BAR 0 is VRAM */ mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0); @@ -142,9 +140,9 @@ static int mga_vram_init(struct mga_device *mdev) aper->ranges[0].base = mdev->mc.vram_base; aper->ranges[0].size = mdev->mc.vram_window; + aper->count = 1; remove_conflicting_framebuffers(aper, "mgafb", true); - kfree(aper); if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window, "mgadrmfb_vram")) { diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c b/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c index 49d60a620122..1504699666c4 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size, ttm_bo_type_device, &mgabo->placement, - align >> PAGE_SHIFT, false, NULL, acc_size, + align >> PAGE_SHIFT, 0, false, NULL, acc_size, NULL, mgag200_bo_ttm_destroy); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/Makefile b/trunk/drivers/gpu/drm/nouveau/Makefile index ab25752a0b1e..a990df4d6c04 100644 --- a/trunk/drivers/gpu/drm/nouveau/Makefile +++ b/trunk/drivers/gpu/drm/nouveau/Makefile @@ -11,7 +11,6 @@ nouveau-y := core/core/client.o nouveau-y += core/core/engctx.o nouveau-y += core/core/engine.o nouveau-y += core/core/enum.o -nouveau-y += core/core/falcon.o nouveau-y += core/core/gpuobj.o nouveau-y += core/core/handle.o nouveau-y += core/core/mm.o @@ -30,7 +29,6 @@ nouveau-y += core/subdev/bios/base.o nouveau-y += core/subdev/bios/bit.o nouveau-y += core/subdev/bios/conn.o nouveau-y += core/subdev/bios/dcb.o -nouveau-y += core/subdev/bios/disp.o nouveau-y += core/subdev/bios/dp.o nouveau-y += core/subdev/bios/extdev.o nouveau-y += core/subdev/bios/gpio.o @@ -66,19 +64,9 @@ nouveau-y += core/subdev/devinit/nv50.o nouveau-y += core/subdev/fb/base.o nouveau-y += core/subdev/fb/nv04.o nouveau-y += core/subdev/fb/nv10.o -nouveau-y += core/subdev/fb/nv1a.o nouveau-y += core/subdev/fb/nv20.o -nouveau-y += core/subdev/fb/nv25.o nouveau-y += core/subdev/fb/nv30.o -nouveau-y += core/subdev/fb/nv35.o -nouveau-y += core/subdev/fb/nv36.o nouveau-y += core/subdev/fb/nv40.o -nouveau-y += core/subdev/fb/nv41.o -nouveau-y += 
core/subdev/fb/nv44.o -nouveau-y += core/subdev/fb/nv46.o -nouveau-y += core/subdev/fb/nv47.o -nouveau-y += core/subdev/fb/nv49.o -nouveau-y += core/subdev/fb/nv4e.o nouveau-y += core/subdev/fb/nv50.o nouveau-y += core/subdev/fb/nvc0.o nouveau-y += core/subdev/gpio/base.o @@ -123,10 +111,7 @@ nouveau-y += core/engine/dmaobj/base.o nouveau-y += core/engine/dmaobj/nv04.o nouveau-y += core/engine/dmaobj/nv50.o nouveau-y += core/engine/dmaobj/nvc0.o -nouveau-y += core/engine/dmaobj/nvd0.o nouveau-y += core/engine/bsp/nv84.o -nouveau-y += core/engine/bsp/nvc0.o -nouveau-y += core/engine/bsp/nve0.o nouveau-y += core/engine/copy/nva3.o nouveau-y += core/engine/copy/nvc0.o nouveau-y += core/engine/copy/nve0.o @@ -134,21 +119,7 @@ nouveau-y += core/engine/crypt/nv84.o nouveau-y += core/engine/crypt/nv98.o nouveau-y += core/engine/disp/nv04.o nouveau-y += core/engine/disp/nv50.o -nouveau-y += core/engine/disp/nv84.o -nouveau-y += core/engine/disp/nv94.o -nouveau-y += core/engine/disp/nva0.o -nouveau-y += core/engine/disp/nva3.o nouveau-y += core/engine/disp/nvd0.o -nouveau-y += core/engine/disp/nve0.o -nouveau-y += core/engine/disp/dacnv50.o -nouveau-y += core/engine/disp/hdanva3.o -nouveau-y += core/engine/disp/hdanvd0.o -nouveau-y += core/engine/disp/hdminv84.o -nouveau-y += core/engine/disp/hdminva3.o -nouveau-y += core/engine/disp/hdminvd0.o -nouveau-y += core/engine/disp/sornv50.o -nouveau-y += core/engine/disp/sornv94.o -nouveau-y += core/engine/disp/sornvd0.o nouveau-y += core/engine/disp/vga.o nouveau-y += core/engine/fifo/base.o nouveau-y += core/engine/fifo/nv04.o @@ -180,14 +151,11 @@ nouveau-y += core/engine/mpeg/nv40.o nouveau-y += core/engine/mpeg/nv50.o nouveau-y += core/engine/mpeg/nv84.o nouveau-y += core/engine/ppp/nv98.o -nouveau-y += core/engine/ppp/nvc0.o nouveau-y += core/engine/software/nv04.o nouveau-y += core/engine/software/nv10.o nouveau-y += core/engine/software/nv50.o nouveau-y += core/engine/software/nvc0.o nouveau-y += core/engine/vp/nv84.o -nouveau-y += core/engine/vp/nvc0.o -nouveau-y += core/engine/vp/nve0.o # drm/core nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o @@ -198,7 +166,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o # drm/kms nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o -nouveau-y += nouveau_connector.o nouveau_dp.o +nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o # drm/kms/nv04:nv50 @@ -207,7 +175,9 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o # drm/kms/nv50- -nouveau-y += nv50_display.o +nouveau-y += nv50_display.o nvd0_display.o +nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o +nouveau-y += nv50_evo.o # drm/pm nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c b/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c index 84c71fad2b6c..e41b10d5eb59 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c +++ b/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c @@ -189,21 +189,6 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend) return nouveau_gpuobj_fini(&engctx->base, suspend); } -int -_nouveau_engctx_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nouveau_engctx *engctx; - int ret; - - ret = 
nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256, - NVOBJ_FLAG_ZERO_ALLOC, &engctx); - *pobject = nv_object(engctx); - return ret; -} - void _nouveau_engctx_dtor(struct nouveau_object *object) { diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c b/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c deleted file mode 100644 index 6b0843c33877..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - -#include - -u32 -_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) -{ - struct nouveau_falcon *falcon = (void *)object; - return nv_rd32(falcon, falcon->addr + addr); -} - -void -_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data) -{ - struct nouveau_falcon *falcon = (void *)object; - nv_wr32(falcon, falcon->addr + addr, data); -} - -int -_nouveau_falcon_init(struct nouveau_object *object) -{ - struct nouveau_device *device = nv_device(object); - struct nouveau_falcon *falcon = (void *)object; - const struct firmware *fw; - char name[32] = "internal"; - int ret, i; - u32 caps; - - /* enable engine, and determine its capabilities */ - ret = nouveau_engine_init(&falcon->base); - if (ret) - return ret; - - if (device->chipset < 0xa3 || - device->chipset == 0xaa || device->chipset == 0xac) { - falcon->version = 0; - falcon->secret = (falcon->addr == 0x087000) ? 
1 : 0; - } else { - caps = nv_ro32(falcon, 0x12c); - falcon->version = (caps & 0x0000000f); - falcon->secret = (caps & 0x00000030) >> 4; - } - - caps = nv_ro32(falcon, 0x108); - falcon->code.limit = (caps & 0x000001ff) << 8; - falcon->data.limit = (caps & 0x0003fe00) >> 1; - - nv_debug(falcon, "falcon version: %d\n", falcon->version); - nv_debug(falcon, "secret level: %d\n", falcon->secret); - nv_debug(falcon, "code limit: %d\n", falcon->code.limit); - nv_debug(falcon, "data limit: %d\n", falcon->data.limit); - - /* wait for 'uc halted' to be signalled before continuing */ - if (falcon->secret) { - nv_wait(falcon, 0x008, 0x00000010, 0x00000010); - nv_wo32(falcon, 0x004, 0x00000010); - } - - /* disable all interrupts */ - nv_wo32(falcon, 0x014, 0xffffffff); - - /* no default ucode provided by the engine implementation, try and - * locate a "self-bootstrapping" firmware image for the engine - */ - if (!falcon->code.data) { - snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x", - device->chipset, falcon->addr >> 12); - - ret = request_firmware(&fw, name, &device->pdev->dev); - if (ret == 0) { - falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); - falcon->code.size = fw->size; - falcon->data.data = NULL; - falcon->data.size = 0; - release_firmware(fw); - } - - falcon->external = true; - } - - /* next step is to try and load "static code/data segment" firmware - * images for the engine - */ - if (!falcon->code.data) { - snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd", - device->chipset, falcon->addr >> 12); - - ret = request_firmware(&fw, name, &device->pdev->dev); - if (ret) { - nv_error(falcon, "unable to load firmware data\n"); - return ret; - } - - falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL); - falcon->data.size = fw->size; - release_firmware(fw); - if (!falcon->data.data) - return -ENOMEM; - - snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc", - device->chipset, falcon->addr >> 12); - - ret = request_firmware(&fw, name, &device->pdev->dev); - if (ret) { - nv_error(falcon, "unable to load firmware code\n"); - return ret; - } - - falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); - falcon->code.size = fw->size; - release_firmware(fw); - if (!falcon->code.data) - return -ENOMEM; - } - - nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ? 
- "static code/data segments" : "self-bootstrapping"); - - /* ensure any "self-bootstrapping" firmware image is in vram */ - if (!falcon->data.data && !falcon->core) { - ret = nouveau_gpuobj_new(object->parent, NULL, - falcon->code.size, 256, 0, - &falcon->core); - if (ret) { - nv_error(falcon, "core allocation failed, %d\n", ret); - return ret; - } - - for (i = 0; i < falcon->code.size; i += 4) - nv_wo32(falcon->core, i, falcon->code.data[i / 4]); - } - - /* upload firmware bootloader (or the full code segments) */ - if (falcon->core) { - if (device->card_type < NV_C0) - nv_wo32(falcon, 0x618, 0x04000000); - else - nv_wo32(falcon, 0x618, 0x00000114); - nv_wo32(falcon, 0x11c, 0); - nv_wo32(falcon, 0x110, falcon->core->addr >> 8); - nv_wo32(falcon, 0x114, 0); - nv_wo32(falcon, 0x118, 0x00006610); - } else { - if (falcon->code.size > falcon->code.limit || - falcon->data.size > falcon->data.limit) { - nv_error(falcon, "ucode exceeds falcon limit(s)\n"); - return -EINVAL; - } - - if (falcon->version < 3) { - nv_wo32(falcon, 0xff8, 0x00100000); - for (i = 0; i < falcon->code.size / 4; i++) - nv_wo32(falcon, 0xff4, falcon->code.data[i]); - } else { - nv_wo32(falcon, 0x180, 0x01000000); - for (i = 0; i < falcon->code.size / 4; i++) { - if ((i & 0x3f) == 0) - nv_wo32(falcon, 0x188, i >> 6); - nv_wo32(falcon, 0x184, falcon->code.data[i]); - } - } - } - - /* upload data segment (if necessary), zeroing the remainder */ - if (falcon->version < 3) { - nv_wo32(falcon, 0xff8, 0x00000000); - for (i = 0; !falcon->core && i < falcon->data.size / 4; i++) - nv_wo32(falcon, 0xff4, falcon->data.data[i]); - for (; i < falcon->data.limit; i += 4) - nv_wo32(falcon, 0xff4, 0x00000000); - } else { - nv_wo32(falcon, 0x1c0, 0x01000000); - for (i = 0; !falcon->core && i < falcon->data.size / 4; i++) - nv_wo32(falcon, 0x1c4, falcon->data.data[i]); - for (; i < falcon->data.limit / 4; i++) - nv_wo32(falcon, 0x1c4, 0x00000000); - } - - /* start it running */ - nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */ - nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */ - nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */ - nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */ - return 0; -} - -int -_nouveau_falcon_fini(struct nouveau_object *object, bool suspend) -{ - struct nouveau_falcon *falcon = (void *)object; - - if (!suspend) { - nouveau_gpuobj_ref(NULL, &falcon->core); - if (falcon->external) { - kfree(falcon->data.data); - kfree(falcon->code.data); - falcon->code.data = NULL; - } - } - - nv_mo32(falcon, 0x048, 0x00000003, 0x00000000); - nv_wo32(falcon, 0x014, 0xffffffff); - - return nouveau_engine_fini(&falcon->base, suspend); -} - -int -nouveau_falcon_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, u32 addr, bool enable, - const char *iname, const char *fname, - int length, void **pobject) -{ - struct nouveau_falcon *falcon; - int ret; - - ret = nouveau_engine_create_(parent, engine, oclass, enable, iname, - fname, length, pobject); - falcon = *pobject; - if (ret) - return ret; - - falcon->addr = addr; - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c index 560b2214cf1c..70586fde69cf 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c +++ b/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c @@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend) } u32 -_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr) +_nouveau_gpuobj_rd32(struct 
nouveau_object *object, u32 addr) { struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); @@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr) } void -_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/mm.c b/trunk/drivers/gpu/drm/nouveau/core/core/mm.c index 0261a11b2ae0..a6d3cd6490f7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/core/mm.c +++ b/trunk/drivers/gpu/drm/nouveau/core/core/mm.c @@ -234,18 +234,15 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block) int nouveau_mm_fini(struct nouveau_mm *mm) { - if (nouveau_mm_initialised(mm)) { - struct nouveau_mm_node *node, *heap = - list_first_entry(&mm->nodes, typeof(*heap), nl_entry); - int nodes = 0; - - list_for_each_entry(node, &mm->nodes, nl_entry) { - if (WARN_ON(nodes++ == mm->heap_nodes)) - return -EBUSY; - } + struct nouveau_mm_node *node, *heap = + list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry); + int nodes = 0; - kfree(heap); + list_for_each_entry(node, &mm->nodes, nl_entry) { + if (WARN_ON(nodes++ == mm->heap_nodes)) + return -EBUSY; } + kfree(heap); return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c index 1d9f614cb97d..66f7dfd907ee 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c @@ -22,13 +22,18 @@ * Authors: Ben Skeggs */ -#include +#include #include +#include #include struct nv84_bsp_priv { - struct nouveau_engine base; + struct nouveau_bsp base; +}; + +struct nv84_bsp_chan { + struct nouveau_bsp_chan base; }; /******************************************************************************* @@ -44,16 +49,61 @@ nv84_bsp_sclass[] = { * BSP context ******************************************************************************/ +static int +nv84_bsp_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_bsp_chan *priv; + int ret; + + ret = nouveau_bsp_context_create(parent, engine, oclass, NULL, + 0, 0, 0, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +static void +nv84_bsp_context_dtor(struct nouveau_object *object) +{ + struct nv84_bsp_chan *priv = (void *)object; + nouveau_bsp_context_destroy(&priv->base); +} + +static int +nv84_bsp_context_init(struct nouveau_object *object) +{ + struct nv84_bsp_chan *priv = (void *)object; + int ret; + + ret = nouveau_bsp_context_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv84_bsp_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv84_bsp_chan *priv = (void *)object; + return nouveau_bsp_context_fini(&priv->base, suspend); +} + static struct nouveau_oclass nv84_bsp_cclass = { .handle = NV_ENGCTX(BSP, 0x84), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nv84_bsp_context_ctor, + .dtor = nv84_bsp_context_dtor, + .init = nv84_bsp_context_init, + .fini = 
nv84_bsp_context_fini, + .rd32 = _nouveau_bsp_context_rd32, + .wr32 = _nouveau_bsp_context_wr32, }, }; @@ -61,6 +111,11 @@ nv84_bsp_cclass = { * BSP engine/subdev functions ******************************************************************************/ +static void +nv84_bsp_intr(struct nouveau_subdev *subdev) +{ +} + static int nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -69,25 +124,52 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv84_bsp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PBSP", "bsp", &priv); + ret = nouveau_bsp_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x04008000; + nv_subdev(priv)->intr = nv84_bsp_intr; nv_engine(priv)->cclass = &nv84_bsp_cclass; nv_engine(priv)->sclass = nv84_bsp_sclass; return 0; } +static void +nv84_bsp_dtor(struct nouveau_object *object) +{ + struct nv84_bsp_priv *priv = (void *)object; + nouveau_bsp_destroy(&priv->base); +} + +static int +nv84_bsp_init(struct nouveau_object *object) +{ + struct nv84_bsp_priv *priv = (void *)object; + int ret; + + ret = nouveau_bsp_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv84_bsp_fini(struct nouveau_object *object, bool suspend) +{ + struct nv84_bsp_priv *priv = (void *)object; + return nouveau_bsp_fini(&priv->base, suspend); +} + struct nouveau_oclass nv84_bsp_oclass = { .handle = NV_ENGINE(BSP, 0x84), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv84_bsp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = nv84_bsp_dtor, + .init = nv84_bsp_init, + .fini = nv84_bsp_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c deleted file mode 100644 index 0a5aa6bb0870..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Maarten Lankhorst - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
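An aside on the pattern used throughout these hunks: every engine and context class is described by an oclass whose ofuncs table supplies ctor/dtor/init/fini (and optionally rd32/wr32) hooks, and the chipset-specific files above simply point those hooks at their own implementations. The sketch below is a self-contained userspace model of that vtable idiom; the type and function names are illustrative, not the driver's.

#include <stdlib.h>

/* Minimal stand-in for nouveau's ofuncs: per-class constructor, destructor,
 * init and fini hooks gathered in one table. */
struct demo_ofuncs {
	int  (*ctor)(void **pobject);
	void (*dtor)(void *object);
	int  (*init)(void *object);
	int  (*fini)(void *object, int suspend);
};

/* A class binds a handle to its operation table, as the oclass
 * definitions above do. */
struct demo_oclass {
	unsigned int handle;
	const struct demo_ofuncs *ofuncs;
};

static int  demo_ctor(void **pobject) { *pobject = calloc(1, 64); return *pobject ? 0 : -1; }
static void demo_dtor(void *object)   { free(object); }
static int  demo_init(void *object)   { (void)object; return 0; }
static int  demo_fini(void *object, int suspend) { (void)object; (void)suspend; return 0; }

static const struct demo_ofuncs demo_ofuncs_tbl = {
	.ctor = demo_ctor,
	.dtor = demo_dtor,
	.init = demo_init,
	.fini = demo_fini,
};

static const struct demo_oclass demo_class = { 0x84, &demo_ofuncs_tbl };

int main(void)
{
	void *obj = NULL;
	if (demo_class.ofuncs->ctor(&obj) == 0) {
		demo_class.ofuncs->init(obj);
		demo_class.ofuncs->fini(obj, 0);
		demo_class.ofuncs->dtor(obj);
	}
	return 0;
}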
- * - * Authors: Maarten Lankhorst - */ - -#include - -#include - -struct nvc0_bsp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * BSP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_bsp_sclass[] = { - { 0x90b1, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PBSP context - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_bsp_cclass = { - .handle = NV_ENGCTX(BSP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PBSP engine/subdev functions - ******************************************************************************/ - -static int -nvc0_bsp_init(struct nouveau_object *object) -{ - struct nvc0_bsp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x084010, 0x0000fff2); - nv_wr32(priv, 0x08401c, 0x0000fff2); - return 0; -} - -static int -nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvc0_bsp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true, - "PBSP", "bsp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00008000; - nv_engine(priv)->cclass = &nvc0_bsp_cclass; - nv_engine(priv)->sclass = nvc0_bsp_sclass; - return 0; -} - -struct nouveau_oclass -nvc0_bsp_oclass = { - .handle = NV_ENGINE(BSP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvc0_bsp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nvc0_bsp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c deleted file mode 100644 index d4f23bbd75b4..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include - -#include - -struct nve0_bsp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * BSP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nve0_bsp_sclass[] = { - { 0x95b1, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PBSP context - ******************************************************************************/ - -static struct nouveau_oclass -nve0_bsp_cclass = { - .handle = NV_ENGCTX(BSP, 0xe0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PBSP engine/subdev functions - ******************************************************************************/ - -static int -nve0_bsp_init(struct nouveau_object *object) -{ - struct nve0_bsp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x084010, 0x0000fff2); - nv_wr32(priv, 0x08401c, 0x0000fff2); - return 0; -} - -static int -nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nve0_bsp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true, - "PBSP", "bsp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00008000; - nv_engine(priv)->cclass = &nve0_bsp_cclass; - nv_engine(priv)->sclass = nve0_bsp_sclass; - return 0; -} - -struct nouveau_oclass -nve0_bsp_oclass = { - .handle = NV_ENGINE(BSP, 0xe0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nve0_bsp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nve0_bsp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c index 283248c7b050..4df6da0af740 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c @@ -22,9 +22,10 @@ * Authors: Ben Skeggs */ -#include -#include +#include #include +#include +#include #include #include @@ -35,7 +36,11 @@ #include "fuc/nva3.fuc.h" struct nva3_copy_priv { - struct nouveau_falcon base; + struct nouveau_copy base; +}; + +struct nva3_copy_chan { + struct nouveau_copy_chan base; }; /******************************************************************************* @@ -52,16 +57,34 @@ nva3_copy_sclass[] = { * PCOPY context ******************************************************************************/ +static int +nva3_copy_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct 
nouveau_object **pobject) +{ + struct nva3_copy_chan *priv; + int ret; + + ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0, + NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_oclass nva3_copy_cclass = { .handle = NV_ENGCTX(COPY0, 0xa3), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, + .ctor = nva3_copy_context_ctor, + .dtor = _nouveau_copy_context_dtor, + .init = _nouveau_copy_context_init, + .fini = _nouveau_copy_context_fini, + .rd32 = _nouveau_copy_context_rd32, + .wr32 = _nouveau_copy_context_wr32, }, }; @@ -77,40 +100,41 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = { {} }; -void +static void nva3_copy_intr(struct nouveau_subdev *subdev) { struct nouveau_fifo *pfifo = nouveau_fifo(subdev); struct nouveau_engine *engine = nv_engine(subdev); - struct nouveau_falcon *falcon = (void *)subdev; struct nouveau_object *engctx; - u32 dispatch = nv_ro32(falcon, 0x01c); - u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); - u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff; - u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff; - u32 addr = nv_ro32(falcon, 0x040) >> 16; + struct nva3_copy_priv *priv = (void *)subdev; + u32 dispatch = nv_rd32(priv, 0x10401c); + u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16); + u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff; + u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff; + u32 addr = nv_rd32(priv, 0x104040) >> 16; u32 mthd = (addr & 0x07ff) << 2; u32 subc = (addr & 0x3800) >> 11; - u32 data = nv_ro32(falcon, 0x044); + u32 data = nv_rd32(priv, 0x104044); int chid; engctx = nouveau_engctx_get(engine, inst); chid = pfifo->chid(pfifo, engctx); if (stat & 0x00000040) { - nv_error(falcon, "DISPATCH_ERROR ["); + nv_error(priv, "DISPATCH_ERROR ["); nouveau_enum_print(nva3_copy_isr_error_name, ssta); printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n", chid, inst << 12, subc, mthd, data); - nv_wo32(falcon, 0x004, 0x00000040); + nv_wr32(priv, 0x104004, 0x00000040); stat &= ~0x00000040; } if (stat) { - nv_error(falcon, "unhandled intr 0x%08x\n", stat); - nv_wo32(falcon, 0x004, stat); + nv_error(priv, "unhandled intr 0x%08x\n", stat); + nv_wr32(priv, 0x104004, stat); } + nv50_fb_trap(nouveau_fb(priv), 1); nouveau_engctx_put(engctx); } @@ -130,8 +154,7 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nva3_copy_priv *priv; int ret; - ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable, - "PCE0", "copy0", &priv); + ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -141,22 +164,59 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine, nv_engine(priv)->cclass = &nva3_copy_cclass; nv_engine(priv)->sclass = nva3_copy_sclass; nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush; - nv_falcon(priv)->code.data = nva3_pcopy_code; - nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code); - nv_falcon(priv)->data.data = nva3_pcopy_data; - nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data); return 0; } +static int +nva3_copy_init(struct nouveau_object *object) +{ + struct nva3_copy_priv *priv = (void *)object; + int ret, i; + + ret = 
nouveau_copy_init(&priv->base); + if (ret) + return ret; + + /* disable all interrupts */ + nv_wr32(priv, 0x104014, 0xffffffff); + + /* upload ucode */ + nv_wr32(priv, 0x1041c0, 0x01000000); + for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++) + nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]); + + nv_wr32(priv, 0x104180, 0x01000000); + for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, 0x104188, i >> 6); + nv_wr32(priv, 0x104184, nva3_pcopy_code[i]); + } + + /* start it running */ + nv_wr32(priv, 0x10410c, 0x00000000); + nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */ + nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */ + return 0; +} + +static int +nva3_copy_fini(struct nouveau_object *object, bool suspend) +{ + struct nva3_copy_priv *priv = (void *)object; + + nv_mask(priv, 0x104048, 0x00000003, 0x00000000); + nv_wr32(priv, 0x104014, 0xffffffff); + + return nouveau_copy_fini(&priv->base, suspend); +} + struct nouveau_oclass nva3_copy_oclass = { .handle = NV_ENGINE(COPY0, 0xa3), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nva3_copy_ctor, - .dtor = _nouveau_falcon_dtor, - .init = _nouveau_falcon_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, + .dtor = _nouveau_copy_dtor, + .init = nva3_copy_init, + .fini = nva3_copy_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c index b3ed2737e21f..06d4a8791055 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c @@ -22,9 +22,10 @@ * Authors: Ben Skeggs */ -#include -#include +#include #include +#include +#include #include #include @@ -32,7 +33,11 @@ #include "fuc/nvc0.fuc.h" struct nvc0_copy_priv { - struct nouveau_falcon base; + struct nouveau_copy base; +}; + +struct nvc0_copy_chan { + struct nouveau_copy_chan base; }; /******************************************************************************* @@ -55,14 +60,32 @@ nvc0_copy1_sclass[] = { * PCOPY context ******************************************************************************/ +static int +nvc0_copy_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_copy_chan *priv; + int ret; + + ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, + 256, NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_ofuncs nvc0_copy_context_ofuncs = { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, + .ctor = nvc0_copy_context_ctor, + .dtor = _nouveau_copy_context_dtor, + .init = _nouveau_copy_context_init, + .fini = _nouveau_copy_context_fini, + .rd32 = _nouveau_copy_context_rd32, + .wr32 = _nouveau_copy_context_wr32, }; static struct nouveau_oclass @@ -81,18 +104,50 @@ nvc0_copy1_cclass = { * PCOPY engine/subdev functions ******************************************************************************/ -static int -nvc0_copy_init(struct nouveau_object *object) +static const struct nouveau_enum nvc0_copy_isr_error_name[] = { + { 0x0001, "ILLEGAL_MTHD" }, + { 0x0002, "INVALID_ENUM" }, + { 0x0003, "INVALID_BITFIELD" }, + {} +}; + +static void +nvc0_copy_intr(struct 
nouveau_subdev *subdev) { - struct nvc0_copy_priv *priv = (void *)object; - int ret; + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0; + struct nvc0_copy_priv *priv = (void *)subdev; + u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000)); + u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000)); + u32 stat = intr & disp & ~(disp >> 16); + u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff; + u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff; + u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16; + u32 mthd = (addr & 0x07ff) << 2; + u32 subc = (addr & 0x3800) >> 11; + u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000)); + int chid; - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); - nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0); - return 0; + if (stat & 0x00000040) { + nv_error(priv, "DISPATCH_ERROR ["); + nouveau_enum_print(nvc0_copy_isr_error_name, ssta); + printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n", + chid, (u64)inst << 12, subc, mthd, data); + nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040); + stat &= ~0x00000040; + } + + if (stat) { + nv_error(priv, "unhandled intr 0x%08x\n", stat); + nv_wr32(priv, 0x104004 + (idx * 0x1000), stat); + } + + nouveau_engctx_put(engctx); } static int @@ -106,20 +161,15 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (nv_rd32(parent, 0x022500) & 0x00000100) return -ENODEV; - ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true, - "PCE0", "copy0", &priv); + ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00000040; - nv_subdev(priv)->intr = nva3_copy_intr; + nv_subdev(priv)->intr = nvc0_copy_intr; nv_engine(priv)->cclass = &nvc0_copy0_cclass; nv_engine(priv)->sclass = nvc0_copy0_sclass; - nv_falcon(priv)->code.data = nvc0_pcopy_code; - nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code); - nv_falcon(priv)->data.data = nvc0_pcopy_data; - nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data); return 0; } @@ -134,33 +184,72 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (nv_rd32(parent, 0x022500) & 0x00000200) return -ENODEV; - ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true, - "PCE1", "copy1", &priv); + ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00000080; - nv_subdev(priv)->intr = nva3_copy_intr; + nv_subdev(priv)->intr = nvc0_copy_intr; nv_engine(priv)->cclass = &nvc0_copy1_cclass; nv_engine(priv)->sclass = nvc0_copy1_sclass; - nv_falcon(priv)->code.data = nvc0_pcopy_code; - nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code); - nv_falcon(priv)->data.data = nvc0_pcopy_data; - nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data); return 0; } +static int +nvc0_copy_init(struct nouveau_object *object) +{ + int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0; + struct nvc0_copy_priv *priv = (void *)object; + int ret, i; + + ret = nouveau_copy_init(&priv->base); + if (ret) + return ret; + + /* disable all interrupts */ + nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff); + + /* upload ucode */ + nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000); + for 
(i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++) + nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]); + + nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000); + for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6); + nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]); + } + + /* start it running */ + nv_wr32(priv, 0x104084 + (idx * 0x1000), idx); + nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000); + nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */ + nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */ + return 0; +} + +static int +nvc0_copy_fini(struct nouveau_object *object, bool suspend) +{ + int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0; + struct nvc0_copy_priv *priv = (void *)object; + + nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000); + nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff); + + return nouveau_copy_fini(&priv->base, suspend); +} + struct nouveau_oclass nvc0_copy0_oclass = { .handle = NV_ENGINE(COPY0, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_copy0_ctor, - .dtor = _nouveau_falcon_dtor, + .dtor = _nouveau_copy_dtor, .init = nvc0_copy_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, + .fini = nvc0_copy_fini, }, }; @@ -169,10 +258,8 @@ nvc0_copy1_oclass = { .handle = NV_ENGINE(COPY1, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_copy1_ctor, - .dtor = _nouveau_falcon_dtor, + .dtor = _nouveau_copy_dtor, .init = nvc0_copy_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, + .fini = nvc0_copy_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c index dbbe9e8998fe..2017c1579ac5 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c @@ -30,7 +30,11 @@ #include struct nve0_copy_priv { - struct nouveau_engine base; + struct nouveau_copy base; +}; + +struct nve0_copy_chan { + struct nouveau_copy_chan base; }; /******************************************************************************* @@ -47,14 +51,32 @@ nve0_copy_sclass[] = { * PCOPY context ******************************************************************************/ +static int +nve0_copy_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_copy_chan *priv; + int ret; + + ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, + 256, NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_ofuncs nve0_copy_context_ofuncs = { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nve0_copy_context_ctor, + .dtor = _nouveau_copy_context_dtor, + .init = _nouveau_copy_context_init, + .fini = _nouveau_copy_context_fini, + .rd32 = _nouveau_copy_context_rd32, + .wr32 = _nouveau_copy_context_wr32, }; static struct nouveau_oclass @@ -78,8 +100,7 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (nv_rd32(parent, 0x022500) & 0x00000100) return -ENODEV; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PCE0", 
"copy0", &priv); + ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -101,8 +122,7 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (nv_rd32(parent, 0x022500) & 0x00000200) return -ENODEV; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PCE1", "copy1", &priv); + ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -118,9 +138,9 @@ nve0_copy0_oclass = { .handle = NV_ENGINE(COPY0, 0xe0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nve0_copy0_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = _nouveau_copy_dtor, + .init = _nouveau_copy_init, + .fini = _nouveau_copy_fini, }, }; @@ -129,8 +149,8 @@ nve0_copy1_oclass = { .handle = NV_ENGINE(COPY1, 0xe0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nve0_copy1_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = _nouveau_copy_dtor, + .init = _nouveau_copy_init, + .fini = _nouveau_copy_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c index b97490512723..1d85e5b66ca0 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c @@ -34,7 +34,11 @@ #include struct nv84_crypt_priv { - struct nouveau_engine base; + struct nouveau_crypt base; +}; + +struct nv84_crypt_chan { + struct nouveau_crypt_chan base; }; /******************************************************************************* @@ -83,16 +87,34 @@ nv84_crypt_sclass[] = { * PCRYPT context ******************************************************************************/ +static int +nv84_crypt_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_crypt_chan *priv; + int ret; + + ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256, + 0, NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_oclass nv84_crypt_cclass = { .handle = NV_ENGCTX(CRYPT, 0x84), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nv84_crypt_context_ctor, + .dtor = _nouveau_crypt_context_dtor, + .init = _nouveau_crypt_context_init, + .fini = _nouveau_crypt_context_fini, + .rd32 = _nouveau_crypt_context_rd32, + .wr32 = _nouveau_crypt_context_wr32, }, }; @@ -135,6 +157,7 @@ nv84_crypt_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x102130, stat); nv_wr32(priv, 0x10200c, 0x10); + nv50_fb_trap(nouveau_fb(priv), 1); nouveau_engctx_put(engctx); } @@ -153,8 +176,7 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv84_crypt_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PCRYPT", "crypt", &priv); + ret = nouveau_crypt_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -173,7 +195,7 @@ nv84_crypt_init(struct nouveau_object *object) struct nv84_crypt_priv *priv = (void *)object; int ret; - ret = nouveau_engine_init(&priv->base); + ret = 
nouveau_crypt_init(&priv->base); if (ret) return ret; @@ -188,8 +210,8 @@ nv84_crypt_oclass = { .handle = NV_ENGINE(CRYPT, 0x84), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv84_crypt_ctor, - .dtor = _nouveau_engine_dtor, + .dtor = _nouveau_crypt_dtor, .init = nv84_crypt_init, - .fini = _nouveau_engine_fini, + .fini = _nouveau_crypt_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c index 21986f3bf0c8..9e3876c89b96 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -37,7 +36,11 @@ #include "fuc/nv98.fuc.h" struct nv98_crypt_priv { - struct nouveau_falcon base; + struct nouveau_crypt base; +}; + +struct nv98_crypt_chan { + struct nouveau_crypt_chan base; }; /******************************************************************************* @@ -54,16 +57,34 @@ nv98_crypt_sclass[] = { * PCRYPT context ******************************************************************************/ +static int +nv98_crypt_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv98_crypt_chan *priv; + int ret; + + ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256, + 256, NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_oclass nv98_crypt_cclass = { .handle = NV_ENGCTX(CRYPT, 0x98), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, + .ctor = nv98_crypt_context_ctor, + .dtor = _nouveau_crypt_context_dtor, + .init = _nouveau_crypt_context_init, + .fini = _nouveau_crypt_context_fini, + .rd32 = _nouveau_crypt_context_rd32, + .wr32 = _nouveau_crypt_context_wr32, }, }; @@ -113,6 +134,7 @@ nv98_crypt_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x087004, stat); } + nv50_fb_trap(nouveau_fb(priv), 1); nouveau_engctx_put(engctx); } @@ -131,8 +153,7 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_crypt_priv *priv; int ret; - ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true, - "PCRYPT", "crypt", &priv); + ret = nouveau_crypt_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -142,10 +163,36 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine, nv_engine(priv)->cclass = &nv98_crypt_cclass; nv_engine(priv)->sclass = nv98_crypt_sclass; nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush; - nv_falcon(priv)->code.data = nv98_pcrypt_code; - nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code); - nv_falcon(priv)->data.data = nv98_pcrypt_data; - nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data); + return 0; +} + +static int +nv98_crypt_init(struct nouveau_object *object) +{ + struct nv98_crypt_priv *priv = (void *)object; + int ret, i; + + ret = nouveau_crypt_init(&priv->base); + if (ret) + return ret; + + /* wait for exit interrupt to signal */ + nv_wait(priv, 0x087008, 0x00000010, 0x00000010); + nv_wr32(priv, 0x087004, 0x00000010); + + /* upload microcode code and data segments */ + nv_wr32(priv, 0x087ff8, 0x00100000); + 
for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++) + nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]); + + nv_wr32(priv, 0x087ff8, 0x00000000); + for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++) + nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]); + + /* start it running */ + nv_wr32(priv, 0x08710c, 0x00000000); + nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */ + nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */ return 0; } @@ -154,10 +201,8 @@ nv98_crypt_oclass = { .handle = NV_ENGINE(CRYPT, 0x98), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv98_crypt_ctor, - .dtor = _nouveau_falcon_dtor, - .init = _nouveau_falcon_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, + .dtor = _nouveau_crypt_dtor, + .init = nv98_crypt_init, + .fini = _nouveau_crypt_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c deleted file mode 100644 index d0817d94454c..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
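The init hooks above all load microcode the same way: reset a destination index register, then stream 32-bit words into an auto-advancing data port (0x087ff8/0x087ff4 for PCRYPT here; the copy engines use a 0x...180/184/188 variant with an explicit block index). Below is a rough, self-contained sketch of that upload loop with the MMIO accessor stubbed out; only the two PCRYPT offsets are taken from the hunk, everything else is illustrative.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for nv_wr32(); here it just logs the writes it would perform. */
static void wr32(uint32_t reg, uint32_t val)
{
	printf("wr 0x%06x <- 0x%08x\n", reg, val);
}

/* Select the destination through the index register, then push each word
 * through the data port; the port advances by itself after every write. */
static void upload_segment(uint32_t index_reg, uint32_t data_reg,
			   uint32_t base, const uint32_t *words, size_t count)
{
	size_t i;

	wr32(index_reg, base);
	for (i = 0; i < count; i++)
		wr32(data_reg, words[i]);
}

int main(void)
{
	const uint32_t ucode[] = { 0x12345678, 0x9abcdef0 }; /* dummy words */

	/* code segment at 0x00100000, then data at offset 0, as above */
	upload_segment(0x087ff8, 0x087ff4, 0x00100000, ucode, 2);
	upload_segment(0x087ff8, 0x087ff4, 0x00000000, ucode, 2);
	return 0;
}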
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include - -#include "nv50.h" - -int -nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data) -{ - const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) | - (data & NV50_DISP_DAC_PWR_VSYNC) | - (data & NV50_DISP_DAC_PWR_DATA) | - (data & NV50_DISP_DAC_PWR_STATE); - const u32 doff = (or * 0x800); - nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); - nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); - nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); - return 0; -} - -int -nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) -{ - const u32 doff = (or * 0x800); - int load = -EINVAL; - nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); - udelay(9500); - nv_wr32(priv, 0x61a00c + doff, 0x80000000); - load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27; - nv_wr32(priv, 0x61a00c + doff, 0x00000000); - return load; -} - -int -nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR); - u32 *data = args; - int ret; - - if (size < sizeof(u32)) - return -EINVAL; - - switch (mthd & ~0x3f) { - case NV50_DISP_DAC_PWR: - ret = priv->dac.power(priv, or, data[0]); - break; - case NV50_DISP_DAC_LOAD: - ret = priv->dac.sense(priv, or, data[0]); - if (ret >= 0) { - data[0] = ret; - ret = 0; - } - break; - default: - BUG_ON(1); - } - - return ret; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c deleted file mode 100644 index 373dbcc523b2..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
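The nv_mask() calls in nv50_dac_power() and friends are read-modify-write updates: read the register, clear the bits in the mask, OR in the new field, write it back. A small self-contained model of that idiom, against a fake register file since real MMIO is not available here:

#include <stdint.h>
#include <stdio.h>

/* Fake register file in place of MMIO; nv_rd32/nv_wr32 play this role
 * in the driver. */
static uint32_t regs[0x100];

static uint32_t rd32(uint32_t reg)               { return regs[reg & 0xff]; }
static void     wr32(uint32_t reg, uint32_t val) { regs[reg & 0xff] = val; }

/* Update only the bits covered by 'mask', preserving everything else;
 * returning the old value is convenient for callers that need it. */
static uint32_t mask32(uint32_t reg, uint32_t mask, uint32_t value)
{
	uint32_t old = rd32(reg);

	wr32(reg, (old & ~mask) | value);
	return old;
}

int main(void)
{
	wr32(0x04, 0x0badcafe);
	/* e.g. set a power/state field while leaving the rest untouched */
	mask32(0x04, 0xc000007f, 0x80000000 | 0x15);
	printf("0x%08x\n", rd32(0x04));
	return 0;
}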
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include "nv50.h" - -int -nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) -{ - const u32 soff = (or * 0x800); - int i; - - if (data && data[0]) { - for (i = 0; i < size; i++) - nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); - nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); - } else - if (data) { - nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001); - } else { - nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000); - } - - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c deleted file mode 100644 index dc57e24fc1df..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include -#include - -#include "nv50.h" - -int -nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) -{ - const u32 soff = (or * 0x030); - int i; - - if (data && data[0]) { - for (i = 0; i < size; i++) - nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); - nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); - } else - if (data) { - nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001); - } else { - nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000); - } - - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c deleted file mode 100644 index 0d36bdc51417..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
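nva3_hda_eld() and nvd0_hda_eld() above push the ELD one byte per 32-bit write, with the byte index in bits 15:8 and the data in bits 7:0, before flipping the enable bits with a mask write. A sketch of just the packing loop; the register offset is the nva3 one from the hunk and the ELD bytes are made up:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void wr32(uint32_t reg, uint32_t val)
{
	printf("wr 0x%06x <- 0x%08x\n", reg, val);
}

/* One write per ELD byte: index in bits 15:8, data in bits 7:0. */
static void write_eld(uint32_t reg, const uint8_t *eld, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		wr32(reg, ((uint32_t)i << 8) | eld[i]);
}

int main(void)
{
	const uint8_t eld[4] = { 0x10, 0x00, 0x08, 0x6a }; /* illustrative */

	write_eld(0x61c440, eld, sizeof(eld));
	return 0;
}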
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include "nv50.h" - -int -nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) -{ - const u32 hoff = (head * 0x800); - - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { - nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000); - nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); - nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); - return 0; - } - - /* AVI InfoFrame */ - nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x616528 + hoff, 0x000d0282); - nv_wr32(priv, 0x61652c + hoff, 0x0000006f); - nv_wr32(priv, 0x616530 + hoff, 0x00000000); - nv_wr32(priv, 0x616534 + hoff, 0x00000000); - nv_wr32(priv, 0x616538 + hoff, 0x00000000); - nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001); - - /* Audio InfoFrame */ - nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x616508 + hoff, 0x000a0184); - nv_wr32(priv, 0x61650c + hoff, 0x00000071); - nv_wr32(priv, 0x616510 + hoff, 0x00000000); - nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001); - - /* ??? */ - nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ - nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ - nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ - - /* HDMI_CTRL */ - nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c deleted file mode 100644 index f065fc248adf..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
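The magic constants in nv84_hdmi_ctrl() (and the nva3/nvd0 variants that follow) are CEA-861 InfoFrame headers and checksums: 0x000d0282 packs type 0x82 / version 0x02 / length 0x0d for the AVI InfoFrame, 0x000a0184 is the Audio one, and the next word is the checksum that makes all header and payload bytes sum to zero mod 256. With an all-zero payload that works out to the 0x6f and 0x71 written above; the small checker below verifies that arithmetic.

#include <stdint.h>
#include <stdio.h>

/* CEA-861 InfoFrame checksum: header bytes + payload bytes + checksum
 * must be 0 modulo 256. */
static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
				  uint8_t length, const uint8_t *payload)
{
	unsigned int sum = type + version + length;
	unsigned int i;

	for (i = 0; i < length; i++)
		sum += payload[i];
	return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
	const uint8_t zeros[13] = { 0 };

	/* AVI: type 0x82, version 2, length 13 -> 0x6f with a zero payload */
	printf("AVI   checksum: 0x%02x\n",
	       infoframe_checksum(0x82, 0x02, 0x0d, zeros));
	/* Audio: type 0x84, version 1, length 10 -> 0x71 with a zero payload */
	printf("Audio checksum: 0x%02x\n",
	       infoframe_checksum(0x84, 0x01, 0x0a, zeros));
	return 0;
}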
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include "nv50.h" - -int -nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) -{ - const u32 soff = (or * 0x800); - - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { - nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000); - nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); - nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); - return 0; - } - - /* AVI InfoFrame */ - nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x61c528 + soff, 0x000d0282); - nv_wr32(priv, 0x61c52c + soff, 0x0000006f); - nv_wr32(priv, 0x61c530 + soff, 0x00000000); - nv_wr32(priv, 0x61c534 + soff, 0x00000000); - nv_wr32(priv, 0x61c538 + soff, 0x00000000); - nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001); - - /* Audio InfoFrame */ - nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x61c508 + soff, 0x000a0184); - nv_wr32(priv, 0x61c50c + soff, 0x00000071); - nv_wr32(priv, 0x61c510 + soff, 0x00000000); - nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001); - - /* ??? */ - nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ - nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ - nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ - - /* HDMI_CTRL */ - nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c deleted file mode 100644 index 5151bb261832..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include "nv50.h" - -int -nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) -{ - const u32 hoff = (head * 0x800); - - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { - nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); - nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); - nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); - return 0; - } - - /* AVI InfoFrame */ - nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x61671c + hoff, 0x000d0282); - nv_wr32(priv, 0x616720 + hoff, 0x0000006f); - nv_wr32(priv, 0x616724 + hoff, 0x00000000); - nv_wr32(priv, 0x616728 + hoff, 0x00000000); - nv_wr32(priv, 0x61672c + hoff, 0x00000000); - nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001); - - /* ??? InfoFrame? */ - nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x6167ac + hoff, 0x00000010); - nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001); - - /* HDMI_CTRL */ - nv_mask(priv, 0x616798 + hoff, 0x401f007f, data); - - /* NFI, audio doesn't work without it though.. */ - nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index c6f80055e988..15b182c84ce8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -22,743 +22,24 @@ * Authors: Ben Skeggs */ -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "nv50.h" - -/******************************************************************************* - * EVO channel base class - ******************************************************************************/ - -int -nv50_disp_chan_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, - int length, void **pobject) -{ - struct nv50_disp_base *base = (void *)parent; - struct nv50_disp_chan *chan; - int ret; - - if (base->chan & (1 << chid)) - return -EBUSY; - base->chan |= (1 << chid); - - ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL, - (1ULL << NVDEV_ENGINE_DMAOBJ), - length, pobject); - chan = *pobject; - if (ret) - return ret; - - chan->chid = chid; - return 0; -} - -void -nv50_disp_chan_destroy(struct nv50_disp_chan *chan) -{ - struct nv50_disp_base *base = (void *)nv_object(chan)->parent; - base->chan &= ~(1 << chan->chid); - nouveau_namedb_destroy(&chan->base); -} - -u32 -nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_chan *chan = (void *)object; - return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr); -} - -void -nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_chan *chan = (void *)object; - nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data); -} - -/******************************************************************************* - * EVO DMA channel base class - ******************************************************************************/ - -static int -nv50_disp_dmac_object_attach(struct nouveau_object *parent, - struct nouveau_object *object, u32 name) -{ - struct nv50_disp_base *base = (void *)parent->parent; - struct nv50_disp_chan *chan = (void 
*)parent; - u32 addr = nv_gpuobj(object)->node->offset; - u32 chid = chan->chid; - u32 data = (chid << 28) | (addr << 10) | chid; - return nouveau_ramht_insert(base->ramht, chid, name, data); -} - -static void -nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie) -{ - struct nv50_disp_base *base = (void *)parent->parent; - nouveau_ramht_remove(base->ramht, cookie); -} - -int -nv50_disp_dmac_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, u32 pushbuf, int chid, - int length, void **pobject) -{ - struct nv50_disp_dmac *dmac; - int ret; - - ret = nv50_disp_chan_create_(parent, engine, oclass, chid, - length, pobject); - dmac = *pobject; - if (ret) - return ret; - - dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf); - if (!dmac->pushdma) - return -ENOENT; - - switch (nv_mclass(dmac->pushdma)) { - case 0x0002: - case 0x003d: - if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff) - return -EINVAL; - - switch (dmac->pushdma->target) { - case NV_MEM_TARGET_VRAM: - dmac->push = 0x00000000 | dmac->pushdma->start >> 8; - break; - case NV_MEM_TARGET_PCI_NOSNOOP: - dmac->push = 0x00000003 | dmac->pushdma->start >> 8; - break; - default: - return -EINVAL; - } - break; - default: - return -EINVAL; - } - - return 0; -} - -void -nv50_disp_dmac_dtor(struct nouveau_object *object) -{ - struct nv50_disp_dmac *dmac = (void *)object; - nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma); - nv50_disp_chan_destroy(&dmac->base); -} - -static int -nv50_disp_dmac_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *dmac = (void *)object; - int chid = dmac->base.chid; - int ret; - - ret = nv50_disp_chan_init(&dmac->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid); - - /* initialise channel for dma command submission */ - nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push); - nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000); - nv_wr32(priv, 0x61020c + (chid * 0x0010), chid); - nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010); - nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000); - nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013); - - /* wait for it to go inactive */ - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) { - nv_error(dmac, "init timeout, 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - return -EBUSY; - } - - return 0; -} - -static int -nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *dmac = (void *)object; - int chid = dmac->base.chid; - - /* deactivate channel */ - nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000); - nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000); - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) { - nv_error(dmac, "fini timeout, 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid); - - return nv50_disp_chan_fini(&dmac->base, suspend); -} - -/******************************************************************************* - * EVO master channel object - ******************************************************************************/ - -static int -nv50_disp_mast_ctor(struct nouveau_object 
*parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_mast_class *args = data; - struct nv50_disp_dmac *mast; - int ret; - - if (size < sizeof(*args)) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 0, sizeof(*mast), (void **)&mast); - *pobject = nv_object(mast); - if (ret) - return ret; - - nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach; - return 0; -} - -static int -nv50_disp_mast_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *mast = (void *)object; - int ret; - - ret = nv50_disp_chan_init(&mast->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610028, 0x00010001, 0x00010001); - - /* attempt to unstick channel from some unknown state */ - if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000) - nv_mask(priv, 0x610200, 0x00800000, 0x00800000); - if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000) - nv_mask(priv, 0x610200, 0x00600000, 0x00600000); - - /* initialise channel for dma command submission */ - nv_wr32(priv, 0x610204, mast->push); - nv_wr32(priv, 0x610208, 0x00010000); - nv_wr32(priv, 0x61020c, 0x00000000); - nv_mask(priv, 0x610200, 0x00000010, 0x00000010); - nv_wr32(priv, 0x640000, 0x00000000); - nv_wr32(priv, 0x610200, 0x01000013); - - /* wait for it to go inactive */ - if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) { - nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200)); - return -EBUSY; - } - - return 0; -} - -static int -nv50_disp_mast_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *mast = (void *)object; - - /* deactivate channel */ - nv_mask(priv, 0x610200, 0x00000010, 0x00000000); - nv_mask(priv, 0x610200, 0x00000003, 0x00000000); - if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) { - nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200)); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610028, 0x00010001, 0x00000000); - - return nv50_disp_chan_fini(&mast->base, suspend); -} - -struct nouveau_ofuncs -nv50_disp_mast_ofuncs = { - .ctor = nv50_disp_mast_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nv50_disp_mast_init, - .fini = nv50_disp_mast_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO sync channel objects - ******************************************************************************/ - -static int -nv50_disp_sync_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_sync_class *args = data; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*data) || args->head > 1) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 1 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs -nv50_disp_sync_ofuncs = { - .ctor = nv50_disp_sync_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = 
nv50_disp_dmac_init, - .fini = nv50_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO overlay channel objects - ******************************************************************************/ - -static int -nv50_disp_ovly_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_ovly_class *args = data; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*data) || args->head > 1) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 3 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs -nv50_disp_ovly_ofuncs = { - .ctor = nv50_disp_ovly_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nv50_disp_dmac_init, - .fini = nv50_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO PIO channel base class - ******************************************************************************/ - -static int -nv50_disp_pioc_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, - int length, void **pobject) -{ - return nv50_disp_chan_create_(parent, engine, oclass, chid, - length, pobject); -} - -static void -nv50_disp_pioc_dtor(struct nouveau_object *object) -{ - struct nv50_disp_pioc *pioc = (void *)object; - nv50_disp_chan_destroy(&pioc->base); -} - -static int -nv50_disp_pioc_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_pioc *pioc = (void *)object; - int chid = pioc->base.chid; - int ret; - - ret = nv50_disp_chan_init(&pioc->base); - if (ret) - return ret; - - nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000); - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) { - nv_error(pioc, "timeout0: 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - return -EBUSY; - } - - nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001); - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) { - nv_error(pioc, "timeout1: 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - return -EBUSY; - } - - return 0; -} - -static int -nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_pioc *pioc = (void *)object; - int chid = pioc->base.chid; - - nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000); - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) { - nv_error(pioc, "timeout: 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - if (suspend) - return -EBUSY; - } - - return nv50_disp_chan_fini(&pioc->base, suspend); -} - -/******************************************************************************* - * EVO immediate overlay channel objects - ******************************************************************************/ - -static int -nv50_disp_oimm_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) 
-{ - struct nv50_display_oimm_class *args = data; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head > 1) - return -EINVAL; - - ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs -nv50_disp_oimm_ofuncs = { - .ctor = nv50_disp_oimm_ctor, - .dtor = nv50_disp_pioc_dtor, - .init = nv50_disp_pioc_init, - .fini = nv50_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO cursor channel objects - ******************************************************************************/ - -static int -nv50_disp_curs_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_curs_class *args = data; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head > 1) - return -EINVAL; - - ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs -nv50_disp_curs_ofuncs = { - .ctor = nv50_disp_curs_ctor, - .dtor = nv50_disp_pioc_dtor, - .init = nv50_disp_pioc_init, - .fini = nv50_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * Base display object - ******************************************************************************/ - -static int -nv50_disp_base_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_base *base; - int ret; - - ret = nouveau_parent_create(parent, engine, oclass, 0, - priv->sclass, 0, &base); - *pobject = nv_object(base); - if (ret) - return ret; - - return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); -} - -static void -nv50_disp_base_dtor(struct nouveau_object *object) -{ - struct nv50_disp_base *base = (void *)object; - nouveau_ramht_ref(NULL, &base->ramht); - nouveau_parent_destroy(&base->base); -} - -static int -nv50_disp_base_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_base *base = (void *)object; - int ret, i; - u32 tmp; - - ret = nouveau_parent_init(&base->base); - if (ret) - return ret; - - /* The below segments of code copying values from one register to - * another appear to inform EVO of the display capabilities or - * something similar. NFI what the 0x614004 caps are for.. - */ - tmp = nv_rd32(priv, 0x614004); - nv_wr32(priv, 0x610184, tmp); - - /* ... CRTC caps */ - for (i = 0; i < priv->head.nr; i++) { - tmp = nv_rd32(priv, 0x616100 + (i * 0x800)); - nv_wr32(priv, 0x610190 + (i * 0x10), tmp); - tmp = nv_rd32(priv, 0x616104 + (i * 0x800)); - nv_wr32(priv, 0x610194 + (i * 0x10), tmp); - tmp = nv_rd32(priv, 0x616108 + (i * 0x800)); - nv_wr32(priv, 0x610198 + (i * 0x10), tmp); - tmp = nv_rd32(priv, 0x61610c + (i * 0x800)); - nv_wr32(priv, 0x61019c + (i * 0x10), tmp); - } - - /* ... 
DAC caps */ - for (i = 0; i < priv->dac.nr; i++) { - tmp = nv_rd32(priv, 0x61a000 + (i * 0x800)); - nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp); - } - - /* ... SOR caps */ - for (i = 0; i < priv->sor.nr; i++) { - tmp = nv_rd32(priv, 0x61c000 + (i * 0x800)); - nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp); - } - - /* ... EXT caps */ - for (i = 0; i < 3; i++) { - tmp = nv_rd32(priv, 0x61e000 + (i * 0x800)); - nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp); - } - - /* steal display away from vbios, or something like that */ - if (nv_rd32(priv, 0x610024) & 0x00000100) { - nv_wr32(priv, 0x610024, 0x00000100); - nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000); - if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) { - nv_error(priv, "timeout acquiring display\n"); - return -EBUSY; - } - } - - /* point at display engine memory area (hash table, objects) */ - nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9); - - /* enable supervisor interrupts, disable everything else */ - nv_wr32(priv, 0x61002c, 0x00000370); - nv_wr32(priv, 0x610028, 0x00000000); - return 0; -} - -static int -nv50_disp_base_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_base *base = (void *)object; - - /* disable all interrupts */ - nv_wr32(priv, 0x610024, 0x00000000); - nv_wr32(priv, 0x610020, 0x00000000); - - return nouveau_parent_fini(&base->base, suspend); -} - -struct nouveau_ofuncs -nv50_disp_base_ofuncs = { - .ctor = nv50_disp_base_ctor, - .dtor = nv50_disp_base_dtor, - .init = nv50_disp_base_init, - .fini = nv50_disp_base_fini, -}; - -static struct nouveau_omthds -nv50_disp_base_omthds[] = { - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - {}, -}; - -static struct nouveau_oclass -nv50_disp_base_oclass[] = { - { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds }, - {} +struct nv50_disp_priv { + struct nouveau_disp base; }; static struct nouveau_oclass nv50_disp_sclass[] = { - { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -/******************************************************************************* - * Display context, tracks instmem allocation and prevents more than one - * client using the display hardware at any time. - ******************************************************************************/ - -static int -nv50_disp_data_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv = (void *)engine; - struct nouveau_engctx *ectx; - int ret = -EBUSY; - - /* no context needed for channel objects... 
*/ - if (nv_mclass(parent) != NV_DEVICE_CLASS) { - atomic_inc(&parent->refcount); - *pobject = parent; - return 0; - } - - /* allocate display hardware to client */ - mutex_lock(&nv_subdev(priv)->mutex); - if (list_empty(&nv_engine(priv)->contexts)) { - ret = nouveau_engctx_create(parent, engine, oclass, NULL, - 0x10000, 0x10000, - NVOBJ_FLAG_HEAP, &ectx); - *pobject = nv_object(ectx); - } - mutex_unlock(&nv_subdev(priv)->mutex); - return ret; -} - -struct nouveau_oclass -nv50_disp_cclass = { - .handle = NV_ENGCTX(DISP, 0x50), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv50_disp_data_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, - }, + {}, }; -/******************************************************************************* - * Display engine implementation - ******************************************************************************/ - -static void -nv50_disp_intr_error(struct nv50_disp_priv *priv) -{ - u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16; - u32 addr, data; - int chid; - - for (chid = 0; chid < 5; chid++) { - if (!(channels & (1 << chid))) - continue; - - nv_wr32(priv, 0x610020, 0x00010000 << chid); - addr = nv_rd32(priv, 0x610080 + (chid * 0x08)); - data = nv_rd32(priv, 0x610084 + (chid * 0x08)); - nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000); - - nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n", - chid, addr & 0xffc, data, addr); - } -} - static void nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) { + struct nouveau_bar *bar = nouveau_bar(priv); struct nouveau_disp *disp = &priv->base; struct nouveau_software_chan *chan, *temp; unsigned long flags; @@ -768,19 +49,25 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) if (chan->vblank.crtc != crtc) continue; - nv_wr32(priv, 0x001704, chan->vblank.channel); - nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); - - if (nv_device(priv)->chipset == 0x50) { - nv_wr32(priv, 0x001570, chan->vblank.offset); - nv_wr32(priv, 0x001574, chan->vblank.value); + if (nv_device(priv)->chipset >= 0xc0) { + nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); + bar->flush(bar); + nv_wr32(priv, 0x06000c, + upper_32_bits(chan->vblank.offset)); + nv_wr32(priv, 0x060010, + lower_32_bits(chan->vblank.offset)); + nv_wr32(priv, 0x060014, chan->vblank.value); } else { - if (nv_device(priv)->chipset >= 0xc0) { - nv_wr32(priv, 0x06000c, - upper_32_bits(chan->vblank.offset)); + nv_wr32(priv, 0x001704, chan->vblank.channel); + nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); + bar->flush(bar); + if (nv_device(priv)->chipset == 0x50) { + nv_wr32(priv, 0x001570, chan->vblank.offset); + nv_wr32(priv, 0x001574, chan->vblank.value); + } else { + nv_wr32(priv, 0x060010, chan->vblank.offset); + nv_wr32(priv, 0x060014, chan->vblank.value); } - nv_wr32(priv, 0x060010, chan->vblank.offset); - nv_wr32(priv, 0x060014, chan->vblank.value); } list_del(&chan->vblank.head); @@ -793,422 +80,30 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) disp->vblank.notify(disp->vblank.data, crtc); } -static u16 -exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, - struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *info) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - u16 mask, type, data; - - if (outp < 4) { - type = DCB_OUTPUT_ANALOG; - mask = 0; - } else { - outp -= 4; - switch (ctrl & 0x00000f00) { - case 
0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break; - case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break; - case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break; - case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break; - case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break; - case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; - default: - nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); - return 0x0000; - } - } - - mask = 0x00c0 & (mask << 6); - mask |= 0x0001 << outp; - mask |= 0x0100 << head; - - data = dcb_outp_match(bios, type, mask, ver, hdr, dcb); - if (!data) - return 0x0000; - - return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info); -} - -static bool -exec_script(struct nv50_disp_priv *priv, int head, int id) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_outp info; - struct dcb_output dcb; - u8 ver, hdr, cnt, len; - u16 data; - u32 ctrl = 0x00000000; - int i; - - for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) - ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); - - if (nv_device(priv)->chipset < 0x90 || - nv_device(priv)->chipset == 0x92 || - nv_device(priv)->chipset == 0xa0) { - for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) - ctrl = nv_rd32(priv, 0x610b74 + (i * 8)); - i += 3; - } else { - for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) - ctrl = nv_rd32(priv, 0x610798 + (i * 8)); - i += 3; - } - - if (!(ctrl & (1 << head))) - return false; - - data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); - if (data) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = info.script[id], - .outp = &dcb, - .crtc = head, - .execute = 1, - }; - - return nvbios_exec(&init) == 0; - } - - return false; -} - -static u32 -exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, - struct dcb_output *outp) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_outp info1; - struct nvbios_ocfg info2; - u8 ver, hdr, cnt, len; - u16 data, conf; - u32 ctrl = 0x00000000; - int i; - - for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) - ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); - - if (nv_device(priv)->chipset < 0x90 || - nv_device(priv)->chipset == 0x92 || - nv_device(priv)->chipset == 0xa0) { - for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) - ctrl = nv_rd32(priv, 0x610b70 + (i * 8)); - i += 3; - } else { - for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) - ctrl = nv_rd32(priv, 0x610794 + (i * 8)); - i += 3; - } - - if (!(ctrl & (1 << head))) - return 0x0000; - - data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1); - if (!data) - return 0x0000; - - switch (outp->type) { - case DCB_OUTPUT_TMDS: - conf = (ctrl & 0x00000f00) >> 8; - if (pclk >= 165000) - conf |= 0x0100; - break; - case DCB_OUTPUT_LVDS: - conf = priv->sor.lvdsconf; - break; - case DCB_OUTPUT_DP: - conf = (ctrl & 0x00000f00) >> 8; - break; - case DCB_OUTPUT_ANALOG: - default: - conf = 0x00ff; - break; - } - - data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2); - if (data) { - data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); - if (data) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = data, - .outp = outp, - .crtc = head, - .execute = 1, - }; - - if (nvbios_exec(&init)) - return 0x0000; - return conf; - } - } - - return 0x0000; -} - -static void -nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super) -{ - int head = ffs((super & 0x00000060) >> 5) - 1; - if (head >= 0) { - head = ffs((super & 0x00000180) 
>> 7) - 1; - if (head >= 0) - exec_script(priv, head, 1); - } - - nv_wr32(priv, 0x610024, 0x00000010); - nv_wr32(priv, 0x610030, 0x80000000); -} - static void -nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv, - struct dcb_output *outp, u32 pclk) -{ - const int link = !(outp->sorconf.link & 1); - const int or = ffs(outp->or) - 1; - const u32 soff = ( or * 0x800); - const u32 loff = (link * 0x080) + soff; - const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8)); - const u32 symbol = 100000; - u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000; - u32 clksor = nv_rd32(priv, 0x614300 + soff); - int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; - int TU, VTUi, VTUf, VTUa; - u64 link_data_rate, link_ratio, unk; - u32 best_diff = 64 * symbol; - u32 link_nr, link_bw, bits, r; - - /* calculate packed data rate for each lane */ - if (dpctrl > 0x00030000) link_nr = 4; - else if (dpctrl > 0x00010000) link_nr = 2; - else link_nr = 1; - - if (clksor & 0x000c0000) - link_bw = 270000; - else - link_bw = 162000; - - if ((ctrl & 0xf0000) == 0x60000) bits = 30; - else if ((ctrl & 0xf0000) == 0x50000) bits = 24; - else bits = 18; - - link_data_rate = (pclk * bits / 8) / link_nr; - - /* calculate ratio of packed data rate to link symbol rate */ - link_ratio = link_data_rate * symbol; - r = do_div(link_ratio, link_bw); - - for (TU = 64; TU >= 32; TU--) { - /* calculate average number of valid symbols in each TU */ - u32 tu_valid = link_ratio * TU; - u32 calc, diff; - - /* find a hw representation for the fraction.. */ - VTUi = tu_valid / symbol; - calc = VTUi * symbol; - diff = tu_valid - calc; - if (diff) { - if (diff >= (symbol / 2)) { - VTUf = symbol / (symbol - diff); - if (symbol - (VTUf * diff)) - VTUf++; - - if (VTUf <= 15) { - VTUa = 1; - calc += symbol - (symbol / VTUf); - } else { - VTUa = 0; - VTUf = 1; - calc += symbol; - } - } else { - VTUa = 0; - VTUf = min((int)(symbol / diff), 15); - calc += symbol / VTUf; - } - - diff = calc - tu_valid; - } else { - /* no remainder, but the hw doesn't like the fractional - * part to be zero. decrement the integer part and - * have the fraction add a whole symbol back - */ - VTUa = 0; - VTUf = 1; - VTUi--; - } - - if (diff < best_diff) { - best_diff = diff; - bestTU = TU; - bestVTUa = VTUa; - bestVTUf = VTUf; - bestVTUi = VTUi; - if (diff == 0) - break; - } - } - - if (!bestTU) { - nv_error(priv, "unable to find suitable dp config\n"); - return; - } - - /* XXX close to vbios numbers, but not right */ - unk = (symbol - link_ratio) * bestTU; - unk *= link_ratio; - r = do_div(unk, symbol); - r = do_div(unk, symbol); - unk += 6; - - nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2); - nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 | - bestVTUf << 16 | - bestVTUi << 8 | unk); -} - -static void -nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super) -{ - struct dcb_output outp; - u32 addr, mask, data; - int head; - - /* finish detaching encoder? 
*/ - head = ffs((super & 0x00000180) >> 7) - 1; - if (head >= 0) - exec_script(priv, head, 2); - - /* check whether a vpll change is required */ - head = ffs((super & 0x00000600) >> 9) - 1; - if (head >= 0) { - u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; - if (pclk) { - struct nouveau_clock *clk = nouveau_clock(priv); - clk->pll_set(clk, PLL_VPLL0 + head, pclk); - } - - nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000); - } - - /* (re)attach the relevant OR to the head */ - head = ffs((super & 0x00000180) >> 7) - 1; - if (head >= 0) { - u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; - u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp); - if (conf) { - if (outp.type == DCB_OUTPUT_ANALOG) { - addr = 0x614280 + (ffs(outp.or) - 1) * 0x800; - mask = 0xffffffff; - data = 0x00000000; - } else { - if (outp.type == DCB_OUTPUT_DP) - nv50_disp_intr_unk20_dp(priv, &outp, pclk); - addr = 0x614300 + (ffs(outp.or) - 1) * 0x800; - mask = 0x00000707; - data = (conf & 0x0100) ? 0x0101 : 0x0000; - } - - nv_mask(priv, addr, mask, data); - } - } - - nv_wr32(priv, 0x610024, 0x00000020); - nv_wr32(priv, 0x610030, 0x80000000); -} - -/* If programming a TMDS output on a SOR that can also be configured for - * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off. - * - * It looks like the VBIOS TMDS scripts make an attempt at this, however, - * the VBIOS scripts on at least one board I have only switch it off on - * link 0, causing a blank display if the output has previously been - * programmed for DisplayPort. - */ -static void -nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const int link = !(outp->sorconf.link & 1); - const int or = ffs(outp->or) - 1; - const u32 loff = (or * 0x800) + (link * 0x80); - const u16 mask = (outp->sorconf.link << 6) | outp->or; - u8 ver, hdr; - - if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp)) - nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); -} - -static void -nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super) -{ - int head = ffs((super & 0x00000180) >> 7) - 1; - if (head >= 0) { - struct dcb_output outp; - u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; - if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) { - if (outp.type == DCB_OUTPUT_TMDS) - nv50_disp_intr_unk40_tmds(priv, &outp); - } - } - - nv_wr32(priv, 0x610024, 0x00000040); - nv_wr32(priv, 0x610030, 0x80000000); -} - -static void -nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1) -{ - u32 super = nv_rd32(priv, 0x610030); - - nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super); - - if (intr1 & 0x00000010) - nv50_disp_intr_unk10(priv, super); - if (intr1 & 0x00000020) - nv50_disp_intr_unk20(priv, super); - if (intr1 & 0x00000040) - nv50_disp_intr_unk40(priv, super); -} - -void nv50_disp_intr(struct nouveau_subdev *subdev) { struct nv50_disp_priv *priv = (void *)subdev; - u32 intr0 = nv_rd32(priv, 0x610020); - u32 intr1 = nv_rd32(priv, 0x610024); + u32 stat1 = nv_rd32(priv, 0x610024); - if (intr0 & 0x001f0000) { - nv50_disp_intr_error(priv); - intr0 &= ~0x001f0000; - } - - if (intr1 & 0x00000004) { + if (stat1 & 0x00000004) { nv50_disp_intr_vblank(priv, 0); nv_wr32(priv, 0x610024, 0x00000004); - intr1 &= ~0x00000004; + stat1 &= ~0x00000004; } - if (intr1 & 0x00000008) { + if (stat1 & 0x00000008) { nv50_disp_intr_vblank(priv, 1); nv_wr32(priv, 0x610024, 0x00000008); - intr1 &= ~0x00000008; + stat1 &= ~0x00000008; } 
- if (intr1 & 0x00000070) { - nv50_disp_intr_super(priv, intr1); - intr1 &= ~0x00000070; - } } static int nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) { struct nv50_disp_priv *priv; int ret; @@ -1219,16 +114,8 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nv50_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_engine(priv)->sclass = nv50_disp_sclass; nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nv50_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 2; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; INIT_LIST_HEAD(&priv->base.vblank.list); spin_lock_init(&priv->base.vblank.lock); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h deleted file mode 100644 index a6bb931450f1..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h +++ /dev/null @@ -1,142 +0,0 @@ -#ifndef __NV50_DISP_H__ -#define __NV50_DISP_H__ - -#include -#include -#include - -#include -#include - -struct dcb_output; - -struct nv50_disp_priv { - struct nouveau_disp base; - struct nouveau_oclass *sclass; - struct { - int nr; - } head; - struct { - int nr; - int (*power)(struct nv50_disp_priv *, int dac, u32 data); - int (*sense)(struct nv50_disp_priv *, int dac, u32 load); - } dac; - struct { - int nr; - int (*power)(struct nv50_disp_priv *, int sor, u32 data); - int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32); - int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32); - int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link, - int head, u16 type, u16 mask, u32 data, - struct dcb_output *); - int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link, - int head, u16 type, u16 mask, u32 data, - struct dcb_output *); - int (*dp_train)(struct nv50_disp_priv *, int sor, int link, - u16 type, u16 mask, u32 data, - struct dcb_output *); - int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link, - int head, u16 type, u16 mask, u32 data, - struct dcb_output *); - int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link, - int lane, u16 type, u16 mask, u32 data, - struct dcb_output *); - u32 lvdsconf; - } sor; -}; - -#define DAC_MTHD(n) (n), (n) + 0x03 - -int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32); -int nv50_dac_power(struct nv50_disp_priv *, int, u32); -int nv50_dac_sense(struct nv50_disp_priv *, int, u32); - -#define SOR_MTHD(n) (n), (n) + 0x3f - -int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); -int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); - -int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); -int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); -int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); - -int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32); -int nv50_sor_power(struct nv50_disp_priv *, int, u32); - -int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16, - u32, struct dcb_output *); -int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16, - u32, struct dcb_output *); -int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32, - struct dcb_output *); -int 
nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, - struct dcb_output *); -int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, - struct dcb_output *); - -int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32, - struct dcb_output *); -int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, - struct dcb_output *); -int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, - struct dcb_output *); - -struct nv50_disp_base { - struct nouveau_parent base; - struct nouveau_ramht *ramht; - u32 chan; -}; - -struct nv50_disp_chan { - struct nouveau_namedb base; - int chid; -}; - -int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, int, int, void **); -void nv50_disp_chan_destroy(struct nv50_disp_chan *); -u32 nv50_disp_chan_rd32(struct nouveau_object *, u64); -void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); - -#define nv50_disp_chan_init(a) \ - nouveau_namedb_init(&(a)->base) -#define nv50_disp_chan_fini(a,b) \ - nouveau_namedb_fini(&(a)->base, (b)) - -int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, u32, int, int, void **); -void nv50_disp_dmac_dtor(struct nouveau_object *); - -struct nv50_disp_dmac { - struct nv50_disp_chan base; - struct nouveau_dmaobj *pushdma; - u32 push; -}; - -struct nv50_disp_pioc { - struct nv50_disp_chan base; -}; - -extern struct nouveau_ofuncs nv50_disp_mast_ofuncs; -extern struct nouveau_ofuncs nv50_disp_sync_ofuncs; -extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs; -extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs; -extern struct nouveau_ofuncs nv50_disp_curs_ofuncs; -extern struct nouveau_ofuncs nv50_disp_base_ofuncs; -extern struct nouveau_oclass nv50_disp_cclass; -void nv50_disp_intr(struct nouveau_subdev *); - -extern struct nouveau_omthds nv84_disp_base_omthds[]; - -extern struct nouveau_omthds nva3_disp_base_omthds[]; - -extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; -extern struct nouveau_oclass nvd0_disp_cclass; -void nvd0_disp_intr(struct nouveau_subdev *); - -#endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c deleted file mode 100644 index fc84eacdfbec..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nv84_disp_sclass[] = { - { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -struct nouveau_omthds -nv84_disp_base_omthds[] = { - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - {}, -}; - -static struct nouveau_oclass -nv84_disp_base_oclass[] = { - { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, - {} -}; - -static int -nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nv84_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nv84_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 2; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hdmi = nv84_hdmi_ctrl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nv84_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x82), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv84_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c deleted file mode 100644 index ba9dfd4669a2..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nv94_disp_sclass[] = { - { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -static struct nouveau_omthds -nv94_disp_base_omthds[] = { - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - {}, -}; - -static struct nouveau_oclass -nv94_disp_base_oclass[] = { - { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds }, - {} -}; - -static int -nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nv94_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nv94_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 4; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hdmi = nv84_hdmi_ctrl; - priv->sor.dp_train = nv94_sor_dp_train; - priv->sor.dp_train_init = nv94_sor_dp_train_init; - priv->sor.dp_train_fini = nv94_sor_dp_train_fini; - priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl; - priv->sor.dp_drvctl = nv94_sor_dp_drvctl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nv94_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x88), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv94_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c deleted file mode 100644 index 5d63902cdeda..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nva0_disp_sclass[] = { - { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -static struct nouveau_oclass -nva0_disp_base_oclass[] = { - { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, - {} -}; - -static int -nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nva0_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nva0_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 2; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hdmi = nv84_hdmi_ctrl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nva0_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x83), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nva0_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c deleted file mode 100644 index e9192ca389fa..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nva3_disp_sclass[] = { - { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -struct nouveau_omthds -nva3_disp_base_omthds[] = { - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - {}, -}; - -static struct nouveau_oclass -nva3_disp_base_oclass[] = { - { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds }, - {} -}; - -static int -nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nva3_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nva3_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 4; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hda_eld = nva3_hda_eld; - priv->sor.hdmi = nva3_hdmi_ctrl; - priv->sor.dp_train = nv94_sor_dp_train; - priv->sor.dp_train_init = nv94_sor_dp_train_init; - priv->sor.dp_train_fini = nv94_sor_dp_train_fini; - priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl; - priv->sor.dp_drvctl = nv94_sor_dp_drvctl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nva3_disp_oclass = { 
- .handle = NV_ENGINE(DISP, 0x85), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nva3_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 9e38ebff5fb3..d93efbcf75b8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -22,808 +22,22 @@ * Authors: Ben Skeggs */ -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "nv50.h" - -/******************************************************************************* - * EVO DMA channel base class - ******************************************************************************/ - -static int -nvd0_disp_dmac_object_attach(struct nouveau_object *parent, - struct nouveau_object *object, u32 name) -{ - struct nv50_disp_base *base = (void *)parent->parent; - struct nv50_disp_chan *chan = (void *)parent; - u32 addr = nv_gpuobj(object)->node->offset; - u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001; - return nouveau_ramht_insert(base->ramht, chan->chid, name, data); -} - -static void -nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie) -{ - struct nv50_disp_base *base = (void *)parent->parent; - nouveau_ramht_remove(base->ramht, cookie); -} - -static int -nvd0_disp_dmac_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *dmac = (void *)object; - int chid = dmac->base.chid; - int ret; - - ret = nv50_disp_chan_init(&dmac->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid); - nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); - - /* initialise channel for dma command submission */ - nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push); - nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000); - nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001); - nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); - nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000); - nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013); - - /* wait for it to go inactive */ - if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) { - nv_error(dmac, "init: 0x%08x\n", - nv_rd32(priv, 0x610490 + (chid * 0x10))); - return -EBUSY; - } - - return 0; -} - -static int -nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *dmac = (void *)object; - int chid = dmac->base.chid; - - /* deactivate channel */ - nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000); - nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000); - if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) { - nv_error(dmac, "fini: 0x%08x\n", - nv_rd32(priv, 0x610490 + (chid * 0x10))); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000); - nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000); - - return nv50_disp_chan_fini(&dmac->base, suspend); -} - -/******************************************************************************* - * EVO master channel object - 
******************************************************************************/ - -static int -nvd0_disp_mast_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_mast_class *args = data; - struct nv50_disp_dmac *mast; - int ret; - - if (size < sizeof(*args)) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 0, sizeof(*mast), (void **)&mast); - *pobject = nv_object(mast); - if (ret) - return ret; - - nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -static int -nvd0_disp_mast_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *mast = (void *)object; - int ret; - - ret = nv50_disp_chan_init(&mast->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610090, 0x00000001, 0x00000001); - nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001); - - /* initialise channel for dma command submission */ - nv_wr32(priv, 0x610494, mast->push); - nv_wr32(priv, 0x610498, 0x00010000); - nv_wr32(priv, 0x61049c, 0x00000001); - nv_mask(priv, 0x610490, 0x00000010, 0x00000010); - nv_wr32(priv, 0x640000, 0x00000000); - nv_wr32(priv, 0x610490, 0x01000013); - - /* wait for it to go inactive */ - if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) { - nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490)); - return -EBUSY; - } - - return 0; -} - -static int -nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *mast = (void *)object; - - /* deactivate channel */ - nv_mask(priv, 0x610490, 0x00000010, 0x00000000); - nv_mask(priv, 0x610490, 0x00000003, 0x00000000); - if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) { - nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490)); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610090, 0x00000001, 0x00000000); - nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000); - - return nv50_disp_chan_fini(&mast->base, suspend); -} - -struct nouveau_ofuncs -nvd0_disp_mast_ofuncs = { - .ctor = nvd0_disp_mast_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nvd0_disp_mast_init, - .fini = nvd0_disp_mast_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO sync channel objects - ******************************************************************************/ - -static int -nvd0_disp_sync_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_sync_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*data) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 1 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs -nvd0_disp_sync_ofuncs = { - .ctor = nvd0_disp_sync_ctor, - .dtor = nv50_disp_dmac_dtor, 
- .init = nvd0_disp_dmac_init, - .fini = nvd0_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO overlay channel objects - ******************************************************************************/ - -static int -nvd0_disp_ovly_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_ovly_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*data) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 5 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs -nvd0_disp_ovly_ofuncs = { - .ctor = nvd0_disp_ovly_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nvd0_disp_dmac_init, - .fini = nvd0_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO PIO channel base class - ******************************************************************************/ - -static int -nvd0_disp_pioc_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, - int length, void **pobject) -{ - return nv50_disp_chan_create_(parent, engine, oclass, chid, - length, pobject); -} - -static void -nvd0_disp_pioc_dtor(struct nouveau_object *object) -{ - struct nv50_disp_pioc *pioc = (void *)object; - nv50_disp_chan_destroy(&pioc->base); -} - -static int -nvd0_disp_pioc_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_pioc *pioc = (void *)object; - int chid = pioc->base.chid; - int ret; - - ret = nv50_disp_chan_init(&pioc->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid); - nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); - - /* activate channel */ - nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001); - if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) { - nv_error(pioc, "init: 0x%08x\n", - nv_rd32(priv, 0x610490 + (chid * 0x10))); - return -EBUSY; - } - - return 0; -} - -static int -nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_pioc *pioc = (void *)object; - int chid = pioc->base.chid; - - nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000); - if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) { - nv_error(pioc, "timeout: 0x%08x\n", - nv_rd32(priv, 0x610490 + (chid * 0x10))); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000); - nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000); - - return nv50_disp_chan_fini(&pioc->base, suspend); -} - -/******************************************************************************* - * EVO immediate overlay channel objects - ******************************************************************************/ - -static int 
-nvd0_disp_oimm_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_oimm_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs -nvd0_disp_oimm_ofuncs = { - .ctor = nvd0_disp_oimm_ctor, - .dtor = nvd0_disp_pioc_dtor, - .init = nvd0_disp_pioc_init, - .fini = nvd0_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO cursor channel objects - ******************************************************************************/ - -static int -nvd0_disp_curs_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_curs_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs -nvd0_disp_curs_ofuncs = { - .ctor = nvd0_disp_curs_ctor, - .dtor = nvd0_disp_pioc_dtor, - .init = nvd0_disp_pioc_init, - .fini = nvd0_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * Base display object - ******************************************************************************/ - -static int -nvd0_disp_base_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_base *base; - int ret; - - ret = nouveau_parent_create(parent, engine, oclass, 0, - priv->sclass, 0, &base); - *pobject = nv_object(base); - if (ret) - return ret; - - return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); -} - -static void -nvd0_disp_base_dtor(struct nouveau_object *object) -{ - struct nv50_disp_base *base = (void *)object; - nouveau_ramht_ref(NULL, &base->ramht); - nouveau_parent_destroy(&base->base); -} - -static int -nvd0_disp_base_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_base *base = (void *)object; - int ret, i; - u32 tmp; - - ret = nouveau_parent_init(&base->base); - if (ret) - return ret; - - /* The below segments of code copying values from one register to - * another appear to inform EVO of the display capabilities or - * something similar. - */ - - /* ... CRTC caps */ - for (i = 0; i < priv->head.nr; i++) { - tmp = nv_rd32(priv, 0x616104 + (i * 0x800)); - nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp); - tmp = nv_rd32(priv, 0x616108 + (i * 0x800)); - nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp); - tmp = nv_rd32(priv, 0x61610c + (i * 0x800)); - nv_wr32(priv, 0x6101bc + (i * 0x800), tmp); - } - - /* ... 
DAC caps */ - for (i = 0; i < priv->dac.nr; i++) { - tmp = nv_rd32(priv, 0x61a000 + (i * 0x800)); - nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp); - } - - /* ... SOR caps */ - for (i = 0; i < priv->sor.nr; i++) { - tmp = nv_rd32(priv, 0x61c000 + (i * 0x800)); - nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp); - } - - /* steal display away from vbios, or something like that */ - if (nv_rd32(priv, 0x6100ac) & 0x00000100) { - nv_wr32(priv, 0x6100ac, 0x00000100); - nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000); - if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) { - nv_error(priv, "timeout acquiring display\n"); - return -EBUSY; - } - } - - /* point at display engine memory area (hash table, objects) */ - nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9); - - /* enable supervisor interrupts, disable everything else */ - nv_wr32(priv, 0x610090, 0x00000000); - nv_wr32(priv, 0x6100a0, 0x00000000); - nv_wr32(priv, 0x6100b0, 0x00000307); - - return 0; -} - -static int -nvd0_disp_base_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_base *base = (void *)object; - - /* disable all interrupts */ - nv_wr32(priv, 0x6100b0, 0x00000000); - - return nouveau_parent_fini(&base->base, suspend); -} - -struct nouveau_ofuncs -nvd0_disp_base_ofuncs = { - .ctor = nvd0_disp_base_ctor, - .dtor = nvd0_disp_base_dtor, - .init = nvd0_disp_base_init, - .fini = nvd0_disp_base_fini, -}; - -static struct nouveau_oclass -nvd0_disp_base_oclass[] = { - { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, - {} +struct nvd0_disp_priv { + struct nouveau_disp base; }; static struct nouveau_oclass nvd0_disp_sclass[] = { - { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, - { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, - { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, - { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, - { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, - {} + {}, }; -/******************************************************************************* - * Display engine implementation - ******************************************************************************/ - -static u16 -exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, - struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *info) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - u16 mask, type, data; - - if (outp < 4) { - type = DCB_OUTPUT_ANALOG; - mask = 0; - } else { - outp -= 4; - switch (ctrl & 0x00000f00) { - case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break; - case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break; - case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break; - case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break; - case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break; - case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; - default: - nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); - return 0x0000; - } - dcb->sorconf.link = mask; - } - - mask = 0x00c0 & (mask << 6); - mask |= 0x0001 << outp; - mask |= 0x0100 << head; - - data = dcb_outp_match(bios, type, mask, ver, hdr, dcb); - if (!data) - return 0x0000; - - return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info); -} - -static bool -exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_outp info; - struct dcb_output dcb; - u8 ver, hdr, cnt, len; - u16 data; - - data = exec_lookup(priv, head, 
outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); - if (data) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = info.script[id], - .outp = &dcb, - .crtc = head, - .execute = 1, - }; - - return nvbios_exec(&init) == 0; - } - - return false; -} - -static u32 -exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp, - u32 ctrl, int id, u32 pclk) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_outp info1; - struct nvbios_ocfg info2; - struct dcb_output dcb; - u8 ver, hdr, cnt, len; - u16 data, conf; - - data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1); - if (data == 0x0000) - return false; - - switch (dcb.type) { - case DCB_OUTPUT_TMDS: - conf = (ctrl & 0x00000f00) >> 8; - if (pclk >= 165000) - conf |= 0x0100; - break; - case DCB_OUTPUT_LVDS: - conf = priv->sor.lvdsconf; - break; - case DCB_OUTPUT_DP: - conf = (ctrl & 0x00000f00) >> 8; - break; - case DCB_OUTPUT_ANALOG: - default: - conf = 0x00ff; - break; - } - - data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2); - if (data) { - data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); - if (data) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = data, - .outp = &dcb, - .crtc = head, - .execute = 1, - }; - - if (nvbios_exec(&init)) - return 0x0000; - return conf; - } - } - - return 0x0000; -} - -static void -nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) -{ - int i; - - for (i = 0; mask && i < 8; i++) { - u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20)); - if (mcc & (1 << head)) - exec_script(priv, head, i, mcc, 1); - } - - nv_wr32(priv, 0x6101d4, 0x00000000); - nv_wr32(priv, 0x6109d4, 0x00000000); - nv_wr32(priv, 0x6101d0, 0x80000000); -} - static void -nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or) -{ - const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020)); - const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300)); - const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; - const u32 link = ((ctrl & 0xf00) == 0x800) ? 
0 : 1; - const u32 hoff = (head * 0x800); - const u32 soff = ( or * 0x800); - const u32 loff = (link * 0x080) + soff; - const u32 symbol = 100000; - const u32 TU = 64; - u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000; - u32 clksor = nv_rd32(priv, 0x612300 + soff); - u32 datarate, link_nr, link_bw, bits; - u64 ratio, value; - - if ((conf & 0x3c0) == 0x180) bits = 30; - else if ((conf & 0x3c0) == 0x140) bits = 24; - else bits = 18; - datarate = (pclk * bits) / 8; - - if (dpctrl > 0x00030000) link_nr = 4; - else if (dpctrl > 0x00010000) link_nr = 2; - else link_nr = 1; - - link_bw = (clksor & 0x007c0000) >> 18; - link_bw *= 27000; - - ratio = datarate; - ratio *= symbol; - do_div(ratio, link_nr * link_bw); - - value = (symbol - ratio) * TU; - value *= ratio; - do_div(value, symbol); - do_div(value, symbol); - - value += 5; - value |= 0x08000000; - - nv_wr32(priv, 0x616610 + hoff, value); -} - -static void -nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) -{ - u32 pclk; - int i; - - for (i = 0; mask && i < 8; i++) { - u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20)); - if (mcc & (1 << head)) - exec_script(priv, head, i, mcc, 2); - } - - pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; - nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask); - if (pclk && (mask & 0x00010000)) { - struct nouveau_clock *clk = nouveau_clock(priv); - clk->pll_set(clk, PLL_VPLL0 + head, pclk); - } - - nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000); - - for (i = 0; mask && i < 8; i++) { - u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg; - if (mcp & (1 << head)) { - if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) { - u32 addr, mask, data = 0x00000000; - if (i < 4) { - addr = 0x612280 + ((i - 0) * 0x800); - mask = 0xffffffff; - } else { - switch (mcp & 0x00000f00) { - case 0x00000800: - case 0x00000900: - nvd0_display_unk2_calc_tu(priv, head, i - 4); - break; - default: - break; - } - - addr = 0x612300 + ((i - 4) * 0x800); - mask = 0x00000707; - if (cfg & 0x00000100) - data = 0x00000101; - } - nv_mask(priv, addr, mask, data); - } - break; - } - } - - nv_wr32(priv, 0x6101d4, 0x00000000); - nv_wr32(priv, 0x6109d4, 0x00000000); - nv_wr32(priv, 0x6101d0, 0x80000000); -} - -static void -nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) -{ - int pclk, i; - - pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; - - for (i = 0; mask && i < 8; i++) { - u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)); - if (mcp & (1 << head)) - exec_clkcmp(priv, head, i, mcp, 1, pclk); - } - - nv_wr32(priv, 0x6101d4, 0x00000000); - nv_wr32(priv, 0x6109d4, 0x00000000); - nv_wr32(priv, 0x6101d0, 0x80000000); -} - -static void -nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) +nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc) { struct nouveau_bar *bar = nouveau_bar(priv); struct nouveau_disp *disp = &priv->base; @@ -851,71 +65,14 @@ nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) disp->vblank.notify(disp->vblank.data, crtc); } -void +static void nvd0_disp_intr(struct nouveau_subdev *subdev) { - struct nv50_disp_priv *priv = (void *)subdev; + struct nvd0_disp_priv *priv = (void *)subdev; u32 intr = nv_rd32(priv, 0x610088); int i; - if (intr & 0x00000001) { - u32 stat = nv_rd32(priv, 0x61008c); - nv_wr32(priv, 0x61008c, stat); - intr &= ~0x00000001; - } - - if (intr & 0x00000002) { - u32 stat = nv_rd32(priv, 0x61009c); - int chid = ffs(stat) - 1; - if (chid >= 0) { - u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12)); - 
u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12)); - u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12)); - - nv_error(priv, "chid %d mthd 0x%04x data 0x%08x " - "0x%08x 0x%08x\n", - chid, (mthd & 0x0000ffc), data, mthd, unkn); - nv_wr32(priv, 0x61009c, (1 << chid)); - nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000); - } - - intr &= ~0x00000002; - } - - if (intr & 0x00100000) { - u32 stat = nv_rd32(priv, 0x6100ac); - u32 mask = 0, crtc = ~0; - - while (!mask && ++crtc < priv->head.nr) - mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800)); - - if (stat & 0x00000001) { - nv_wr32(priv, 0x6100ac, 0x00000001); - nvd0_display_unk1_handler(priv, crtc, mask); - stat &= ~0x00000001; - } - - if (stat & 0x00000002) { - nv_wr32(priv, 0x6100ac, 0x00000002); - nvd0_display_unk2_handler(priv, crtc, mask); - stat &= ~0x00000002; - } - - if (stat & 0x00000004) { - nv_wr32(priv, 0x6100ac, 0x00000004); - nvd0_display_unk4_handler(priv, crtc, mask); - stat &= ~0x00000004; - } - - if (stat) { - nv_info(priv, "unknown intr24 0x%08x\n", stat); - nv_wr32(priv, 0x6100ac, stat); - } - - intr &= ~0x00100000; - } - - for (i = 0; i < priv->head.nr; i++) { + for (i = 0; i < 4; i++) { u32 mask = 0x01000000 << i; if (mask & intr) { u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); @@ -929,10 +86,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev) static int nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) { - struct nv50_disp_priv *priv; + struct nvd0_disp_priv *priv; int ret; ret = nouveau_disp_create(parent, engine, oclass, "PDISP", @@ -941,23 +98,8 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nvd0_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_engine(priv)->sclass = nvd0_disp_sclass; nv_subdev(priv)->intr = nvd0_disp_intr; - priv->sclass = nvd0_disp_sclass; - priv->head.nr = nv_rd32(priv, 0x022448); - priv->dac.nr = 3; - priv->sor.nr = 4; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hda_eld = nvd0_hda_eld; - priv->sor.hdmi = nvd0_hdmi_ctrl; - priv->sor.dp_train = nvd0_sor_dp_train; - priv->sor.dp_train_init = nv94_sor_dp_train_init; - priv->sor.dp_train_fini = nv94_sor_dp_train_fini; - priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl; - priv->sor.dp_drvctl = nvd0_sor_dp_drvctl; INIT_LIST_HEAD(&priv->base.vblank.list); spin_lock_init(&priv->base.vblank.lock); @@ -966,7 +108,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass nvd0_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x90), + .handle = NV_ENGINE(DISP, 0xd0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvd0_disp_ctor, .dtor = _nouveau_disp_dtor, diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c deleted file mode 100644 index 259537c4587e..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nve0_disp_sclass[] = { - { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, - { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, - { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, - { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, - { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, - {} -}; - -static struct nouveau_oclass -nve0_disp_base_oclass[] = { - { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, - {} -}; - -static int -nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nve0_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nvd0_disp_intr; - priv->sclass = nve0_disp_sclass; - priv->head.nr = nv_rd32(priv, 0x022448); - priv->dac.nr = 3; - priv->sor.nr = 4; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hda_eld = nvd0_hda_eld; - priv->sor.hdmi = nvd0_hdmi_ctrl; - priv->sor.dp_train = nvd0_sor_dp_train; - priv->sor.dp_train_init = nv94_sor_dp_train_init; - priv->sor.dp_train_fini = nv94_sor_dp_train_fini; - priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl; - priv->sor.dp_drvctl = nvd0_sor_dp_drvctl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nve0_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x91), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nve0_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c deleted file mode 100644 index 39b6b67732d0..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include - -#include "nv50.h" - -int -nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data) -{ - const u32 stat = data & NV50_DISP_SOR_PWR_STATE; - const u32 soff = (or * 0x800); - nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); - nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat); - nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); - nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000); - return 0; -} - -int -nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nouveau_bios *bios = nouveau_bios(priv); - const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12; - const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; - const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2; - const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); - const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or); - struct dcb_output outp; - u8 ver, hdr; - u32 data; - int ret = -EINVAL; - - if (size < sizeof(u32)) - return -EINVAL; - data = *(u32 *)args; - - if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp)) - return -ENODEV; - - switch (mthd & ~0x3f) { - case NV50_DISP_SOR_PWR: - ret = priv->sor.power(priv, or, data); - break; - case NVA3_DISP_SOR_HDA_ELD: - ret = priv->sor.hda_eld(priv, or, args, size); - break; - case NV84_DISP_SOR_HDMI_PWR: - ret = priv->sor.hdmi(priv, head, or, data); - break; - case NV50_DISP_SOR_LVDS_SCRIPT: - priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID; - ret = 0; - break; - case NV94_DISP_SOR_DP_TRAIN: - switch (data & NV94_DISP_SOR_DP_TRAIN_OP) { - case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN: - ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp); - break; - case NV94_DISP_SOR_DP_TRAIN_OP_INIT: - ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp); - break; - case NV94_DISP_SOR_DP_TRAIN_OP_FINI: - ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp); - break; - default: - break; - } - break; - case NV94_DISP_SOR_DP_LNKCTL: - ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp); - break; - case NV94_DISP_SOR_DP_DRVCTL(0): - case NV94_DISP_SOR_DP_DRVCTL(1): - case NV94_DISP_SOR_DP_DRVCTL(2): - case NV94_DISP_SOR_DP_DRVCTL(3): - ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6, - type, mask, data, 
&outp); - break; - default: - BUG_ON(1); - } - - return ret; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c deleted file mode 100644 index f6edd009762e..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include -#include - -#include "nv50.h" - -static inline u32 -nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) -{ - static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ - static const u8 nv94[] = { 16, 8, 0, 24 }; - if (nv_device(priv)->chipset == 0xaf) - return nvaf[lane]; - return nv94[lane]; -} - -int -nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_dpout info; - u8 ver, hdr, cnt, len; - u16 outp; - - outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info); - if (outp) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .outp = dcbo, - .crtc = head, - .execute = 1, - }; - - if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON) - init.offset = info.script[2]; - else - init.offset = info.script[3]; - nvbios_exec(&init); - - init.offset = info.script[0]; - nvbios_exec(&init); - } - - return 0; -} - -int -nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_dpout info; - u8 ver, hdr, cnt, len; - u16 outp; - - outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info); - if (outp) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = info.script[1], - .outp = dcbo, - .crtc = head, - .execute = 1, - }; - - nvbios_exec(&init); - } - - return 0; -} - -int -nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link, - u16 type, u16 mask, u32 data, struct dcb_output *info) -{ - const u32 loff = (or * 0x800) + (link * 0x80); - const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN); - nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24); - return 0; -} - -int -nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head, - u16 
type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const u32 loff = (or * 0x800) + (link * 0x80); - const u32 soff = (or * 0x800); - u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8; - u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT); - u32 dpctrl = 0x00000000; - u32 clksor = 0x00000000; - u32 outp, lane = 0; - u8 ver, hdr, cnt, len; - struct nvbios_dpout info; - int i; - - /* -> 10Khz units */ - link_bw *= 2700; - - outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info); - if (outp && info.lnkcmp) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = 0x0000, - .outp = dcbo, - .crtc = head, - .execute = 1, - }; - - while (link_bw < nv_ro16(bios, info.lnkcmp)) - info.lnkcmp += 4; - init.offset = nv_ro16(bios, info.lnkcmp + 2); - - nvbios_exec(&init); - } - - dpctrl |= ((1 << link_nr) - 1) << 16; - if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH) - dpctrl |= 0x00004000; - if (link_bw > 16200) - clksor |= 0x00040000; - - for (i = 0; i < link_nr; i++) - lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3); - - nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor); - nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); - nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane); - return 0; -} - -int -nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const u32 loff = (or * 0x800) + (link * 0x80); - const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8; - const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE); - u32 addr, shift = nv94_sor_dp_lane_map(priv, lane); - u8 ver, hdr, cnt, len; - struct nvbios_dpout outp; - struct nvbios_dpcfg ocfg; - - addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp); - if (!addr) - return -ENODEV; - - addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg); - if (!addr) - return -EINVAL; - - nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); - nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); - nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c deleted file mode 100644 index c37ce7e29f5d..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include -#include - -#include "nv50.h" - -static inline u32 -nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) -{ - static const u8 nvd0[] = { 16, 8, 0, 24 }; - return nvd0[lane]; -} - -int -nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link, - u16 type, u16 mask, u32 data, struct dcb_output *info) -{ - const u32 loff = (or * 0x800) + (link * 0x80); - const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN); - nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt); - return 0; -} - -int -nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const u32 loff = (or * 0x800) + (link * 0x80); - const u32 soff = (or * 0x800); - const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8; - const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT); - u32 dpctrl = 0x00000000; - u32 clksor = 0x00000000; - u32 outp, lane = 0; - u8 ver, hdr, cnt, len; - struct nvbios_dpout info; - int i; - - outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info); - if (outp && info.lnkcmp) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = 0x0000, - .outp = dcbo, - .crtc = head, - .execute = 1, - }; - - while (nv_ro08(bios, info.lnkcmp) < link_bw) - info.lnkcmp += 3; - init.offset = nv_ro16(bios, info.lnkcmp + 1); - - nvbios_exec(&init); - } - - clksor |= link_bw << 18; - dpctrl |= ((1 << link_nr) - 1) << 16; - if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH) - dpctrl |= 0x00004000; - - for (i = 0; i < link_nr; i++) - lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3); - - nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor); - nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); - nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane); - return 0; -} - -int -nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const u32 loff = (or * 0x800) + (link * 0x80); - const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8; - const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE); - u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane); - u8 ver, hdr, cnt, len; - struct nvbios_dpout outp; - struct nvbios_dpcfg ocfg; - - addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp); - if (!addr) - return -ENODEV; - - addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg); - if (!addr) - return -EINVAL; - - nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); - nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); - nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); - nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c index 5103e88d1877..e1f013d39768 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c @@ -28,39 +28,37 @@ #include 
#include -static int -nouveau_dmaobj_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) +int +nouveau_dmaobj_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + void *data, u32 size, int len, void **pobject) { - struct nouveau_dmaeng *dmaeng = (void *)engine; - struct nouveau_dmaobj *dmaobj; - struct nouveau_gpuobj *gpuobj; struct nv_dma_class *args = data; + struct nouveau_dmaobj *object; int ret; if (size < sizeof(*args)) return -EINVAL; - ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj); - *pobject = nv_object(dmaobj); + ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject); + object = *pobject; if (ret) return ret; switch (args->flags & NV_DMA_TARGET_MASK) { case NV_DMA_TARGET_VM: - dmaobj->target = NV_MEM_TARGET_VM; + object->target = NV_MEM_TARGET_VM; break; case NV_DMA_TARGET_VRAM: - dmaobj->target = NV_MEM_TARGET_VRAM; + object->target = NV_MEM_TARGET_VRAM; break; case NV_DMA_TARGET_PCI: - dmaobj->target = NV_MEM_TARGET_PCI; + object->target = NV_MEM_TARGET_PCI; break; case NV_DMA_TARGET_PCI_US: case NV_DMA_TARGET_AGP: - dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; + object->target = NV_MEM_TARGET_PCI_NOSNOOP; break; default: return -EINVAL; @@ -68,53 +66,22 @@ nouveau_dmaobj_ctor(struct nouveau_object *parent, switch (args->flags & NV_DMA_ACCESS_MASK) { case NV_DMA_ACCESS_VM: - dmaobj->access = NV_MEM_ACCESS_VM; + object->access = NV_MEM_ACCESS_VM; break; case NV_DMA_ACCESS_RD: - dmaobj->access = NV_MEM_ACCESS_RO; + object->access = NV_MEM_ACCESS_RO; break; case NV_DMA_ACCESS_WR: - dmaobj->access = NV_MEM_ACCESS_WO; + object->access = NV_MEM_ACCESS_WO; break; case NV_DMA_ACCESS_RDWR: - dmaobj->access = NV_MEM_ACCESS_RW; + object->access = NV_MEM_ACCESS_RW; break; default: return -EINVAL; } - dmaobj->start = args->start; - dmaobj->limit = args->limit; - dmaobj->conf0 = args->conf0; - - switch (nv_mclass(parent)) { - case NV_DEVICE_CLASS: - /* delayed, or no, binding */ - break; - default: - ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj); - if (ret == 0) { - nouveau_object_ref(NULL, pobject); - *pobject = nv_object(gpuobj); - } - break; - } - - return ret; + object->start = args->start; + object->limit = args->limit; + return 0; } - -static struct nouveau_ofuncs -nouveau_dmaobj_ofuncs = { - .ctor = nouveau_dmaobj_ctor, - .dtor = nouveau_object_destroy, - .init = nouveau_object_init, - .fini = nouveau_object_fini, -}; - -struct nouveau_oclass -nouveau_dmaobj_sclass[] = { - { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - {} -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c index 027d8217c0fa..9f4cc2f31994 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c @@ -34,6 +34,10 @@ struct nv04_dmaeng_priv { struct nouveau_dmaeng base; }; +struct nv04_dmaobj_priv { + struct nouveau_dmaobj base; +}; + static int nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent, @@ -49,18 +53,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, u32 length = dmaobj->limit - dmaobj->start; int ret; - if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { - switch (nv_mclass(parent->parent)) { - case NV03_CHANNEL_DMA_CLASS: - case 
NV10_CHANNEL_DMA_CLASS: - case NV17_CHANNEL_DMA_CLASS: - case NV40_CHANNEL_DMA_CLASS: - break; - default: - return -EINVAL; - } - } - if (dmaobj->target == NV_MEM_TARGET_VM) { if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) { struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; @@ -113,6 +105,56 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, return ret; } +static int +nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_dmaeng *dmaeng = (void *)engine; + struct nv04_dmaobj_priv *dmaobj; + struct nouveau_gpuobj *gpuobj; + int ret; + + ret = nouveau_dmaobj_create(parent, engine, oclass, + data, size, &dmaobj); + *pobject = nv_object(dmaobj); + if (ret) + return ret; + + switch (nv_mclass(parent)) { + case NV_DEVICE_CLASS: + break; + case NV03_CHANNEL_DMA_CLASS: + case NV10_CHANNEL_DMA_CLASS: + case NV17_CHANNEL_DMA_CLASS: + case NV40_CHANNEL_DMA_CLASS: + ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj); + nouveau_object_ref(NULL, pobject); + *pobject = nv_object(gpuobj); + break; + default: + return -EINVAL; + } + + return ret; +} + +static struct nouveau_ofuncs +nv04_dmaobj_ofuncs = { + .ctor = nv04_dmaobj_ctor, + .dtor = _nouveau_dmaobj_dtor, + .init = _nouveau_dmaobj_init, + .fini = _nouveau_dmaobj_fini, +}; + +static struct nouveau_oclass +nv04_dmaobj_sclass[] = { + { 0x0002, &nv04_dmaobj_ofuncs }, + { 0x0003, &nv04_dmaobj_ofuncs }, + { 0x003d, &nv04_dmaobj_ofuncs }, + {} +}; + static int nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -126,7 +168,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; + priv->base.base.sclass = nv04_dmaobj_sclass; priv->base.bind = nv04_dmaobj_bind; return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c index 750183f7c057..045d2565e289 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c @@ -32,74 +32,36 @@ struct nv50_dmaeng_priv { struct nouveau_dmaeng base; }; +struct nv50_dmaobj_priv { + struct nouveau_dmaobj base; +}; + static int nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent, struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { - u32 flags0 = nv_mclass(dmaobj); - u32 flags5 = 0x00000000; + u32 flags = nv_mclass(dmaobj); int ret; - if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { - switch (nv_mclass(parent->parent)) { - case NV50_CHANNEL_DMA_CLASS: - case NV84_CHANNEL_DMA_CLASS: - case NV50_CHANNEL_IND_CLASS: - case NV84_CHANNEL_IND_CLASS: - case NV50_DISP_MAST_CLASS: - case NV84_DISP_MAST_CLASS: - case NV94_DISP_MAST_CLASS: - case NVA0_DISP_MAST_CLASS: - case NVA3_DISP_MAST_CLASS: - case NV50_DISP_SYNC_CLASS: - case NV84_DISP_SYNC_CLASS: - case NV94_DISP_SYNC_CLASS: - case NVA0_DISP_SYNC_CLASS: - case NVA3_DISP_SYNC_CLASS: - case NV50_DISP_OVLY_CLASS: - case NV84_DISP_OVLY_CLASS: - case NV94_DISP_OVLY_CLASS: - case NVA0_DISP_OVLY_CLASS: - case NVA3_DISP_OVLY_CLASS: - break; - default: - return -EINVAL; - } - } - - if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM; - dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM; - dmaobj->conf0 |= 
NV50_DMA_CONF0_COMP_VM; - dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM; - } else { - dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US; - dmaobj->conf0 |= NV50_DMA_CONF0_PART_256; - dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE; - dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR; - } - } - - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22; - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22; - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV); - flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART); - switch (dmaobj->target) { case NV_MEM_TARGET_VM: - flags0 |= 0x00000000; + flags |= 0x00000000; + flags |= 0x60000000; /* COMPRESSION_USEVM */ + flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */ break; case NV_MEM_TARGET_VRAM: - flags0 |= 0x00010000; + flags |= 0x00010000; + flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */ break; case NV_MEM_TARGET_PCI: - flags0 |= 0x00020000; + flags |= 0x00020000; + flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */ break; case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00030000; + flags |= 0x00030000; + flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */ break; default: return -EINVAL; @@ -109,28 +71,78 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, case NV_MEM_ACCESS_VM: break; case NV_MEM_ACCESS_RO: - flags0 |= 0x00040000; + flags |= 0x00040000; break; case NV_MEM_ACCESS_WO: case NV_MEM_ACCESS_RW: - flags0 |= 0x00080000; + flags |= 0x00080000; break; } ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); + nv_wo32(*pgpuobj, 0x00, flags); nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | upper_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x10, 0x00000000); - nv_wo32(*pgpuobj, 0x14, flags5); + nv_wo32(*pgpuobj, 0x14, 0x00000000); } return ret; } +static int +nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_dmaeng *dmaeng = (void *)engine; + struct nv50_dmaobj_priv *dmaobj; + struct nouveau_gpuobj *gpuobj; + int ret; + + ret = nouveau_dmaobj_create(parent, engine, oclass, + data, size, &dmaobj); + *pobject = nv_object(dmaobj); + if (ret) + return ret; + + switch (nv_mclass(parent)) { + case NV_DEVICE_CLASS: + break; + case NV50_CHANNEL_DMA_CLASS: + case NV84_CHANNEL_DMA_CLASS: + case NV50_CHANNEL_IND_CLASS: + case NV84_CHANNEL_IND_CLASS: + ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj); + nouveau_object_ref(NULL, pobject); + *pobject = nv_object(gpuobj); + break; + default: + return -EINVAL; + } + + return ret; +} + +static struct nouveau_ofuncs +nv50_dmaobj_ofuncs = { + .ctor = nv50_dmaobj_ctor, + .dtor = _nouveau_dmaobj_dtor, + .init = _nouveau_dmaobj_init, + .fini = _nouveau_dmaobj_fini, +}; + +static struct nouveau_oclass +nv50_dmaobj_sclass[] = { + { 0x0002, &nv50_dmaobj_ofuncs }, + { 0x0003, &nv50_dmaobj_ofuncs }, + { 0x003d, &nv50_dmaobj_ofuncs }, + {} +}; + static int nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -144,7 +156,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; + priv->base.base.sclass = nv50_dmaobj_sclass; priv->base.bind = nv50_dmaobj_bind; return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c 
b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c index cd3970d03b80..5baa08695535 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c @@ -22,9 +22,7 @@ * Authors: Ben Skeggs */ -#include #include -#include #include #include @@ -33,85 +31,44 @@ struct nvc0_dmaeng_priv { struct nouveau_dmaeng base; }; +struct nvc0_dmaobj_priv { + struct nouveau_dmaobj base; +}; + static int -nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, - struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, - struct nouveau_gpuobj **pgpuobj) +nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) { - u32 flags0 = nv_mclass(dmaobj); - u32 flags5 = 0x00000000; + struct nvc0_dmaobj_priv *dmaobj; int ret; - if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { - switch (nv_mclass(parent->parent)) { - case NVA3_DISP_MAST_CLASS: - case NVA3_DISP_SYNC_CLASS: - case NVA3_DISP_OVLY_CLASS: - break; - default: - return -EINVAL; - } - } else - return 0; - - if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM; - dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM; - } else { - dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US; - dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR; - dmaobj->conf0 |= 0x00020000; - } - } - - flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22; - flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV); - flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN); + ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj); + *pobject = nv_object(dmaobj); + if (ret) + return ret; - switch (dmaobj->target) { - case NV_MEM_TARGET_VM: - flags0 |= 0x00000000; - break; - case NV_MEM_TARGET_VRAM: - flags0 |= 0x00010000; - break; - case NV_MEM_TARGET_PCI: - flags0 |= 0x00020000; - break; - case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00030000; - break; - default: + if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start) return -EINVAL; - } - switch (dmaobj->access) { - case NV_MEM_ACCESS_VM: - break; - case NV_MEM_ACCESS_RO: - flags0 |= 0x00040000; - break; - case NV_MEM_ACCESS_WO: - case NV_MEM_ACCESS_RW: - flags0 |= 0x00080000; - break; - } + return 0; +} - ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); - if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); - nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); - nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); - nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | - upper_32_bits(dmaobj->start)); - nv_wo32(*pgpuobj, 0x10, 0x00000000); - nv_wo32(*pgpuobj, 0x14, flags5); - } +static struct nouveau_ofuncs +nvc0_dmaobj_ofuncs = { + .ctor = nvc0_dmaobj_ctor, + .dtor = _nouveau_dmaobj_dtor, + .init = _nouveau_dmaobj_init, + .fini = _nouveau_dmaobj_fini, +}; - return ret; -} +static struct nouveau_oclass +nvc0_dmaobj_sclass[] = { + { 0x0002, &nvc0_dmaobj_ofuncs }, + { 0x0003, &nvc0_dmaobj_ofuncs }, + { 0x003d, &nvc0_dmaobj_ofuncs }, + {} +}; static int nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, @@ -126,8 +83,7 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; - priv->base.bind = nvc0_dmaobj_bind; + priv->base.base.sclass = nvc0_dmaobj_sclass; return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c 
b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c deleted file mode 100644 index d1528752980c..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include -#include - -#include -#include - -struct nvd0_dmaeng_priv { - struct nouveau_dmaeng base; -}; - -static int -nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, - struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, - struct nouveau_gpuobj **pgpuobj) -{ - u32 flags0 = 0x00000000; - int ret; - - if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { - switch (nv_mclass(parent->parent)) { - case NVD0_DISP_MAST_CLASS: - case NVD0_DISP_SYNC_CLASS: - case NVD0_DISP_OVLY_CLASS: - case NVE0_DISP_MAST_CLASS: - case NVE0_DISP_SYNC_CLASS: - case NVE0_DISP_OVLY_CLASS: - break; - default: - return -EINVAL; - } - } else - return 0; - - if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM; - dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP; - } else { - dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR; - dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP; - } - } - - flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20; - flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4; - - switch (dmaobj->target) { - case NV_MEM_TARGET_VRAM: - flags0 |= 0x00000009; - break; - default: - return -EINVAL; - break; - } - - ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); - if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); - nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8); - nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8); - nv_wo32(*pgpuobj, 0x0c, 0x00000000); - nv_wo32(*pgpuobj, 0x10, 0x00000000); - nv_wo32(*pgpuobj, 0x14, 0x00000000); - } - - return ret; -} - -static int -nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvd0_dmaeng_priv *priv; - int ret; - - ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; - priv->base.bind = nvd0_dmaobj_bind; - return 0; -} - -struct nouveau_oclass -nvd0_dmaeng_oclass = { - .handle = NV_ENGINE(DMAOBJ, 0xd0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvd0_dmaeng_ctor, - .dtor = _nouveau_dmaeng_dtor, - 
.init = _nouveau_dmaeng_init, - .fini = _nouveau_dmaeng_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/base.c index c2b9db335816..bbb43c67c2ae 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/base.c @@ -24,7 +24,6 @@ #include #include -#include #include #include @@ -34,7 +33,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, int bar, u32 addr, u32 size, u32 pushbuf, - u64 engmask, int len, void **ptr) + u32 engmask, int len, void **ptr) { struct nouveau_device *device = nv_device(engine); struct nouveau_fifo *priv = (void *)engine; @@ -57,16 +56,18 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent, dmaeng = (void *)chan->pushdma->base.engine; switch (chan->pushdma->base.oclass->handle) { - case NV_DMA_FROM_MEMORY_CLASS: - case NV_DMA_IN_MEMORY_CLASS: + case 0x0002: + case 0x003d: break; default: return -EINVAL; } - ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); - if (ret) - return ret; + if (dmaeng->bind) { + ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); + if (ret) + return ret; + } /* find a free fifo channel */ spin_lock_irqsave(&priv->lock, flags); @@ -118,14 +119,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object) } u32 -_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr) +_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr) { struct nouveau_fifo_chan *chan = (void *)object; return ioread32_native(chan->user + addr); } void -_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data) +_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nouveau_fifo_chan *chan = (void *)object; iowrite32_native(data, chan->user + addr); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c index a47a8548f9e0..ea76e3e8c9c2 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c @@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 0x10000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR), &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) } if (!nv04_fifo_swmthd(priv, chid, mthd, data)) { - nv_error(priv, "CACHE_ERROR - Ch %d/%d " + nv_info(priv, "CACHE_ERROR - Ch %d/%d " "Mthd 0x%04x Data 0x%08x\n", chid, (mthd >> 13) & 7, mthd & 0x1ffc, data); @@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) u32 ib_get = nv_rd32(priv, 0x003334); u32 ib_put = nv_rd32(priv, 0x003330); - nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x " + nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x " "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " "State 0x%08x (err: %s) Push 0x%08x\n", chid, ho_get, dma_get, ho_put, @@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x003334, ib_put); } } else { - nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x " + nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x " "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n", chid, dma_get, dma_put, state, 
nv_dma_state_err(state), push); @@ -525,13 +525,14 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) if (device->card_type == NV_50) { if (status & 0x00000010) { + nv50_fb_trap(nouveau_fb(priv), 1); status &= ~0x00000010; nv_wr32(priv, 0x002100, 0x00000010); } } if (status) { - nv_warn(priv, "unknown intr 0x%08x, ch %d\n", + nv_info(priv, "unknown intr 0x%08x, ch %d\n", status, chid); nv_wr32(priv, NV03_PFIFO_INTR_0, status); status = 0; @@ -541,7 +542,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) } if (status) { - nv_error(priv, "still angry after %d spins, halt\n", cnt); + nv_info(priv, "still angry after %d spins, halt\n", cnt); nv_wr32(priv, 0x002140, 0); nv_wr32(priv, 0x000140, 0); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c index 2c927c1d173b..4ba75422b89d 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c @@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 0x10000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR), &chan); *pobject = nv_object(chan); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c index a9cb51d38c57..b96e6b0ae2b1 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c @@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 0x10000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */ + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG), /* NV31- */ &chan); *pobject = nv_object(chan); if (ret) diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c index 2b1f91721225..559c3b4e1b86 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c @@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x1000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG), &chan); *pobject = nv_object(chan); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index bd096364f680..536e7634a00d 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -112,6 +112,14 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend, return -EINVAL; } + nv_wo32(base->eng, addr + 0x00, 0x00000000); + nv_wo32(base->eng, addr + 0x04, 0x00000000); + nv_wo32(base->eng, addr + 0x08, 0x00000000); + nv_wo32(base->eng, addr + 0x0c, 0x00000000); + nv_wo32(base->eng, addr + 0x10, 0x00000000); + nv_wo32(base->eng, addr + 
0x14, 0x00000000); + bar->flush(bar); + /* HW bug workaround: * * PFIFO will hang forever if the connected engines don't report @@ -133,18 +141,8 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend, if (suspend) ret = -EBUSY; } - nv_wr32(priv, 0x00b860, me); - - if (ret == 0) { - nv_wo32(base->eng, addr + 0x00, 0x00000000); - nv_wo32(base->eng, addr + 0x04, 0x00000000); - nv_wo32(base->eng, addr + 0x08, 0x00000000); - nv_wo32(base->eng, addr + 0x0c, 0x00000000); - nv_wo32(base->eng, addr + 0x10, 0x00000000); - nv_wo32(base->eng, addr + 0x14, 0x00000000); - bar->flush(bar); - } + nv_wr32(priv, 0x00b860, me); return ret; } @@ -196,10 +194,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x2000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG), &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -249,10 +247,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x2000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG), &chan); *pobject = nv_object(chan); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c index 1eb1c512f503..b4fd26d8f166 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c @@ -95,6 +95,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend, return -EINVAL; } + nv_wo32(base->eng, addr + 0x00, 0x00000000); + nv_wo32(base->eng, addr + 0x04, 0x00000000); + nv_wo32(base->eng, addr + 0x08, 0x00000000); + nv_wo32(base->eng, addr + 0x0c, 0x00000000); + nv_wo32(base->eng, addr + 0x10, 0x00000000); + nv_wo32(base->eng, addr + 0x14, 0x00000000); + bar->flush(bar); + save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn); nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff); @@ -104,14 +112,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend, if (suspend) return -EBUSY; } - - nv_wo32(base->eng, addr + 0x00, 0x00000000); - nv_wo32(base->eng, addr + 0x04, 0x00000000); - nv_wo32(base->eng, addr + 0x08, 0x00000000); - nv_wo32(base->eng, addr + 0x0c, 0x00000000); - nv_wo32(base->eng, addr + 0x10, 0x00000000); - nv_wo32(base->eng, addr + 0x14, 0x00000000); - bar->flush(bar); return 0; } @@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x2000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG) | - (1ULL << NVDEV_ENGINE_ME) | - (1ULL << NVDEV_ENGINE_VP) | - (1ULL << NVDEV_ENGINE_CRYPT) | - (1ULL << NVDEV_ENGINE_BSP) | - (1ULL << NVDEV_ENGINE_PPP) | - (1ULL << NVDEV_ENGINE_COPY0) | - (1ULL << NVDEV_ENGINE_UNK1C1), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG) | + (1 << 
NVDEV_ENGINE_ME) | + (1 << NVDEV_ENGINE_VP) | + (1 << NVDEV_ENGINE_CRYPT) | + (1 << NVDEV_ENGINE_BSP) | + (1 << NVDEV_ENGINE_PPP) | + (1 << NVDEV_ENGINE_COPY0) | + (1 << NVDEV_ENGINE_UNK1C1), &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x2000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG) | - (1ULL << NVDEV_ENGINE_ME) | - (1ULL << NVDEV_ENGINE_VP) | - (1ULL << NVDEV_ENGINE_CRYPT) | - (1ULL << NVDEV_ENGINE_BSP) | - (1ULL << NVDEV_ENGINE_PPP) | - (1ULL << NVDEV_ENGINE_COPY0) | - (1ULL << NVDEV_ENGINE_UNK1C1), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG) | + (1 << NVDEV_ENGINE_ME) | + (1 << NVDEV_ENGINE_VP) | + (1 << NVDEV_ENGINE_CRYPT) | + (1 << NVDEV_ENGINE_BSP) | + (1 << NVDEV_ENGINE_PPP) | + (1 << NVDEV_ENGINE_COPY0) | + (1 << NVDEV_ENGINE_UNK1C1), &chan); *pobject = nv_object(chan); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index b4365dde1859..6f21be600557 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -103,9 +103,6 @@ nvc0_fifo_context_attach(struct nouveau_object *parent, case NVDEV_ENGINE_GR : addr = 0x0210; break; case NVDEV_ENGINE_COPY0: addr = 0x0230; break; case NVDEV_ENGINE_COPY1: addr = 0x0240; break; - case NVDEV_ENGINE_BSP : addr = 0x0270; break; - case NVDEV_ENGINE_VP : addr = 0x0250; break; - case NVDEV_ENGINE_PPP : addr = 0x0260; break; default: return -EINVAL; } @@ -140,13 +137,14 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend, case NVDEV_ENGINE_GR : addr = 0x0210; break; case NVDEV_ENGINE_COPY0: addr = 0x0230; break; case NVDEV_ENGINE_COPY1: addr = 0x0240; break; - case NVDEV_ENGINE_BSP : addr = 0x0270; break; - case NVDEV_ENGINE_VP : addr = 0x0250; break; - case NVDEV_ENGINE_PPP : addr = 0x0260; break; default: return -EINVAL; } + nv_wo32(base, addr + 0x00, 0x00000000); + nv_wo32(base, addr + 0x04, 0x00000000); + bar->flush(bar); + nv_wr32(priv, 0x002634, chan->base.chid); if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { nv_error(priv, "channel %d kick timeout\n", chan->base.chid); @@ -154,9 +152,6 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend, return -EBUSY; } - nv_wo32(base, addr + 0x00, 0x00000000); - nv_wo32(base, addr + 0x04, 0x00000000); - bar->flush(bar); return 0; } @@ -180,13 +175,10 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, priv->user.bar.offset, 0x1000, args->pushbuf, - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_COPY0) | - (1ULL << NVDEV_ENGINE_COPY1) | - (1ULL << NVDEV_ENGINE_BSP) | - (1ULL << NVDEV_ENGINE_VP) | - (1ULL << NVDEV_ENGINE_PPP), &chan); + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_COPY0) | + (1 << NVDEV_ENGINE_COPY1), &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -502,7 +494,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev) u32 stat = nv_rd32(priv, 0x002100) & mask; if (stat & 0x00000100) { - nv_warn(priv, "unknown status 0x00000100\n"); + nv_info(priv, "unknown status 0x00000100\n"); nv_wr32(priv, 0x002100, 0x00000100); stat 
&= ~0x00000100; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index c930da99c2c1..36e81b6fafbc 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -38,12 +38,12 @@ #include #include -#define _(a,b) { (a), ((1ULL << (a)) | (b)) } +#define _(a,b) { (a), ((1 << (a)) | (b)) } static const struct { - u64 subdev; - u64 mask; + int subdev; + u32 mask; } fifo_engine[] = { - _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)), + _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)), _(NVDEV_ENGINE_VP , 0), _(NVDEV_ENGINE_PPP , 0), _(NVDEV_ENGINE_BSP , 0), @@ -138,9 +138,6 @@ nve0_fifo_context_attach(struct nouveau_object *parent, case NVDEV_ENGINE_GR : case NVDEV_ENGINE_COPY0: case NVDEV_ENGINE_COPY1: addr = 0x0210; break; - case NVDEV_ENGINE_BSP : addr = 0x0270; break; - case NVDEV_ENGINE_VP : addr = 0x0250; break; - case NVDEV_ENGINE_PPP : addr = 0x0260; break; default: return -EINVAL; } @@ -175,13 +172,14 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend, case NVDEV_ENGINE_GR : case NVDEV_ENGINE_COPY0: case NVDEV_ENGINE_COPY1: addr = 0x0210; break; - case NVDEV_ENGINE_BSP : addr = 0x0270; break; - case NVDEV_ENGINE_VP : addr = 0x0250; break; - case NVDEV_ENGINE_PPP : addr = 0x0260; break; default: return -EINVAL; } + nv_wo32(base, addr + 0x00, 0x00000000); + nv_wo32(base, addr + 0x04, 0x00000000); + bar->flush(bar); + nv_wr32(priv, 0x002634, chan->base.chid); if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { nv_error(priv, "channel %d kick timeout\n", chan->base.chid); @@ -189,9 +187,6 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend, return -EBUSY; } - nv_wo32(base, addr + 0x00, 0x00000000); - nv_wo32(base, addr + 0x04, 0x00000000); - bar->flush(bar); return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c index e45035efb8ca..7bbb1e1b7a8d 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c @@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem) }); } -void +int nv40_grctx_init(struct nouveau_device *device, u32 *size) { - u32 ctxprog[256], i; + u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i; struct nouveau_grctx ctx = { .device = device, .mode = NOUVEAU_GRCTX_PROG, .data = ctxprog, - .ctxprog_max = ARRAY_SIZE(ctxprog) + .ctxprog_max = 256, }; + if (!ctxprog) + return -ENOMEM; + nv40_grctx_generate(&ctx); nv_wr32(device, 0x400324, 0); for (i = 0; i < ctx.ctxprog_len; i++) nv_wr32(device, 0x400328, ctxprog[i]); *size = ctx.ctxvals_pos * 4; + + kfree(ctxprog); + return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c index e30a9c5ff1fc..618528248457 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c @@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv03_graph_gdi_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_patt }, - { 0x0188, 0x0188, nv04_graph_mthd_bind_rop }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_patt }, + { 
0x0188, nv04_graph_mthd_bind_rop }, + { 0x018c, nv04_graph_mthd_bind_beta1 }, + { 0x0190, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_gdi_omthds[] = { - { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv01_graph_blit_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv01_graph_mthd_bind_patt }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst }, - { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv01_graph_mthd_bind_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_surf_dst }, + { 0x019c, nv04_graph_mthd_bind_surf_src }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_blit_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_patt }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 }, - { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_beta4 }, + { 0x019c, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_iifc_omthds[] = { - { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma }, - { 0x018c, 0x018c, nv01_graph_mthd_bind_clip }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_patt }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_rop }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 }, - { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 }, - { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, - { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation }, + { 0x0188, nv01_graph_mthd_bind_chroma }, + { 0x018c, nv01_graph_mthd_bind_clip }, + { 0x0190, nv04_graph_mthd_bind_patt }, + { 0x0194, nv04_graph_mthd_bind_rop }, + { 0x0198, nv04_graph_mthd_bind_beta1 }, + { 0x019c, nv04_graph_mthd_bind_beta4 }, + { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, + { 0x03e4, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv01_graph_ifc_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv01_graph_mthd_bind_patt }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, 0x0198, 
nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv01_graph_mthd_bind_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_ifc_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_patt }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 }, - { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_beta4 }, + { 0x019c, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv03_graph_sifc_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_sifc_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv03_graph_sifm_omthds[] = { - { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x0304, 0x0304, nv04_graph_mthd_set_operation }, + { 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x0304, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_sifm_omthds[] = { - { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x0304, 0x0304, nv04_graph_mthd_set_operation }, + { 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, 
nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x0304, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_surf3d_omthds[] = { - { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h }, - { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v }, + { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, + { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, {} }; static struct nouveau_omthds nv03_graph_ttri_omthds[] = { - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_surf_color }, + { 0x0190, nv04_graph_mthd_bind_surf_zeta }, {} }; static struct nouveau_omthds nv01_graph_prim_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_clip }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_clip }, + { 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_prim_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_clip }, - { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_clip }, + { 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c index 5c0f843ea249..92521c89e77f 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c @@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv17_celcius_omthds[] = { - { 0x1638, 0x1638, nv17_graph_mthd_lma_window }, - { 0x163c, 0x163c, nv17_graph_mthd_lma_window }, - { 0x1640, 0x1640, nv17_graph_mthd_lma_window }, - { 0x1644, 0x1644, nv17_graph_mthd_lma_window }, - { 0x1658, 0x1658, nv17_graph_mthd_lma_enable }, + { 0x1638, nv17_graph_mthd_lma_window }, + { 0x163c, nv17_graph_mthd_lma_window }, + { 0x1640, nv17_graph_mthd_lma_window }, + { 0x1644, nv17_graph_mthd_lma_window }, + { 0x1658, nv17_graph_mthd_lma_enable }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c index 5b20401bf911..8f3f619c4a78 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c @@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i) nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr); - if (nv_device(engine)->chipset != 0x34) { + if (nv_device(engine)->card_type == NV_20) { nv_wr32(priv, 
NV20_PGRAPH_ZCOMP(i), tile->zcomp); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp); @@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev) nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); if (show) { - nv_error(priv, ""); + nv_info(priv, ""); nouveau_bitfield_print(nv10_graph_intr_name, show); printk(" nsource:"); nouveau_bitfield_print(nv04_graph_nsource, nsource); printk(" nstatus:"); nouveau_bitfield_print(nv10_graph_nstatus, nstatus); printk("\n"); - nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n", + nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n", chid, subc, class, mthd, data); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c index 8fc1221408b2..cc6574eeb80e 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c @@ -156,8 +156,8 @@ nv40_graph_context_ctor(struct nouveau_object *parent, static int nv40_graph_context_fini(struct nouveau_object *object, bool suspend) { - struct nv04_graph_priv *priv = (void *)object->engine; - struct nv04_graph_chan *chan = (void *)object; + struct nv40_graph_priv *priv = (void *)object->engine; + struct nv40_graph_chan *chan = (void *)object; u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4; int ret = 0; @@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i) switch (nv_device(priv)->chipset) { case 0x40: - case 0x41: + case 0x41: /* guess */ case 0x42: case 0x43: - case 0x45: + case 0x45: /* guess */ case 0x4e: nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); @@ -227,21 +227,6 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i) nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); - switch (nv_device(priv)->chipset) { - case 0x40: - case 0x45: - nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); - nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp); - break; - case 0x41: - case 0x42: - case 0x43: - nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp); - nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp); - break; - default: - break; - } break; case 0x44: case 0x4a: @@ -250,31 +235,18 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i) nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); break; case 0x46: - case 0x4c: case 0x47: case 0x49: case 0x4b: - case 0x63: + case 0x4c: case 0x67: - case 0x68: + default: nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch); nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit); nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr); nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); - switch (nv_device(priv)->chipset) { - case 0x47: - case 0x49: - case 0x4b: - nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp); - nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp); - break; - default: - break; - } - break; - default: break; } @@ -321,7 +293,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev) nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); if (show) { - nv_error(priv, ""); + nv_info(priv, ""); nouveau_bitfield_print(nv10_graph_intr_name, show); printk(" nsource:"); nouveau_bitfield_print(nv04_graph_nsource, nsource); @@ -374,7 +346,9 @@ nv40_graph_init(struct nouveau_object *object) return ret; /* 
generate and upload context program */ - nv40_grctx_init(nv_device(priv), &priv->size); + ret = nv40_grctx_init(nv_device(priv), &priv->size); + if (ret) + return ret; /* No context present currently */ nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h index d2ac975afc2e..7da35a4e7970 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h @@ -15,7 +15,7 @@ nv44_graph_class(void *priv) return !(0x0baf & (1 << (device->chipset & 0x0f))); } -void nv40_grctx_init(struct nouveau_device *, u32 *size); +int nv40_grctx_init(struct nouveau_device *, u32 *size); void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index b1c3d835b4c2..ab3b9dcaf478 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c @@ -184,65 +184,6 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine) return 0; } -static const struct nouveau_bitfield nv50_pgraph_status[] = { - { 0x00000001, "BUSY" }, /* set when any bit is set */ - { 0x00000002, "DISPATCH" }, - { 0x00000004, "UNK2" }, - { 0x00000008, "UNK3" }, - { 0x00000010, "UNK4" }, - { 0x00000020, "UNK5" }, - { 0x00000040, "M2MF" }, - { 0x00000080, "UNK7" }, - { 0x00000100, "CTXPROG" }, - { 0x00000200, "VFETCH" }, - { 0x00000400, "CCACHE_UNK4" }, - { 0x00000800, "STRMOUT_GSCHED_UNK5" }, - { 0x00001000, "UNK14XX" }, - { 0x00002000, "UNK24XX_CSCHED" }, - { 0x00004000, "UNK1CXX" }, - { 0x00008000, "CLIPID" }, - { 0x00010000, "ZCULL" }, - { 0x00020000, "ENG2D" }, - { 0x00040000, "UNK34XX" }, - { 0x00080000, "TPRAST" }, - { 0x00100000, "TPROP" }, - { 0x00200000, "TEX" }, - { 0x00400000, "TPVP" }, - { 0x00800000, "MP" }, - { 0x01000000, "ROP" }, - {} -}; - -static const char *const nv50_pgraph_vstatus_0[] = { - "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL -}; - -static const char *const nv50_pgraph_vstatus_1[] = { - "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL -}; - -static const char *const nv50_pgraph_vstatus_2[] = { - "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX", - "ROP", NULL -}; - -static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r, - const char *const units[], u32 status) -{ - int i; - - nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status); - - for (i = 0; units[i] && status; i++) { - if ((status & 7) == 1) - pr_cont(" %s", units[i]); - status >>= 3; - } - if (status) - pr_cont(" (invalid: 0x%x)", status); - pr_cont("\n"); -} - static int nv84_graph_tlb_flush(struct nouveau_engine *engine) { @@ -278,19 +219,10 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine) !(timeout = ptimer->read(ptimer) - start > 2000000000)); if (timeout) { - nv_error(priv, "PGRAPH TLB flush idle timeout fail\n"); - - tmp = nv_rd32(priv, 0x400700); - nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp); - nouveau_bitfield_print(nv50_pgraph_status, tmp); - pr_cont("\n"); - - nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0, - nv_rd32(priv, 0x400380)); - nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1, - nv_rd32(priv, 0x400384)); - nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2, - nv_rd32(priv, 0x400388)); + nv_error(priv, "PGRAPH TLB flush idle timeout fail: " + "0x%08x 
0x%08x 0x%08x 0x%08x\n", + nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380), + nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388)); } nv50_vm_flush_engine(&engine->base, 0x00); @@ -521,13 +453,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, } if (ustatus) { if (display) - nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); + nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); } nv_wr32(priv, ustatus_addr, 0xc0000000); } if (!tps && display) - nv_warn(priv, "%s - No TPs claiming errors?\n", name); + nv_info(priv, "%s - No TPs claiming errors?\n", name); } static int @@ -786,12 +718,13 @@ nv50_graph_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x400500, 0x00010001); if (show) { - nv_error(priv, ""); + nv_info(priv, ""); nouveau_bitfield_print(nv50_graph_intr_name, show); printk("\n"); nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x " "mthd 0x%04x data 0x%08x\n", chid, (u64)inst << 12, subc, class, mthd, data); + nv50_fb_trap(nouveau_fb(priv), 1); } if (nv_rd32(priv, 0x400824) & (1 << 31)) diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c index 47a02081d708..c62f2d0f5f0a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c @@ -814,7 +814,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) nv_wr32(priv, 0x41a100, 0x00000002); nv_wr32(priv, 0x409100, 0x00000002); if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001)) - nv_warn(priv, "0x409800 wait failed\n"); + nv_info(priv, "0x409800 wait failed\n"); nv_wr32(priv, 0x409840, 0xffffffff); nv_wr32(priv, 0x409500, 0x7fffffff); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/regs.h index fde8e24415e4..9c715a25cecb 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/regs.h +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/regs.h @@ -205,7 +205,6 @@ #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) #define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) -#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i)) #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) @@ -217,7 +216,6 @@ #define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16)) #define NV04_PGRAPH_V_RAM 0x00400D40 #define NV04_PGRAPH_W_RAM 0x00400D80 -#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i)) #define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 #define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 #define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 @@ -263,12 +261,9 @@ #define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 #define NV04_PGRAPH_DMA_B_SIZE 0x0040109C #define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 -#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i)) #define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) #define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) #define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) #define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) -#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i)) -#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i)) #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c index 9fd86375f4c4..1f394a2629e7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c +++ 
b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c @@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = { static struct nouveau_omthds nv31_mpeg_omthds[] = { - { 0x0190, 0x0190, nv31_mpeg_mthd_dma }, - { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma }, - { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma }, + { 0x0190, nv31_mpeg_mthd_dma }, + { 0x01a0, nv31_mpeg_mthd_dma }, + { 0x01b0, nv31_mpeg_mthd_dma }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c index 12418574efea..f7c581ad1991 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c @@ -38,7 +38,7 @@ struct nv40_mpeg_priv { }; struct nv40_mpeg_chan { - struct nouveau_mpeg base; + struct nouveau_mpeg_chan base; }; /******************************************************************************* diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c index bc7d12b30fc1..8678a9996d57 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c @@ -157,6 +157,7 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x00b100, stat); nv_wr32(priv, 0x00b230, 0x00000001); + nv50_fb_trap(nouveau_fb(priv), 1); } static void diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c index 5a5b2a773ed7..50e7e0da1981 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c @@ -22,18 +22,18 @@ * Authors: Ben Skeggs */ -#include -#include +#include #include +#include #include struct nv98_ppp_priv { - struct nouveau_engine base; + struct nouveau_ppp base; }; struct nv98_ppp_chan { - struct nouveau_engctx base; + struct nouveau_ppp_chan base; }; /******************************************************************************* @@ -49,16 +49,61 @@ nv98_ppp_sclass[] = { * PPPP context ******************************************************************************/ +static int +nv98_ppp_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv98_ppp_chan *priv; + int ret; + + ret = nouveau_ppp_context_create(parent, engine, oclass, NULL, + 0, 0, 0, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +static void +nv98_ppp_context_dtor(struct nouveau_object *object) +{ + struct nv98_ppp_chan *priv = (void *)object; + nouveau_ppp_context_destroy(&priv->base); +} + +static int +nv98_ppp_context_init(struct nouveau_object *object) +{ + struct nv98_ppp_chan *priv = (void *)object; + int ret; + + ret = nouveau_ppp_context_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv98_ppp_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv98_ppp_chan *priv = (void *)object; + return nouveau_ppp_context_fini(&priv->base, suspend); +} + static struct nouveau_oclass nv98_ppp_cclass = { .handle = NV_ENGCTX(PPP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nv98_ppp_context_ctor, + .dtor = nv98_ppp_context_dtor, + .init = nv98_ppp_context_init, + .fini = nv98_ppp_context_fini, + .rd32 = 
_nouveau_ppp_context_rd32, + .wr32 = _nouveau_ppp_context_wr32, }, }; @@ -66,6 +111,11 @@ nv98_ppp_cclass = { * PPPP engine/subdev functions ******************************************************************************/ +static void +nv98_ppp_intr(struct nouveau_subdev *subdev) +{ +} + static int nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -74,25 +124,52 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_ppp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PPPP", "ppp", &priv); + ret = nouveau_ppp_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00400002; + nv_subdev(priv)->intr = nv98_ppp_intr; nv_engine(priv)->cclass = &nv98_ppp_cclass; nv_engine(priv)->sclass = nv98_ppp_sclass; return 0; } +static void +nv98_ppp_dtor(struct nouveau_object *object) +{ + struct nv98_ppp_priv *priv = (void *)object; + nouveau_ppp_destroy(&priv->base); +} + +static int +nv98_ppp_init(struct nouveau_object *object) +{ + struct nv98_ppp_priv *priv = (void *)object; + int ret; + + ret = nouveau_ppp_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv98_ppp_fini(struct nouveau_object *object, bool suspend) +{ + struct nv98_ppp_priv *priv = (void *)object; + return nouveau_ppp_fini(&priv->base, suspend); +} + struct nouveau_oclass nv98_ppp_oclass = { .handle = NV_ENGINE(PPP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv98_ppp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = nv98_ppp_dtor, + .init = nv98_ppp_init, + .fini = nv98_ppp_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c deleted file mode 100644 index ebf0d860e2dd..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Maarten Lankhorst - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Maarten Lankhorst - */ - -#include - -#include - -struct nvc0_ppp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * PPP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_ppp_sclass[] = { - { 0x90b3, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PPPP context - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_ppp_cclass = { - .handle = NV_ENGCTX(PPP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PPPP engine/subdev functions - ******************************************************************************/ - -static int -nvc0_ppp_init(struct nouveau_object *object) -{ - struct nvc0_ppp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x086010, 0x0000fff2); - nv_wr32(priv, 0x08601c, 0x0000fff2); - return 0; -} - -static int -nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvc0_ppp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true, - "PPPP", "ppp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00000002; - nv_engine(priv)->cclass = &nvc0_ppp_cclass; - nv_engine(priv)->sclass = nvc0_ppp_sclass; - return 0; -} - -struct nouveau_oclass -nvc0_ppp_oclass = { - .handle = NV_ENGINE(PPP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvc0_ppp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nvc0_ppp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv04.c index 2a859a31c30d..3ca4c3aa90b7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv04.c @@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv04_software_omthds[] = { - { 0x0150, 0x0150, nv04_software_set_ref }, - { 0x0500, 0x0500, nv04_software_flip }, + { 0x0150, nv04_software_set_ref }, + { 0x0500, nv04_software_flip }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv10.c index a019364b1e13..6e699afbfdb7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv10.c @@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv10_software_omthds[] = { - { 0x0500, 0x0500, nv10_software_flip }, + { 0x0500, nv10_software_flip }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv50.c index 
b0e7e1c01ce6..a2edcd38544a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv50.c @@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv50_software_omthds[] = { - { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem }, - { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset }, - { 0x0404, 0x0404, nv50_software_mthd_vblsem_value }, - { 0x0408, 0x0408, nv50_software_mthd_vblsem_release }, - { 0x0500, 0x0500, nv50_software_mthd_flip }, + { 0x018c, nv50_software_mthd_dma_vblsem }, + { 0x0400, nv50_software_mthd_vblsem_offset }, + { 0x0404, nv50_software_mthd_vblsem_value }, + { 0x0408, nv50_software_mthd_vblsem_release }, + { 0x0500, nv50_software_mthd_flip }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c index 282a1cd1bc2f..b7b0d7e330d6 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c @@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nvc0_software_omthds[] = { - { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset }, - { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset }, - { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value }, - { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release }, - { 0x0500, 0x0500, nvc0_software_mthd_flip }, + { 0x0400, nvc0_software_mthd_vblsem_offset }, + { 0x0404, nvc0_software_mthd_vblsem_offset }, + { 0x0408, nvc0_software_mthd_vblsem_value }, + { 0x040c, nvc0_software_mthd_vblsem_release }, + { 0x0500, nvc0_software_mthd_flip }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c index 261cd96e6951..dd23c80e5405 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c @@ -22,13 +22,18 @@ * Authors: Ben Skeggs */ -#include +#include #include +#include #include struct nv84_vp_priv { - struct nouveau_engine base; + struct nouveau_vp base; +}; + +struct nv84_vp_chan { + struct nouveau_vp_chan base; }; /******************************************************************************* @@ -44,16 +49,61 @@ nv84_vp_sclass[] = { * PVP context ******************************************************************************/ +static int +nv84_vp_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_vp_chan *priv; + int ret; + + ret = nouveau_vp_context_create(parent, engine, oclass, NULL, + 0, 0, 0, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +static void +nv84_vp_context_dtor(struct nouveau_object *object) +{ + struct nv84_vp_chan *priv = (void *)object; + nouveau_vp_context_destroy(&priv->base); +} + +static int +nv84_vp_context_init(struct nouveau_object *object) +{ + struct nv84_vp_chan *priv = (void *)object; + int ret; + + ret = nouveau_vp_context_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv84_vp_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv84_vp_chan *priv = (void *)object; + return nouveau_vp_context_fini(&priv->base, suspend); +} + static struct nouveau_oclass nv84_vp_cclass = { .handle = NV_ENGCTX(VP, 0x84), .ofuncs = &(struct nouveau_ofuncs) { 
- .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nv84_vp_context_ctor, + .dtor = nv84_vp_context_dtor, + .init = nv84_vp_context_init, + .fini = nv84_vp_context_fini, + .rd32 = _nouveau_vp_context_rd32, + .wr32 = _nouveau_vp_context_wr32, }, }; @@ -61,6 +111,11 @@ nv84_vp_cclass = { * PVP engine/subdev functions ******************************************************************************/ +static void +nv84_vp_intr(struct nouveau_subdev *subdev) +{ +} + static int nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -69,25 +124,52 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv84_vp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PVP", "vp", &priv); + ret = nouveau_vp_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x01020000; + nv_subdev(priv)->intr = nv84_vp_intr; nv_engine(priv)->cclass = &nv84_vp_cclass; nv_engine(priv)->sclass = nv84_vp_sclass; return 0; } +static void +nv84_vp_dtor(struct nouveau_object *object) +{ + struct nv84_vp_priv *priv = (void *)object; + nouveau_vp_destroy(&priv->base); +} + +static int +nv84_vp_init(struct nouveau_object *object) +{ + struct nv84_vp_priv *priv = (void *)object; + int ret; + + ret = nouveau_vp_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv84_vp_fini(struct nouveau_object *object, bool suspend) +{ + struct nv84_vp_priv *priv = (void *)object; + return nouveau_vp_fini(&priv->base, suspend); +} + struct nouveau_oclass nv84_vp_oclass = { .handle = NV_ENGINE(VP, 0x84), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv84_vp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = nv84_vp_dtor, + .init = nv84_vp_init, + .fini = nv84_vp_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c deleted file mode 100644 index f761949d7039..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Maarten Lankhorst - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Maarten Lankhorst - */ - -#include - -#include - -struct nvc0_vp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * VP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_vp_sclass[] = { - { 0x90b2, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PVP context - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_vp_cclass = { - .handle = NV_ENGCTX(VP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PVP engine/subdev functions - ******************************************************************************/ - -static int -nvc0_vp_init(struct nouveau_object *object) -{ - struct nvc0_vp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x085010, 0x0000fff2); - nv_wr32(priv, 0x08501c, 0x0000fff2); - return 0; -} - -static int -nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvc0_vp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true, - "PVP", "vp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00020000; - nv_engine(priv)->cclass = &nvc0_vp_cclass; - nv_engine(priv)->sclass = nvc0_vp_sclass; - return 0; -} - -struct nouveau_oclass -nvc0_vp_oclass = { - .handle = NV_ENGINE(VP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvc0_vp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nvc0_vp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c deleted file mode 100644 index 2384ce5dbe16..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include - -#include - -struct nve0_vp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * VP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nve0_vp_sclass[] = { - { 0x95b2, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PVP context - ******************************************************************************/ - -static struct nouveau_oclass -nve0_vp_cclass = { - .handle = NV_ENGCTX(VP, 0xe0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PVP engine/subdev functions - ******************************************************************************/ - -static int -nve0_vp_init(struct nouveau_object *object) -{ - struct nve0_vp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x085010, 0x0000fff2); - nv_wr32(priv, 0x08501c, 0x0000fff2); - return 0; -} - -static int -nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nve0_vp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true, - "PVP", "vp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00020000; - nv_engine(priv)->cclass = &nve0_vp_cclass; - nv_engine(priv)->sclass = nve0_vp_sclass; - return 0; -} - -struct nouveau_oclass -nve0_vp_oclass = { - .handle = NV_ENGINE(VP, 0xe0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nve0_vp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nve0_vp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h index 47c4b3a5bd3a..6180ae9800fc 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h @@ -23,7 +23,6 @@ #define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL #define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL #define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL -#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL struct nv_device_class { u64 device; /* device identifier, ~0 for client default */ @@ -53,49 +52,11 @@ struct nv_device_class { #define NV_DMA_ACCESS_WR 0x00000200 #define NV_DMA_ACCESS_RDWR 0x00000300 -/* NV50:NVC0 */ -#define NV50_DMA_CONF0_ENABLE 0x80000000 -#define NV50_DMA_CONF0_PRIV 0x00300000 -#define NV50_DMA_CONF0_PRIV_VM 0x00000000 -#define NV50_DMA_CONF0_PRIV_US 0x00100000 -#define NV50_DMA_CONF0_PRIV__S 0x00200000 -#define NV50_DMA_CONF0_PART 0x00030000 -#define 
NV50_DMA_CONF0_PART_VM 0x00000000 -#define NV50_DMA_CONF0_PART_256 0x00010000 -#define NV50_DMA_CONF0_PART_1KB 0x00020000 -#define NV50_DMA_CONF0_COMP 0x00000180 -#define NV50_DMA_CONF0_COMP_NONE 0x00000000 -#define NV50_DMA_CONF0_COMP_VM 0x00000180 -#define NV50_DMA_CONF0_TYPE 0x0000007f -#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NV50_DMA_CONF0_TYPE_VM 0x0000007f - -/* NVC0:NVD9 */ -#define NVC0_DMA_CONF0_ENABLE 0x80000000 -#define NVC0_DMA_CONF0_PRIV 0x00300000 -#define NVC0_DMA_CONF0_PRIV_VM 0x00000000 -#define NVC0_DMA_CONF0_PRIV_US 0x00100000 -#define NVC0_DMA_CONF0_PRIV__S 0x00200000 -#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000 -#define NVC0_DMA_CONF0_TYPE 0x000000ff -#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff - -/* NVD9- */ -#define NVD0_DMA_CONF0_ENABLE 0x80000000 -#define NVD0_DMA_CONF0_PAGE 0x00000400 -#define NVD0_DMA_CONF0_PAGE_LP 0x00000000 -#define NVD0_DMA_CONF0_PAGE_SP 0x00000400 -#define NVD0_DMA_CONF0_TYPE 0x000000ff -#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff - struct nv_dma_class { u32 flags; u32 pad0; u64 start; u64 limit; - u32 conf0; }; /* DMA FIFO channel classes @@ -154,190 +115,4 @@ struct nve0_channel_ind_class { u32 engine; }; -/* 5070: NV50_DISP - * 8270: NV84_DISP - * 8370: NVA0_DISP - * 8870: NV94_DISP - * 8570: NVA3_DISP - * 9070: NVD0_DISP - * 9170: NVE0_DISP - */ - -#define NV50_DISP_CLASS 0x00005070 -#define NV84_DISP_CLASS 0x00008270 -#define NVA0_DISP_CLASS 0x00008370 -#define NV94_DISP_CLASS 0x00008870 -#define NVA3_DISP_CLASS 0x00008570 -#define NVD0_DISP_CLASS 0x00009070 -#define NVE0_DISP_CLASS 0x00009170 - -#define NV50_DISP_SOR_MTHD 0x00010000 -#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 -#define NV50_DISP_SOR_MTHD_HEAD 0x00000018 -#define NV50_DISP_SOR_MTHD_LINK 0x00000004 -#define NV50_DISP_SOR_MTHD_OR 0x00000003 - -#define NV50_DISP_SOR_PWR 0x00010000 -#define NV50_DISP_SOR_PWR_STATE 0x00000001 -#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001 -#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000 -#define NVA3_DISP_SOR_HDA_ELD 0x00010100 -#define NV84_DISP_SOR_HDMI_PWR 0x00012000 -#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000 -#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000 -#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000 -#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000 -#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f -#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000 -#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff -#define NV94_DISP_SOR_DP_TRAIN 0x00016000 -#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000 -#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000 -#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000 -#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000 -#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001 -#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000 -#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001 -#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003 -#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000 -#define NV94_DISP_SOR_DP_LNKCTL 0x00016040 -#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000 -#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000 -#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000 -#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00 -#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007 -#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100) -#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300 -#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003 - -#define NV50_DISP_DAC_MTHD 0x00020000 -#define 
NV50_DISP_DAC_MTHD_TYPE 0x0000f000 -#define NV50_DISP_DAC_MTHD_OR 0x00000003 - -#define NV50_DISP_DAC_PWR 0x00020000 -#define NV50_DISP_DAC_PWR_HSYNC 0x00000001 -#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000 -#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001 -#define NV50_DISP_DAC_PWR_VSYNC 0x00000004 -#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000 -#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004 -#define NV50_DISP_DAC_PWR_DATA 0x00000010 -#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000 -#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010 -#define NV50_DISP_DAC_PWR_STATE 0x00000040 -#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 -#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 -#define NV50_DISP_DAC_LOAD 0x0002000c -#define NV50_DISP_DAC_LOAD_VALUE 0x00000007 - -struct nv50_display_class { -}; - -/* 507a: NV50_DISP_CURS - * 827a: NV84_DISP_CURS - * 837a: NVA0_DISP_CURS - * 887a: NV94_DISP_CURS - * 857a: NVA3_DISP_CURS - * 907a: NVD0_DISP_CURS - * 917a: NVE0_DISP_CURS - */ - -#define NV50_DISP_CURS_CLASS 0x0000507a -#define NV84_DISP_CURS_CLASS 0x0000827a -#define NVA0_DISP_CURS_CLASS 0x0000837a -#define NV94_DISP_CURS_CLASS 0x0000887a -#define NVA3_DISP_CURS_CLASS 0x0000857a -#define NVD0_DISP_CURS_CLASS 0x0000907a -#define NVE0_DISP_CURS_CLASS 0x0000917a - -struct nv50_display_curs_class { - u32 head; -}; - -/* 507b: NV50_DISP_OIMM - * 827b: NV84_DISP_OIMM - * 837b: NVA0_DISP_OIMM - * 887b: NV94_DISP_OIMM - * 857b: NVA3_DISP_OIMM - * 907b: NVD0_DISP_OIMM - * 917b: NVE0_DISP_OIMM - */ - -#define NV50_DISP_OIMM_CLASS 0x0000507b -#define NV84_DISP_OIMM_CLASS 0x0000827b -#define NVA0_DISP_OIMM_CLASS 0x0000837b -#define NV94_DISP_OIMM_CLASS 0x0000887b -#define NVA3_DISP_OIMM_CLASS 0x0000857b -#define NVD0_DISP_OIMM_CLASS 0x0000907b -#define NVE0_DISP_OIMM_CLASS 0x0000917b - -struct nv50_display_oimm_class { - u32 head; -}; - -/* 507c: NV50_DISP_SYNC - * 827c: NV84_DISP_SYNC - * 837c: NVA0_DISP_SYNC - * 887c: NV94_DISP_SYNC - * 857c: NVA3_DISP_SYNC - * 907c: NVD0_DISP_SYNC - * 917c: NVE0_DISP_SYNC - */ - -#define NV50_DISP_SYNC_CLASS 0x0000507c -#define NV84_DISP_SYNC_CLASS 0x0000827c -#define NVA0_DISP_SYNC_CLASS 0x0000837c -#define NV94_DISP_SYNC_CLASS 0x0000887c -#define NVA3_DISP_SYNC_CLASS 0x0000857c -#define NVD0_DISP_SYNC_CLASS 0x0000907c -#define NVE0_DISP_SYNC_CLASS 0x0000917c - -struct nv50_display_sync_class { - u32 pushbuf; - u32 head; -}; - -/* 507d: NV50_DISP_MAST - * 827d: NV84_DISP_MAST - * 837d: NVA0_DISP_MAST - * 887d: NV94_DISP_MAST - * 857d: NVA3_DISP_MAST - * 907d: NVD0_DISP_MAST - * 917d: NVE0_DISP_MAST - */ - -#define NV50_DISP_MAST_CLASS 0x0000507d -#define NV84_DISP_MAST_CLASS 0x0000827d -#define NVA0_DISP_MAST_CLASS 0x0000837d -#define NV94_DISP_MAST_CLASS 0x0000887d -#define NVA3_DISP_MAST_CLASS 0x0000857d -#define NVD0_DISP_MAST_CLASS 0x0000907d -#define NVE0_DISP_MAST_CLASS 0x0000917d - -struct nv50_display_mast_class { - u32 pushbuf; -}; - -/* 507e: NV50_DISP_OVLY - * 827e: NV84_DISP_OVLY - * 837e: NVA0_DISP_OVLY - * 887e: NV94_DISP_OVLY - * 857e: NVA3_DISP_OVLY - * 907e: NVD0_DISP_OVLY - * 917e: NVE0_DISP_OVLY - */ - -#define NV50_DISP_OVLY_CLASS 0x0000507e -#define NV84_DISP_OVLY_CLASS 0x0000827e -#define NVA0_DISP_OVLY_CLASS 0x0000837e -#define NV94_DISP_OVLY_CLASS 0x0000887e -#define NVA3_DISP_OVLY_CLASS 0x0000857e -#define NVD0_DISP_OVLY_CLASS 0x0000907e -#define NVE0_DISP_OVLY_CLASS 0x0000917e - -struct nv50_display_ovly_class { - u32 pushbuf; - u32 head; -}; - #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/engctx.h 
b/trunk/drivers/gpu/drm/nouveau/core/include/core/engctx.h index 2fd48b564c7d..8a947b6872eb 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/engctx.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/engctx.h @@ -39,9 +39,6 @@ void nouveau_engctx_destroy(struct nouveau_engctx *); int nouveau_engctx_init(struct nouveau_engctx *); int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend); -int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, void *, u32, - struct nouveau_object **); void _nouveau_engctx_dtor(struct nouveau_object *); int _nouveau_engctx_init(struct nouveau_object *); int _nouveau_engctx_fini(struct nouveau_object *, bool suspend); diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/falcon.h deleted file mode 100644 index 1edec386ab36..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/falcon.h +++ /dev/null @@ -1,81 +0,0 @@ -#ifndef __NOUVEAU_FALCON_H__ -#define __NOUVEAU_FALCON_H__ - -#include -#include -#include - -struct nouveau_falcon_chan { - struct nouveau_engctx base; -}; - -#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \ - nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) -#define nouveau_falcon_context_destroy(d) \ - nouveau_engctx_destroy(&(d)->base) -#define nouveau_falcon_context_init(d) \ - nouveau_engctx_init(&(d)->base) -#define nouveau_falcon_context_fini(d,s) \ - nouveau_engctx_fini(&(d)->base, (s)) - -#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor -#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor -#define _nouveau_falcon_context_init _nouveau_engctx_init -#define _nouveau_falcon_context_fini _nouveau_engctx_fini -#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32 -#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32 - -struct nouveau_falcon_data { - bool external; -}; - -struct nouveau_falcon { - struct nouveau_engine base; - - u32 addr; - u8 version; - u8 secret; - - struct nouveau_gpuobj *core; - bool external; - - struct { - u32 limit; - u32 *data; - u32 size; - } code; - - struct { - u32 limit; - u32 *data; - u32 size; - } data; -}; - -#define nv_falcon(priv) (&(priv)->base) - -#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \ - nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \ - sizeof(**r),(void **)r) -#define nouveau_falcon_destroy(p) \ - nouveau_engine_destroy(&(p)->base) -#define nouveau_falcon_init(p) ({ \ - struct nouveau_falcon *falcon = (p); \ - _nouveau_falcon_init(nv_object(falcon)); \ -}) -#define nouveau_falcon_fini(p,s) ({ \ - struct nouveau_falcon *falcon = (p); \ - _nouveau_falcon_fini(nv_object(falcon), (s)); \ -}) - -int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, u32, bool, const char *, - const char *, int, void **); - -#define _nouveau_falcon_dtor _nouveau_engine_dtor -int _nouveau_falcon_init(struct nouveau_object *); -int _nouveau_falcon_fini(struct nouveau_object *, bool); -u32 _nouveau_falcon_rd32(struct nouveau_object *, u64); -void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32); - -#endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h index b3b9ce4e9d38..6eaff79377ae 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h @@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct 
nouveau_gpuobj **ref) void _nouveau_gpuobj_dtor(struct nouveau_object *); int _nouveau_gpuobj_init(struct nouveau_object *); int _nouveau_gpuobj_fini(struct nouveau_object *, bool); -u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64); -void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32); +u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32); +void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/mm.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/mm.h index 2514e81ade02..975137ba34a6 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/mm.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/mm.h @@ -21,12 +21,6 @@ struct nouveau_mm { int heap_nodes; }; -static inline bool -nouveau_mm_initialised(struct nouveau_mm *mm) -{ - return mm->block_size != 0; -} - int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block); int nouveau_mm_fini(struct nouveau_mm *); int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min, diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/object.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/object.h index 27d17a9852e5..486f1a9217fd 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/object.h @@ -70,8 +70,7 @@ nv_pclass(struct nouveau_object *parent, u32 oclass) } struct nouveau_omthds { - u32 start; - u32 limit; + u32 method; int (*call)(struct nouveau_object *, u32, void *, u32); }; @@ -82,12 +81,12 @@ struct nouveau_ofuncs { void (*dtor)(struct nouveau_object *); int (*init)(struct nouveau_object *); int (*fini)(struct nouveau_object *, bool suspend); - u8 (*rd08)(struct nouveau_object *, u64 offset); - u16 (*rd16)(struct nouveau_object *, u64 offset); - u32 (*rd32)(struct nouveau_object *, u64 offset); - void (*wr08)(struct nouveau_object *, u64 offset, u8 data); - void (*wr16)(struct nouveau_object *, u64 offset, u16 data); - void (*wr32)(struct nouveau_object *, u64 offset, u32 data); + u8 (*rd08)(struct nouveau_object *, u32 offset); + u16 (*rd16)(struct nouveau_object *, u32 offset); + u32 (*rd32)(struct nouveau_object *, u32 offset); + void (*wr08)(struct nouveau_object *, u32 offset, u8 data); + void (*wr16)(struct nouveau_object *, u32 offset, u16 data); + void (*wr32)(struct nouveau_object *, u32 offset, u32 data); }; static inline struct nouveau_ofuncs * @@ -110,27 +109,21 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle); void nouveau_object_debug(void); static inline int -nv_exec(void *obj, u32 mthd, void *data, u32 size) +nv_call(void *obj, u32 mthd, u32 data) { struct nouveau_omthds *method = nv_oclass(obj)->omthds; while (method && method->call) { - if (mthd >= method->start && mthd <= method->limit) - return method->call(obj, mthd, data, size); + if (method->method == mthd) + return method->call(obj, mthd, &data, sizeof(data)); method++; } return -EINVAL; } -static inline int -nv_call(void *obj, u32 mthd, u32 data) -{ - return nv_exec(obj, mthd, &data, sizeof(data)); -} - static inline u8 -nv_ro08(void *obj, u64 addr) +nv_ro08(void *obj, u32 addr) { u8 data = nv_ofuncs(obj)->rd08(obj, addr); nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data); @@ -138,7 +131,7 @@ nv_ro08(void *obj, u64 addr) } static inline u16 -nv_ro16(void *obj, u64 addr) +nv_ro16(void *obj, u32 addr) { u16 data = nv_ofuncs(obj)->rd16(obj, addr); nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data); @@ -146,7 +139,7 @@ 
nv_ro16(void *obj, u64 addr) } static inline u32 -nv_ro32(void *obj, u64 addr) +nv_ro32(void *obj, u32 addr) { u32 data = nv_ofuncs(obj)->rd32(obj, addr); nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data); @@ -154,42 +147,46 @@ nv_ro32(void *obj, u64 addr) } static inline void -nv_wo08(void *obj, u64 addr, u8 data) +nv_wo08(void *obj, u32 addr, u8 data) { nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data); nv_ofuncs(obj)->wr08(obj, addr, data); } static inline void -nv_wo16(void *obj, u64 addr, u16 data) +nv_wo16(void *obj, u32 addr, u16 data) { nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data); nv_ofuncs(obj)->wr16(obj, addr, data); } static inline void -nv_wo32(void *obj, u64 addr, u32 data) +nv_wo32(void *obj, u32 addr, u32 data) { nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data); nv_ofuncs(obj)->wr32(obj, addr, data); } static inline u32 -nv_mo32(void *obj, u64 addr, u32 mask, u32 data) +nv_mo32(void *obj, u32 addr, u32 mask, u32 data) { u32 temp = nv_ro32(obj, addr); nv_wo32(obj, addr, (temp & ~mask) | data); return temp; } -static inline bool -nv_strncmp(void *obj, u32 addr, u32 len, const char *str) +static inline int +nv_memcmp(void *obj, u32 addr, const char *str, u32 len) { + unsigned char c1, c2; + while (len--) { - if (nv_ro08(obj, addr++) != *(str++)) - return false; + c1 = nv_ro08(obj, addr++); + c2 = *(str++); + if (c1 != c2) + return c1 - c2; } - return true; + return 0; } #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/parent.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/parent.h index 31cd852c96df..3c2e940eb0f8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/parent.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/parent.h @@ -14,7 +14,7 @@ struct nouveau_parent { struct nouveau_object base; struct nouveau_sclass *sclass; - u64 engine; + u32 engine; int (*context_attach)(struct nouveau_object *, struct nouveau_object *); diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/bsp.h index 13ccdf54dfad..75d1ed5f85fd 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/bsp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/bsp.h @@ -1,8 +1,45 @@ #ifndef __NOUVEAU_BSP_H__ #define __NOUVEAU_BSP_H__ +#include +#include + +struct nouveau_bsp_chan { + struct nouveau_engctx base; +}; + +#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_bsp_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_bsp_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_bsp_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor +#define _nouveau_bsp_context_init _nouveau_engctx_init +#define _nouveau_bsp_context_fini _nouveau_engctx_fini +#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_bsp { + struct nouveau_engine base; +}; + +#define nouveau_bsp_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d)) +#define nouveau_bsp_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_bsp_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_bsp_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_bsp_dtor _nouveau_engine_dtor +#define _nouveau_bsp_init _nouveau_engine_init +#define _nouveau_bsp_fini _nouveau_engine_fini + extern struct nouveau_oclass 
nv84_bsp_oclass; -extern struct nouveau_oclass nvc0_bsp_oclass; -extern struct nouveau_oclass nve0_bsp_oclass; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/copy.h index 8cad2cf28cef..70b9d8c5fcf5 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/copy.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/copy.h @@ -1,7 +1,44 @@ #ifndef __NOUVEAU_COPY_H__ #define __NOUVEAU_COPY_H__ -void nva3_copy_intr(struct nouveau_subdev *); +#include +#include + +struct nouveau_copy_chan { + struct nouveau_engctx base; +}; + +#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_copy_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_copy_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_copy_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_copy_context_dtor _nouveau_engctx_dtor +#define _nouveau_copy_context_init _nouveau_engctx_init +#define _nouveau_copy_context_fini _nouveau_engctx_fini +#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_copy { + struct nouveau_engine base; +}; + +#define nouveau_copy_create(p,e,c,y,i,d) \ + nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d)) +#define nouveau_copy_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_copy_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_copy_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_copy_dtor _nouveau_engine_dtor +#define _nouveau_copy_init _nouveau_engine_init +#define _nouveau_copy_fini _nouveau_engine_fini extern struct nouveau_oclass nva3_copy_oclass; extern struct nouveau_oclass nvc0_copy0_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/crypt.h index db975618e937..e3674743baaa 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/crypt.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/crypt.h @@ -1,6 +1,45 @@ #ifndef __NOUVEAU_CRYPT_H__ #define __NOUVEAU_CRYPT_H__ +#include +#include + +struct nouveau_crypt_chan { + struct nouveau_engctx base; +}; + +#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_crypt_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_crypt_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_crypt_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor +#define _nouveau_crypt_context_init _nouveau_engctx_init +#define _nouveau_crypt_context_fini _nouveau_engctx_fini +#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_crypt { + struct nouveau_engine base; +}; + +#define nouveau_crypt_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d)) +#define nouveau_crypt_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_crypt_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_crypt_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_crypt_dtor _nouveau_engine_dtor +#define _nouveau_crypt_init _nouveau_engine_init +#define _nouveau_crypt_fini _nouveau_engine_fini + extern struct 
nouveau_oclass nv84_crypt_oclass; extern struct nouveau_oclass nv98_crypt_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/disp.h index 46948285f3e7..38ec1252cbaa 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/disp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/disp.h @@ -39,11 +39,6 @@ nouveau_disp(void *obj) extern struct nouveau_oclass nv04_disp_oclass; extern struct nouveau_oclass nv50_disp_oclass; -extern struct nouveau_oclass nv84_disp_oclass; -extern struct nouveau_oclass nva0_disp_oclass; -extern struct nouveau_oclass nv94_disp_oclass; -extern struct nouveau_oclass nva3_disp_oclass; extern struct nouveau_oclass nvd0_disp_oclass; -extern struct nouveau_oclass nve0_disp_oclass; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h index b28914ed1752..700ccbb1941f 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h @@ -12,17 +12,29 @@ struct nouveau_dmaobj { u32 access; u64 start; u64 limit; - u32 conf0; }; +#define nouveau_dmaobj_create(p,e,c,a,s,d) \ + nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d) +#define nouveau_dmaobj_destroy(p) \ + nouveau_object_destroy(&(p)->base) +#define nouveau_dmaobj_init(p) \ + nouveau_object_init(&(p)->base) +#define nouveau_dmaobj_fini(p,s) \ + nouveau_object_fini(&(p)->base, (s)) + +int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *data, u32 size, + int length, void **); + +#define _nouveau_dmaobj_dtor nouveau_object_destroy +#define _nouveau_dmaobj_init nouveau_object_init +#define _nouveau_dmaobj_fini nouveau_object_fini + struct nouveau_dmaeng { struct nouveau_engine base; - - /* creates a "physical" dma object from a struct nouveau_dmaobj */ - int (*bind)(struct nouveau_dmaeng *dmaeng, - struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, - struct nouveau_gpuobj **); + int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent, + struct nouveau_dmaobj *, struct nouveau_gpuobj **); }; #define nouveau_dmaeng_create(p,e,c,d) \ @@ -41,8 +53,5 @@ struct nouveau_dmaeng { extern struct nouveau_oclass nv04_dmaeng_oclass; extern struct nouveau_oclass nv50_dmaeng_oclass; extern struct nouveau_oclass nvc0_dmaeng_oclass; -extern struct nouveau_oclass nvd0_dmaeng_oclass; - -extern struct nouveau_oclass nouveau_dmaobj_sclass[]; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/fifo.h index f18846c8c6fe..d67fed1e3970 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/fifo.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/fifo.h @@ -33,15 +33,15 @@ int nouveau_fifo_channel_create_(struct nouveau_object *, struct nouveau_object *, struct nouveau_oclass *, int bar, u32 addr, u32 size, u32 push, - u64 engmask, int len, void **); + u32 engmask, int len, void **); void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *); #define _nouveau_fifo_channel_init _nouveau_namedb_init #define _nouveau_fifo_channel_fini _nouveau_namedb_fini void _nouveau_fifo_channel_dtor(struct nouveau_object *); -u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64); -void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32); +u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32); +void 
_nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32); struct nouveau_fifo_base { struct nouveau_gpuobj base; diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/ppp.h index 0a66781e8cf1..74d554fb3281 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/ppp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/ppp.h @@ -1,7 +1,45 @@ #ifndef __NOUVEAU_PPP_H__ #define __NOUVEAU_PPP_H__ +#include +#include + +struct nouveau_ppp_chan { + struct nouveau_engctx base; +}; + +#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_ppp_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_ppp_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_ppp_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor +#define _nouveau_ppp_context_init _nouveau_engctx_init +#define _nouveau_ppp_context_fini _nouveau_engctx_fini +#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_ppp { + struct nouveau_engine base; +}; + +#define nouveau_ppp_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d)) +#define nouveau_ppp_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_ppp_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_ppp_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_ppp_dtor _nouveau_engine_dtor +#define _nouveau_ppp_init _nouveau_engine_init +#define _nouveau_ppp_fini _nouveau_engine_fini + extern struct nouveau_oclass nv98_ppp_oclass; -extern struct nouveau_oclass nvc0_ppp_oclass; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/vp.h index d7b287b115bf..05cd08fba377 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/vp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/vp.h @@ -1,8 +1,45 @@ #ifndef __NOUVEAU_VP_H__ #define __NOUVEAU_VP_H__ +#include +#include + +struct nouveau_vp_chan { + struct nouveau_engctx base; +}; + +#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_vp_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_vp_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_vp_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_vp_context_dtor _nouveau_engctx_dtor +#define _nouveau_vp_context_init _nouveau_engctx_init +#define _nouveau_vp_context_fini _nouveau_engctx_fini +#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_vp { + struct nouveau_engine base; +}; + +#define nouveau_vp_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d)) +#define nouveau_vp_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_vp_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_vp_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_vp_dtor _nouveau_engine_dtor +#define _nouveau_vp_init _nouveau_engine_init +#define _nouveau_vp_fini _nouveau_engine_fini + extern struct nouveau_oclass nv84_vp_oclass; -extern struct nouveau_oclass nvc0_vp_oclass; -extern struct nouveau_oclass nve0_vp_oclass; #endif diff --git 
a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h index b79025da581e..d682fb625833 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h @@ -23,7 +23,6 @@ struct dcb_output { uint8_t bus; uint8_t location; uint8_t or; - uint8_t link; bool duallink_possible; union { struct sor_conf { @@ -56,11 +55,36 @@ struct dcb_output { u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len); u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len); -u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *, - struct dcb_output *); -u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *, - struct dcb_output *); int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec) (struct nouveau_bios *, void *, int index, u16 entry)); + +/* BIT 'U'/'d' table encoder subtables have hashes matching them to + * a particular set of encoders. + * + * This function returns true if a particular DCB entry matches. + */ +static inline bool +dcb_hash_match(struct dcb_output *dcb, u32 hash) +{ + if ((hash & 0x000000f0) != (dcb->location << 4)) + return false; + if ((hash & 0x0000000f) != dcb->type) + return false; + if (!(hash & (dcb->or << 16))) + return false; + + switch (dcb->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + if (hash & 0x00c00000) { + if (!(hash & (dcb->sorconf.link << 22))) + return false; + } + default: + return true; + } +} + #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h deleted file mode 100644 index c35937e2f6a4..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef __NVBIOS_DISP_H__ -#define __NVBIOS_DISP_H__ - -u16 nvbios_disp_table(struct nouveau_bios *, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub); - -struct nvbios_disp { - u16 data; -}; - -u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr__, u8 *sub); -u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr__, u8 *sub, - struct nvbios_disp *); - -struct nvbios_outp { - u16 type; - u16 mask; - u16 script[3]; -}; - -u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len); -u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *); -u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *); - - -struct nvbios_ocfg { - u16 match; - u16 clkcmp[2]; -}; - -u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len); -u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_ocfg *); -u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_ocfg *); -u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz); - -#endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h index 6e54218b55fc..73b5e5d3e75a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h @@ -1,34 +1,8 @@ #ifndef __NVBIOS_DP_H__ #define __NVBIOS_DP_H__ 
-struct nvbios_dpout { - u16 type; - u16 mask; - u8 flags; - u32 script[5]; - u32 lnkcmp; -}; - -u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpout *); -u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpout *); - -struct nvbios_dpcfg { - u8 drv; - u8 pre; - u8 unk; -}; - -u16 -nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpcfg *); -u16 -nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpcfg *); +u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); +u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len); +u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/clock.h index 39e73b91d360..41b7a6a76f19 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/clock.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/clock.h @@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, int clk, struct nouveau_pll_vals *); int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, struct nouveau_pll_vals *); - +int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, + int clk, struct nouveau_pll_vals *); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/fb.h index da470e6851b1..5c1b5e1904f9 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/fb.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/fb.h @@ -69,11 +69,8 @@ struct nouveau_fb { } type; u64 stolen; u64 size; - int ranks; - int parts; - int (*init)(struct nouveau_fb *); int (*get)(struct nouveau_fb *, u64 size, u32 align, u32 size_nc, u32 type, struct nouveau_mem **); void (*put)(struct nouveau_fb *, struct nouveau_mem **); @@ -87,8 +84,6 @@ struct nouveau_fb { int regions; void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *); - void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags, - struct nouveau_fb_tile *); void (*fini)(struct nouveau_fb *, int i, struct nouveau_fb_tile *); void (*prog)(struct nouveau_fb *, int i, @@ -104,7 +99,7 @@ nouveau_fb(void *obj) #define nouveau_fb_create(p,e,c,d) \ nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d)) -int nouveau_fb_preinit(struct nouveau_fb *); +int nouveau_fb_created(struct nouveau_fb *); void nouveau_fb_destroy(struct nouveau_fb *); int nouveau_fb_init(struct nouveau_fb *); #define nouveau_fb_fini(p,s) \ @@ -116,19 +111,9 @@ int _nouveau_fb_init(struct nouveau_object *); extern struct nouveau_oclass nv04_fb_oclass; extern struct nouveau_oclass nv10_fb_oclass; -extern struct nouveau_oclass nv1a_fb_oclass; extern struct nouveau_oclass nv20_fb_oclass; -extern struct nouveau_oclass nv25_fb_oclass; extern struct nouveau_oclass nv30_fb_oclass; -extern struct nouveau_oclass nv35_fb_oclass; -extern struct nouveau_oclass nv36_fb_oclass; extern struct nouveau_oclass nv40_fb_oclass; -extern struct nouveau_oclass nv41_fb_oclass; -extern struct nouveau_oclass nv44_fb_oclass; -extern struct nouveau_oclass nv46_fb_oclass; -extern struct nouveau_oclass nv47_fb_oclass; -extern struct nouveau_oclass nv49_fb_oclass; -extern struct nouveau_oclass 
nv4e_fb_oclass; extern struct nouveau_oclass nv50_fb_oclass; extern struct nouveau_oclass nvc0_fb_oclass; @@ -137,35 +122,13 @@ int nouveau_fb_bios_memtype(struct nouveau_bios *); bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype); -void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, - u32 pitch, u32 flags, struct nouveau_fb_tile *); -void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); -int nv20_fb_vram_init(struct nouveau_fb *); -void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, - u32 pitch, u32 flags, struct nouveau_fb_tile *); -void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); -void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); - -int nv30_fb_init(struct nouveau_object *); void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *); - -void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags, - struct nouveau_fb_tile *); - -int nv41_fb_vram_init(struct nouveau_fb *); -int nv41_fb_init(struct nouveau_object *); -void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); - -int nv44_fb_vram_init(struct nouveau_fb *); -int nv44_fb_init(struct nouveau_object *); -void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); - -void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, - u32 pitch, u32 flags, struct nouveau_fb_tile *); +void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **); +void nv50_fb_trap(struct nouveau_fb *, int display); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bar/base.c index d70ba342aa2e..cd01c533007a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bar/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bar/base.c @@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object) } static u32 -nouveau_barobj_rd32(struct nouveau_object *object, u64 addr) +nouveau_barobj_rd32(struct nouveau_object *object, u32 addr) { struct nouveau_barobj *barobj = (void *)object; return ioread32_native(barobj->iomem + addr); } static void -nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nouveau_barobj *barobj = (void *)object; iowrite32_native(data, barobj->iomem + addr); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index dd111947eb86..70ca7d5a1aa1 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios) struct pci_dev *pdev = nv_device(bios)->pdev; struct device_node *dn; const u32 *data; - int size; + int size, i; dn = pci_device_to_OF_node(pdev); if (!dn) { @@ -210,19 +210,11 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios) return; bios->data = kmalloc(bios->size, GFP_KERNEL); - if (bios->data) { - /* disobey the acpi spec - much faster on at least w530 ... */ - ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size); - if (ret != bios->size || - nvbios_checksum(bios->data, bios->size)) { - /* ... 
that didn't work, ok, i'll be good now */ - for (i = 0; i < bios->size; i += cnt) { - cnt = min((bios->size - i), (u32)4096); - ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt); - if (ret != cnt) - break; - } - } + for (i = 0; bios->data && i < bios->size; i += cnt) { + cnt = min((bios->size - i), (u32)4096); + ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt); + if (ret != cnt) + break; } } @@ -366,42 +358,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios) } static u8 -nouveau_bios_rd08(struct nouveau_object *object, u64 addr) +nouveau_bios_rd08(struct nouveau_object *object, u32 addr) { struct nouveau_bios *bios = (void *)object; return bios->data[addr]; } static u16 -nouveau_bios_rd16(struct nouveau_object *object, u64 addr) +nouveau_bios_rd16(struct nouveau_object *object, u32 addr) { struct nouveau_bios *bios = (void *)object; return get_unaligned_le16(&bios->data[addr]); } static u32 -nouveau_bios_rd32(struct nouveau_object *object, u64 addr) +nouveau_bios_rd32(struct nouveau_object *object, u32 addr) { struct nouveau_bios *bios = (void *)object; return get_unaligned_le32(&bios->data[addr]); } static void -nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data) +nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data) { struct nouveau_bios *bios = (void *)object; bios->data[addr] = data; } static void -nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data) +nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data) { struct nouveau_bios *bios = (void *)object; put_unaligned_le16(data, &bios->data[addr]); } static void -nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data) +nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nouveau_bios *bios = (void *)object; put_unaligned_le32(data, &bios->data[addr]); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c index bbd709fba0a9..c51197157749 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c @@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) } } else if (*ver >= 0x15) { - if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) { + if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) { u16 i2c = nv_ro16(bios, dcb + 2); *hdr = 4; *cnt = (i2c - dcb) / 10; @@ -107,69 +107,6 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) return 0x0000; } -u16 -dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len, - struct dcb_output *outp) -{ - u16 dcb = dcb_outp(bios, idx, ver, len); - if (dcb) { - if (*ver >= 0x20) { - u32 conn = nv_ro32(bios, dcb + 0x00); - outp->or = (conn & 0x0f000000) >> 24; - outp->location = (conn & 0x00300000) >> 20; - outp->bus = (conn & 0x000f0000) >> 16; - outp->connector = (conn & 0x0000f000) >> 12; - outp->heads = (conn & 0x00000f00) >> 8; - outp->i2c_index = (conn & 0x000000f0) >> 4; - outp->type = (conn & 0x0000000f); - outp->link = 0; - } else { - dcb = 0x0000; - } - - if (*ver >= 0x40) { - u32 conf = nv_ro32(bios, dcb + 0x04); - switch (outp->type) { - case DCB_OUTPUT_TMDS: - case DCB_OUTPUT_LVDS: - case DCB_OUTPUT_DP: - outp->link = (conf & 0x00000030) >> 4; - outp->sorconf.link = outp->link; /*XXX*/ - break; - default: - break; - } - } - } - return dcb; -} - -static inline u16 -dcb_outp_hasht(struct dcb_output *outp) -{ - return outp->type; -} - -static inline u16 -dcb_outp_hashm(struct dcb_output *outp) -{ - return (outp->heads << 
8) | (outp->link << 6) | outp->or; -} - -u16 -dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask, - u8 *ver, u8 *len, struct dcb_output *outp) -{ - u16 dcb, idx = 0; - while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) { - if (dcb_outp_hasht(outp) == type) { - if ((dcb_outp_hashm(outp) & mask) == mask) - break; - } - } - return dcb; -} - int dcb_outp_foreach(struct nouveau_bios *bios, void *data, int (*exec)(struct nouveau_bios *, void *, int, u16)) diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c deleted file mode 100644 index 7f16e52d9bea..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include -#include - -u16 -nvbios_disp_table(struct nouveau_bios *bios, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub) -{ - struct bit_entry U; - - if (!bit_entry(bios, 'U', &U)) { - if (U.version == 1) { - u16 data = nv_ro16(bios, U.offset); - if (data) { - *ver = nv_ro08(bios, data + 0x00); - switch (*ver) { - case 0x20: - case 0x21: - *hdr = nv_ro08(bios, data + 0x01); - *len = nv_ro08(bios, data + 0x02); - *cnt = nv_ro08(bios, data + 0x03); - *sub = nv_ro08(bios, data + 0x04); - return data; - default: - break; - } - } - } - } - - return 0x0000; -} - -u16 -nvbios_disp_entry(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *len, u8 *sub) -{ - u8 hdr, cnt; - u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub); - if (data && idx < cnt) - return data + hdr + (idx * *len); - *ver = 0x00; - return 0x0000; -} - -u16 -nvbios_disp_parse(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *len, u8 *sub, - struct nvbios_disp *info) -{ - u16 data = nvbios_disp_entry(bios, idx, ver, len, sub); - if (data && *len >= 2) { - info->data = nv_ro16(bios, data + 0); - return data; - } - return 0x0000; -} - -u16 -nvbios_outp_entry(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len) -{ - struct nvbios_disp info; - u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info); - if (data) { - *cnt = nv_ro08(bios, info.data + 0x05); - *len = 0x06; - data = info.data; - } - return data; -} - -u16 -nvbios_outp_parse(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *info) -{ - u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len); - if (data && *hdr >= 0x0a) { - info->type = nv_ro16(bios, data + 0x00); - info->mask = nv_ro32(bios, data + 0x02); - if (*ver <= 0x20) /* match any link */ - info->mask |= 0x00c0; - info->script[0] = nv_ro16(bios, data + 0x06); - info->script[1] = nv_ro16(bios, data + 0x08); - info->script[2] = 0x0000; - if (*hdr >= 0x0c) - info->script[2] = nv_ro16(bios, data + 0x0a); - return data; - } - return 0x0000; -} - -u16 -nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *info) -{ - u16 data, idx = 0; - while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) { - if (data && info->type == type) { - if ((info->mask & mask) == mask) - break; - } - } - return data; -} - -u16 -nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len) -{ - if (idx < *cnt) - return outp + *hdr + (idx * *len); - return 0x0000; -} - -u16 -nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_ocfg *info) -{ - u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); - if (data) { - info->match = nv_ro16(bios, data + 0x00); - info->clkcmp[0] = nv_ro16(bios, data + 0x02); - info->clkcmp[1] = nv_ro16(bios, data + 0x04); - } - return data; -} - -u16 -nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_ocfg *info) -{ - u16 data, idx = 0; - while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { - if (info->match == type) - break; - } - return data; -} - -u16 -nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz) -{ - while (cmp) { - if (khz / 10 >= nv_ro16(bios, cmp + 0x00)) - return nv_ro16(bios, cmp + 0x02); - cmp += 0x04; - } - return 0x0000; -} diff --git 
a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c index 663853bcca82..3cbc0f3e8d5e 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c @@ -25,29 +25,23 @@ #include "subdev/bios.h" #include "subdev/bios/bit.h" +#include "subdev/bios/dcb.h" #include "subdev/bios/dp.h" -static u16 -nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +u16 +dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { - struct bit_entry d; + struct bit_entry bit_d; - if (!bit_entry(bios, 'd', &d)) { - if (d.version == 1 && d.length >= 2) { - u16 data = nv_ro16(bios, d.offset); + if (!bit_entry(bios, 'd', &bit_d)) { + if (bit_d.version == 1) { + u16 data = nv_ro16(bios, bit_d.offset); if (data) { - *ver = nv_ro08(bios, data + 0x00); - switch (*ver) { - case 0x21: - case 0x30: - case 0x40: - *hdr = nv_ro08(bios, data + 0x01); - *len = nv_ro08(bios, data + 0x02); - *cnt = nv_ro08(bios, data + 0x03); - return data; - default: - break; - } + *ver = nv_ro08(bios, data + 0); + *hdr = nv_ro08(bios, data + 1); + *len = nv_ro08(bios, data + 2); + *cnt = nv_ro08(bios, data + 3); + return data; } } } @@ -55,150 +49,28 @@ nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) return 0x0000; } -static u16 -nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len) -{ - u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len); - if (data && idx < *cnt) { - u16 outp = nv_ro16(bios, data + *hdr + idx * *len); - switch (*ver * !!outp) { - case 0x21: - case 0x30: - *hdr = nv_ro08(bios, data + 0x04); - *len = nv_ro08(bios, data + 0x05); - *cnt = nv_ro08(bios, outp + 0x04); - break; - case 0x40: - *hdr = nv_ro08(bios, data + 0x04); - *cnt = 0; - *len = 0; - break; - default: - break; - } - return outp; - } - *ver = 0x00; - return 0x0000; -} - u16 -nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpout *info) +dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) { - u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len); - if (data && *ver) { - info->type = nv_ro16(bios, data + 0x00); - info->mask = nv_ro16(bios, data + 0x02); - switch (*ver) { - case 0x21: - case 0x30: - info->flags = nv_ro08(bios, data + 0x05); - info->script[0] = nv_ro16(bios, data + 0x06); - info->script[1] = nv_ro16(bios, data + 0x08); - info->lnkcmp = nv_ro16(bios, data + 0x0a); - info->script[2] = nv_ro16(bios, data + 0x0c); - info->script[3] = nv_ro16(bios, data + 0x0e); - info->script[4] = nv_ro16(bios, data + 0x10); - break; - case 0x40: - info->flags = nv_ro08(bios, data + 0x04); - info->script[0] = nv_ro16(bios, data + 0x05); - info->script[1] = nv_ro16(bios, data + 0x07); - info->lnkcmp = nv_ro16(bios, data + 0x09); - info->script[2] = nv_ro16(bios, data + 0x0b); - info->script[3] = nv_ro16(bios, data + 0x0d); - info->script[4] = nv_ro16(bios, data + 0x0f); - break; - default: - data = 0x0000; - break; - } - } - return data; + u8 hdr, cnt; + u16 table = dp_table(bios, ver, &hdr, &cnt, len); + if (table && idx < cnt) + return nv_ro16(bios, table + hdr + (idx * *len)); + return 0xffff; } u16 -nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpout *info) -{ - u16 data, idx = 0; - while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) { - if (data && info->type == type) { 
- if ((info->mask & mask) == mask) - break; - } - } - return data; -} - -static u16 -nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp, + u8 *ver, u8 *len) { - if (*ver >= 0x40) { - outp = nvbios_dp_table(bios, ver, hdr, cnt, len); - *hdr = *hdr + (*len * * cnt); - *len = nv_ro08(bios, outp + 0x06); - *cnt = nv_ro08(bios, outp + 0x07); - } - - if (idx < *cnt) - return outp + *hdr + (idx * *len); - - return 0x0000; -} - -u16 -nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpcfg *info) -{ - u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len); - if (data) { - switch (*ver) { - case 0x21: - info->drv = nv_ro08(bios, data + 0x02); - info->pre = nv_ro08(bios, data + 0x03); - info->unk = nv_ro08(bios, data + 0x04); - break; - case 0x30: - case 0x40: - info->drv = nv_ro08(bios, data + 0x01); - info->pre = nv_ro08(bios, data + 0x02); - info->unk = nv_ro08(bios, data + 0x03); - break; - default: - data = 0x0000; - break; - } - } - return data; -} - -u16 -nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpcfg *info) -{ - u8 idx = 0xff; + u8 idx = 0; u16 data; - - if (*ver >= 0x30) { - const u8 vsoff[] = { 0, 4, 7, 9 }; - idx = (un * 10) + vsoff[vs] + pe; - } else { - while ((data = nvbios_dpcfg_entry(bios, outp, idx, - ver, hdr, cnt, len))) { - if (nv_ro08(bios, data + 0x00) == vs && - nv_ro08(bios, data + 0x01) == pe) - break; - idx++; + while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) { + if (data) { + u32 hash = nv_ro32(bios, data); + if (dcb_hash_match(outp, hash)) + return data; } } - - return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info); + return 0x0000; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c index c90d4aa3ae4f..4c9f1e508165 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c @@ -101,8 +101,8 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line, } /* DCB 2.2, fixed TVDAC GPIO data */ - if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) { - if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) { + if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) { + if (func == DCB_GPIO_TVDAC0) { u8 conf = nv_ro08(bios, entry - 5); u8 addr = nv_ro08(bios, entry - 4); if (conf & 0x01) { diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index ae168bbb86d8..6be8c32f6e4c 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -743,10 +743,9 @@ static void init_dp_condition(struct nvbios_init *init) { struct nouveau_bios *bios = init->bios; - struct nvbios_dpout info; u8 cond = nv_ro08(bios, init->offset + 1); u8 unkn = nv_ro08(bios, init->offset + 2); - u8 ver, hdr, cnt, len; + u8 ver, len; u16 data; trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn); @@ -760,12 +759,10 @@ init_dp_condition(struct nvbios_init *init) case 1: case 2: if ( init->outp && - (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP, - (init->outp->or << 0) | - (init->outp->sorconf.link << 6), - &ver, &hdr, &cnt, &len, &info))) - { - if (!(info.flags & cond)) + (data = dp_outp_match(bios, 
init->outp, &ver, &len))) { + if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond)) + init_exec_set(init, false); + if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond)) init_exec_set(init, false); break; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c index cc8d7d162d7c..9068c98b96f6 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c @@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq) return ret; } +int +nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, + int clk, struct nouveau_pll_vals *pv) +{ + int ret, N, M, P; + + ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P); + + if (ret > 0) { + pv->refclk = info->refclk; + pv->N1 = N; + pv->M1 = M; + pv->log2P = P; + } + return ret; +} + + static int nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.pll_set = nva3_clock_pll_set; + priv->base.pll_calc = nva3_clock_pll_calc; return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c index 5ccce0b17bf3..f6962c9b6c36 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c @@ -79,6 +79,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.pll_set = nvc0_clock_pll_set; + priv->base.pll_calc = nva3_clock_pll_calc; return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/base.c index f8a7ed4166cf..ca9a4648bd8a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/base.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -60,24 +61,19 @@ struct nouveau_devobj { static const u64 disable_map[] = { [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS, - [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE, [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE, - [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, - [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO, [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH, [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG, [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME, @@ -88,7 +84,7 @@ static const u64 disable_map[] = { [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0, [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1, [NVDEV_ENGINE_UNK1C1] = 
NV_DEVICE_DISABLE_UNK1C1, - [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC, + [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP, [NVDEV_SUBDEV_NR] = 0, }; @@ -212,7 +208,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent, /* determine frequency of timing crystal */ if ( device->chipset < 0x17 || - (device->chipset >= 0x20 && device->chipset < 0x25)) + (device->chipset >= 0x20 && device->chipset <= 0x25)) strap &= 0x00000040; else strap &= 0x00400040; @@ -360,37 +356,37 @@ nouveau_devobj_fini(struct nouveau_object *object, bool suspend) } static u8 -nouveau_devobj_rd08(struct nouveau_object *object, u64 addr) +nouveau_devobj_rd08(struct nouveau_object *object, u32 addr) { return nv_rd08(object->engine, addr); } static u16 -nouveau_devobj_rd16(struct nouveau_object *object, u64 addr) +nouveau_devobj_rd16(struct nouveau_object *object, u32 addr) { return nv_rd16(object->engine, addr); } static u32 -nouveau_devobj_rd32(struct nouveau_object *object, u64 addr) +nouveau_devobj_rd32(struct nouveau_object *object, u32 addr) { return nv_rd32(object->engine, addr); } static void -nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data) +nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data) { nv_wr08(object->engine, addr, data); } static void -nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data) +nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data) { nv_wr16(object->engine, addr, data); } static void -nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data) { nv_wr32(object->engine, addr, data); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c index 9c40b0fb23f6..f09accfd0e31 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c @@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c index 74f88f48e1c2..5fa58b7369b5 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c @@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = 
&nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c index 0ac1b2c4f61d..7f4b8fe6cccc 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c @@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c index 41d59689a021..42deadca0f0a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c @@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device) 
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = 
&nv04_dmaeng_oclass; @@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c index 6ccfd8585ba2..fec3bcc9a6fc 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c @@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x86: device->cname = "G86"; @@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x92: device->cname = "G92"; @@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x94: device->cname = "G94"; @@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x96: device->cname = "G96"; @@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x98: device->cname = "G98"; @@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xa0: device->cname = "G200"; @@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xaa: device->cname = "MCP77/MCP78"; @@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xac: device->cname = "MCP79/MCP7A"; @@ -298,7 +298,7 @@ nv50_identify(struct 
nouveau_device *device) device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xa3: device->cname = "GT215"; @@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xa5: device->cname = "GT216"; @@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xa8: device->cname = "GT218"; @@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xaf: device->cname = "MCP89"; @@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; default: nv_fatal(device, "unknown Tesla chipset\n"); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c index f0461685a422..6697f0f9c293 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c @@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xc4: device->cname = "GF104"; @@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = 
&nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xc3: device->cname = "GF106"; @@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xce: device->cname = "GF114"; @@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xcf: device->cname = "GF116"; @@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xc1: device->cname = "GF108"; @@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = 
&nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xc8: device->cname = "GF110"; @@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xd9: device->cname = "GF119"; @@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; break; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c index 9b7881e76634..4a280b7ab853 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c @@ -45,9 +45,6 @@ #include #include #include -#include -#include -#include int nve0_identify(struct nouveau_device *device) @@ -70,16 +67,13 @@ nve0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; break; case 0xe7: device->cname = "GK107"; @@ -98,16 +92,13 @@ nve0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = 
&nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; break; default: nv_fatal(device, "unknown Kepler chipset\n"); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c index ae7249b09797..61becfa732e9 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c @@ -22,10 +22,6 @@ * Authors: Ben Skeggs */ -#include -#include -#include -#include #include #include @@ -59,12 +55,7 @@ nv50_devinit_dtor(struct nouveau_object *object) static int nv50_devinit_init(struct nouveau_object *object) { - struct nouveau_bios *bios = nouveau_bios(object); struct nv50_devinit_priv *priv = (void *)object; - struct nvbios_outp info; - struct dcb_output outp; - u8 ver = 0xff, hdr, cnt, len; - int ret, i = 0; if (!priv->base.post) { if (!nv_rdvgac(priv, 0, 0x00) && @@ -74,30 +65,7 @@ nv50_devinit_init(struct nouveau_object *object) } } - ret = nouveau_devinit_init(&priv->base); - if (ret) - return ret; - - /* if we ran the init tables, execute first script pointer for each - * display table output entry that has a matching dcb entry. - */ - while (priv->base.post && ver) { - u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info); - if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = info.script[0], - .outp = &outp, - .crtc = -1, - .execute = 1, - }; - - nvbios_exec(&init); - } - }; - - return 0; + return nouveau_devinit_init(&priv->base); } static int diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/base.c index d6d16007ec1a..f0086de8af31 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/base.c @@ -57,47 +57,27 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios) } int -nouveau_fb_preinit(struct nouveau_fb *pfb) +nouveau_fb_init(struct nouveau_fb *pfb) { - static const char *name[] = { - [NV_MEM_TYPE_UNKNOWN] = "unknown", - [NV_MEM_TYPE_STOLEN ] = "stolen system memory", - [NV_MEM_TYPE_SGRAM ] = "SGRAM", - [NV_MEM_TYPE_SDRAM ] = "SDRAM", - [NV_MEM_TYPE_DDR1 ] = "DDR1", - [NV_MEM_TYPE_DDR2 ] = "DDR2", - [NV_MEM_TYPE_DDR3 ] = "DDR3", - [NV_MEM_TYPE_GDDR2 ] = "GDDR2", - [NV_MEM_TYPE_GDDR3 ] = "GDDR3", - [NV_MEM_TYPE_GDDR4 ] = "GDDR4", - [NV_MEM_TYPE_GDDR5 ] = "GDDR5", - }; - int ret, tags; - - tags = pfb->ram.init(pfb); - if (tags < 0 || !pfb->ram.size) { - nv_fatal(pfb, "error detecting memory configuration!!\n"); - return (tags < 0) ? 
tags : -ERANGE; - } + int ret, i; - if (!nouveau_mm_initialised(&pfb->vram)) { - ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1); - if (ret) - return ret; - } + ret = nouveau_subdev_init(&pfb->base); + if (ret) + return ret; - if (!nouveau_mm_initialised(&pfb->tags) && tags) { - ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1); - if (ret) - return ret; - } + for (i = 0; i < pfb->tile.regions; i++) + pfb->tile.prog(pfb, i, &pfb->tile.region[i]); - nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]); - nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20)); - nv_info(pfb, " ZCOMP: %d tags\n", tags); return 0; } +int +_nouveau_fb_init(struct nouveau_object *object) +{ + struct nouveau_fb *pfb = (void *)object; + return nouveau_fb_init(pfb); +} + void nouveau_fb_destroy(struct nouveau_fb *pfb) { @@ -105,8 +85,12 @@ nouveau_fb_destroy(struct nouveau_fb *pfb) for (i = 0; i < pfb->tile.regions; i++) pfb->tile.fini(pfb, i, &pfb->tile.region[i]); - nouveau_mm_fini(&pfb->tags); - nouveau_mm_fini(&pfb->vram); + + if (pfb->tags.block_size) + nouveau_mm_fini(&pfb->tags); + + if (pfb->vram.block_size) + nouveau_mm_fini(&pfb->vram); nouveau_subdev_destroy(&pfb->base); } @@ -117,24 +101,30 @@ _nouveau_fb_dtor(struct nouveau_object *object) struct nouveau_fb *pfb = (void *)object; nouveau_fb_destroy(pfb); } + int -nouveau_fb_init(struct nouveau_fb *pfb) +nouveau_fb_created(struct nouveau_fb *pfb) { - int ret, i; - - ret = nouveau_subdev_init(&pfb->base); - if (ret) - return ret; + static const char *name[] = { + [NV_MEM_TYPE_UNKNOWN] = "unknown", + [NV_MEM_TYPE_STOLEN ] = "stolen system memory", + [NV_MEM_TYPE_SGRAM ] = "SGRAM", + [NV_MEM_TYPE_SDRAM ] = "SDRAM", + [NV_MEM_TYPE_DDR1 ] = "DDR1", + [NV_MEM_TYPE_DDR2 ] = "DDR2", + [NV_MEM_TYPE_DDR3 ] = "DDR3", + [NV_MEM_TYPE_GDDR2 ] = "GDDR2", + [NV_MEM_TYPE_GDDR3 ] = "GDDR3", + [NV_MEM_TYPE_GDDR4 ] = "GDDR4", + [NV_MEM_TYPE_GDDR5 ] = "GDDR5", + }; - for (i = 0; i < pfb->tile.regions; i++) - pfb->tile.prog(pfb, i, &pfb->tile.region[i]); + if (pfb->ram.size == 0) { + nv_fatal(pfb, "no vram detected!!\n"); + return -ERANGE; + } + nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]); + nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20)); return 0; } - -int -_nouveau_fb_init(struct nouveau_object *object) -{ - struct nouveau_fb *pfb = (void *)object; - return nouveau_fb_init(pfb); -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c index 6e369f85361e..eb06836b69f7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c @@ -55,37 +55,6 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) return false; } -static int -nv04_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0); - if (boot0 & 0x00000100) { - pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2; - pfb->ram.size *= 1024 * 1024; - } else { - switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { - case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: - pfb->ram.size = 32 * 1024 * 1024; - break; - case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: - pfb->ram.size = 16 * 1024 * 1024; - break; - case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: - pfb->ram.size = 8 * 1024 * 1024; - break; - case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: - pfb->ram.size = 4 * 1024 * 1024; - break; - } - } - - if ((boot0 & 0x00000038) <= 0x10) - pfb->ram.type = NV_MEM_TYPE_SGRAM; - else - pfb->ram.type = NV_MEM_TYPE_SDRAM; - return 0; -} - static int nv04_fb_init(struct nouveau_object 
*object) { @@ -110,6 +79,7 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_object **pobject) { struct nv04_fb_priv *priv; + u32 boot0; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); @@ -117,9 +87,35 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + boot0 = nv_rd32(priv, NV04_PFB_BOOT_0); + if (boot0 & 0x00000100) { + priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2; + priv->base.ram.size *= 1024 * 1024; + } else { + switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { + case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: + priv->base.ram.size = 32 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: + priv->base.ram.size = 16 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: + priv->base.ram.size = 8 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: + priv->base.ram.size = 4 * 1024 * 1024; + break; + } + } + + if ((boot0 & 0x00000038) <= 0x10) + priv->base.ram.type = NV_MEM_TYPE_SGRAM; + else + priv->base.ram.type = NV_MEM_TYPE_SDRAM; + + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv04_fb_vram_init; - return nouveau_fb_preinit(&priv->base); + return nouveau_fb_created(&priv->base); } struct nouveau_oclass diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c index edbbe26e858d..f037a422d2f4 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c @@ -30,20 +30,7 @@ struct nv10_fb_priv { struct nouveau_fb base; }; -static int -nv10_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 cfg0 = nv_rd32(pfb, 0x100200); - if (cfg0 & 0x00000001) - pfb->ram.type = NV_MEM_TYPE_DDR1; - else - pfb->ram.type = NV_MEM_TYPE_SDRAM; - - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - return 0; -} - -void +static void nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *tile) { @@ -52,7 +39,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, tile->pitch = pitch; } -void +static void nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { tile->addr = 0; @@ -67,7 +54,6 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); - nv_rd32(pfb, 0x100240 + (i * 0x10)); } static int @@ -75,6 +61,7 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + struct nouveau_device *device = nv_device(parent); struct nv10_fb_priv *priv; int ret; @@ -83,13 +70,42 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + if (device->chipset == 0x1a || device->chipset == 0x1f) { + struct pci_dev *bridge; + u32 mem, mib; + + bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); + if (!bridge) { + nv_fatal(device, "no bridge device\n"); + return 0; + } + + if (device->chipset == 0x1a) { + pci_read_config_dword(bridge, 0x7c, &mem); + mib = ((mem >> 6) & 31) + 1; + } else { + pci_read_config_dword(bridge, 0x84, &mem); + mib = ((mem >> 4) & 127) + 1; + } + + priv->base.ram.type = NV_MEM_TYPE_STOLEN; + priv->base.ram.size = mib * 1024 * 1024; + } else { + u32 cfg0 = nv_rd32(priv, 0x100200); + if (cfg0 & 
0x00000001) + priv->base.ram.type = NV_MEM_TYPE_DDR1; + else + priv->base.ram.type = NV_MEM_TYPE_SDRAM; + + priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000; + } + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv10_fb_vram_init; priv->base.tile.regions = 8; priv->base.tile.init = nv10_fb_tile_init; priv->base.tile.fini = nv10_fb_tile_fini; priv->base.tile.prog = nv10_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); + return nouveau_fb_created(&priv->base); } struct nouveau_oclass diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c deleted file mode 100644 index 48366841db4a..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv1a_fb_priv { - struct nouveau_fb base; -}; - -static int -nv1a_fb_vram_init(struct nouveau_fb *pfb) -{ - struct pci_dev *bridge; - u32 mem, mib; - - bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); - if (!bridge) { - nv_fatal(pfb, "no bridge device\n"); - return -ENODEV; - } - - if (nv_device(pfb)->chipset == 0x1a) { - pci_read_config_dword(bridge, 0x7c, &mem); - mib = ((mem >> 6) & 31) + 1; - } else { - pci_read_config_dword(bridge, 0x84, &mem); - mib = ((mem >> 4) & 127) + 1; - } - - pfb->ram.type = NV_MEM_TYPE_STOLEN; - pfb->ram.size = mib * 1024 * 1024; - return 0; -} - -static int -nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv1a_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv1a_fb_vram_init; - priv->base.tile.regions = 8; - priv->base.tile.init = nv10_fb_tile_init; - priv->base.tile.fini = nv10_fb_tile_fini; - priv->base.tile.prog = nv10_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv1a_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x1a), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv1a_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = _nouveau_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c index 5d14612a2c8e..4b3578fcb7fb 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c @@ -30,54 +30,43 @@ struct nv20_fb_priv { struct nouveau_fb base; }; -int -nv20_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 pbus1218 = nv_rd32(pfb, 0x001218); - - switch (pbus1218 & 0x00000300) { - case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break; - case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break; - case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; - case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break; - } - pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000); - pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - - return nv_rd32(pfb, 0x100320); -} - -void +static void nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *tile) { + struct nouveau_device *device = nv_device(pfb); + int bpp = (flags & 2) ? 32 : 16; + tile->addr = 0x00000001 | addr; tile->limit = max(1u, addr + size) - 1; tile->pitch = pitch; + + /* Allocate some of the on-die tag memory, used to store Z + * compression meta-data (most likely just a bitmap determining + * if a given tile is compressed or not). 
+ */ + size /= 256; if (flags & 4) { - pfb->tile.comp(pfb, i, size, flags, tile); + if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) { + /* Enable Z compression */ + tile->zcomp = tile->tag->offset; + if (device->chipset >= 0x25) { + if (bpp == 16) + tile->zcomp |= 0x00100000; + else + tile->zcomp |= 0x00200000; + } else { + tile->zcomp |= 0x80000000; + if (bpp != 16) + tile->zcomp |= 0x04000000; + } + } + tile->addr |= 2; } } static void -nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) -{ - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */ - else tile->zcomp = 0x04000000; /* Z24S8 */ - tile->zcomp |= tile->tag->offset; - tile->zcomp |= 0x80000000; /* enable */ -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x08000000; -#endif - } -} - -void nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { tile->addr = 0; @@ -87,13 +76,12 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) nouveau_mm_free(&pfb->tags, &tile->tag); } -void +static void nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); - nv_rd32(pfb, 0x100240 + (i * 0x10)); nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp); } @@ -102,7 +90,9 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + struct nouveau_device *device = nv_device(parent); struct nv20_fb_priv *priv; + u32 pbus1218; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); @@ -110,14 +100,28 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + pbus1218 = nv_rd32(priv, 0x001218); + switch (pbus1218 & 0x00000300) { + case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break; + case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break; + } + priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000; + + if (device->chipset >= 0x25) + ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1); + else + ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1); + if (ret) + return ret; + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; priv->base.tile.regions = 8; priv->base.tile.init = nv20_fb_tile_init; - priv->base.tile.comp = nv20_fb_tile_comp; priv->base.tile.fini = nv20_fb_tile_fini; priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); + return nouveau_fb_created(&priv->base); } struct nouveau_oclass diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c deleted file mode 100644 index 0042ace6bef9..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv25_fb_priv { - struct nouveau_fb base; -}; - -static void -nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) -{ - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */ - else tile->zcomp = 0x00200000; /* Z24S8 */ - tile->zcomp |= tile->tag->offset; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x01000000; -#endif - } -} - -static int -nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv25_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; - priv->base.tile.regions = 8; - priv->base.tile.init = nv20_fb_tile_init; - priv->base.tile.comp = nv25_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv25_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x25), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv25_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = _nouveau_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c index a7ba0d048aec..cba67bc91390 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c @@ -34,36 +34,17 @@ void nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *tile) { - /* for performance, select alternate bank offset for zeta */ - if (!(flags & 4)) { - tile->addr = (0 << 4); - } else { - if (pfb->tile.comp) /* z compression */ - pfb->tile.comp(pfb, i, size, flags, tile); - tile->addr = (1 << 4); - } - - tile->addr |= 0x00000001; /* enable */ - tile->addr |= addr; + tile->addr = addr | 1; tile->limit = max(1u, addr + size) - 1; tile->pitch = pitch; } -static void -nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) 
+void +nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */ - else tile->zcomp |= 0x02000000; /* Z24S8 */ - tile->zcomp |= ((tile->tag->offset ) >> 6); - tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x10000000; -#endif - } + tile->addr = 0; + tile->limit = 0; + tile->pitch = 0; } static int @@ -91,7 +72,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i) return x; } -int +static int nv30_fb_init(struct nouveau_object *object) { struct nouveau_device *device = nv_device(object); @@ -130,6 +111,7 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_object **pobject) { struct nv30_fb_priv *priv; + u32 pbus1218; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); @@ -137,14 +119,21 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + pbus1218 = nv_rd32(priv, 0x001218); + switch (pbus1218 & 0x00000300) { + case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break; + case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break; + } + priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000; + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; priv->base.tile.regions = 8; priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv30_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); + priv->base.tile.fini = nv30_fb_tile_fini; + priv->base.tile.prog = nv10_fb_tile_prog; + return nouveau_fb_created(&priv->base); } struct nouveau_oclass diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c deleted file mode 100644 index 092f6f4f3521..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv35_fb_priv { - struct nouveau_fb base; -}; - -static void -nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) -{ - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */ - else tile->zcomp |= 0x08000000; /* Z24S8 */ - tile->zcomp |= ((tile->tag->offset ) >> 6); - tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x40000000; -#endif - } -} - -static int -nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv35_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; - priv->base.tile.regions = 8; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv35_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv35_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x35), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv35_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv30_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c deleted file mode 100644 index 797ab3b821b9..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv36_fb_priv { - struct nouveau_fb base; -}; - -static void -nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) -{ - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */ - else tile->zcomp |= 0x20000000; /* Z24S8 */ - tile->zcomp |= ((tile->tag->offset ) >> 6); - tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x80000000; -#endif - } -} - -static int -nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv36_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; - priv->base.tile.regions = 8; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv36_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv36_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x36), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv36_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv30_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c index 65e131b90f37..347a496fcad8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c @@ -30,37 +30,34 @@ struct nv40_fb_priv { struct nouveau_fb base; }; -static int -nv40_fb_vram_init(struct nouveau_fb *pfb) +static inline int +nv44_graph_class(struct nouveau_device *device) { - u32 pbus1218 = nv_rd32(pfb, 0x001218); - switch (pbus1218 & 0x00000300) { - case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break; - case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break; - case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; - case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break; - } + if ((device->chipset & 0xf0) == 0x60) + return 1; - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - return nv_rd32(pfb, 0x100320); + return !(0x0baf & (1 << (device->chipset & 0x0f))); } -void -nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) +static void +nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { - u32 tiles = DIV_ROUND_UP(size, 0x80); - u32 tags = round_up(tiles / pfb->ram.parts, 0x100); - if ( (flags & 2) && - !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */ - tile->zcomp |= ((tile->tag->offset ) >> 8); - tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x40000000; -#endif - } + nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); + nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); + nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); +} + +static void +nv40_fb_init_gart(struct nv40_fb_priv *priv) +{ + nv_wr32(priv, 0x100800, 0x00000001); +} + +static void +nv44_fb_init_gart(struct nv40_fb_priv *priv) +{ + nv_wr32(priv, 
0x100850, 0x80000000); + nv_wr32(priv, 0x100800, 0x00000001); } static int @@ -73,7 +70,19 @@ nv40_fb_init(struct nouveau_object *object) if (ret) return ret; - nv_mask(priv, 0x10033c, 0x00008000, 0x00000000); + switch (nv_device(priv)->chipset) { + case 0x40: + case 0x45: + nv_mask(priv, 0x10033c, 0x00008000, 0x00000000); + break; + default: + if (nv44_graph_class(nv_device(priv))) + nv44_fb_init_gart(priv); + else + nv40_fb_init_gart(priv); + break; + } + return 0; } @@ -82,6 +91,7 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + struct nouveau_device *device = nv_device(parent); struct nv40_fb_priv *priv; int ret; @@ -90,14 +100,69 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + /* 0x001218 is actually present on a few other NV4X I looked at, + * and even contains sane values matching 0x100474. From looking + * at various vbios images however, this isn't the case everywhere. + * So, I chose to use the same regs I've seen NVIDIA reading around + * the memory detection, hopefully that'll get us the right numbers + */ + if (device->chipset == 0x40) { + u32 pbus1218 = nv_rd32(priv, 0x001218); + switch (pbus1218 & 0x00000300) { + case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break; + case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break; + } + } else + if (device->chipset == 0x49 || device->chipset == 0x4b) { + u32 pfb914 = nv_rd32(priv, 0x100914); + switch (pfb914 & 0x00000003) { + case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break; + case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000003: break; + } + } else + if (device->chipset != 0x4e) { + u32 pfb474 = nv_rd32(priv, 0x100474); + if (pfb474 & 0x00000004) + priv->base.ram.type = NV_MEM_TYPE_GDDR3; + if (pfb474 & 0x00000002) + priv->base.ram.type = NV_MEM_TYPE_DDR2; + if (pfb474 & 0x00000001) + priv->base.ram.type = NV_MEM_TYPE_DDR1; + } else { + priv->base.ram.type = NV_MEM_TYPE_STOLEN; + } + + priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000; + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv40_fb_vram_init; - priv->base.tile.regions = 8; + switch (device->chipset) { + case 0x40: + case 0x45: + priv->base.tile.regions = 8; + break; + case 0x46: + case 0x47: + case 0x49: + case 0x4b: + case 0x4c: + priv->base.tile.regions = 15; + break; + default: + priv->base.tile.regions = 12; + break; + } priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv40_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); + priv->base.tile.fini = nv30_fb_tile_fini; + if (device->chipset == 0x40) + priv->base.tile.prog = nv10_fb_tile_prog; + else + priv->base.tile.prog = nv40_fb_tile_prog; + + return nouveau_fb_created(&priv->base); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c deleted file mode 100644 index e9e5a08c41a1..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv41_fb_priv { - struct nouveau_fb base; -}; - -int -nv41_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 pfb474 = nv_rd32(pfb, 0x100474); - if (pfb474 & 0x00000004) - pfb->ram.type = NV_MEM_TYPE_GDDR3; - if (pfb474 & 0x00000002) - pfb->ram.type = NV_MEM_TYPE_DDR2; - if (pfb474 & 0x00000001) - pfb->ram.type = NV_MEM_TYPE_DDR1; - - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - return nv_rd32(pfb, 0x100320); -} - -void -nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) -{ - nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); - nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); - nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); - nv_rd32(pfb, 0x100600 + (i * 0x10)); - nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp); -} - -int -nv41_fb_init(struct nouveau_object *object) -{ - struct nv41_fb_priv *priv = (void *)object; - int ret; - - ret = nouveau_fb_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x100800, 0x00000001); - return 0; -} - -static int -nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv41_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv41_fb_vram_init; - priv->base.tile.regions = 12; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv40_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv41_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv41_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x41), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv41_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv41_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c deleted file mode 100644 index ae89b5006f7a..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv44_fb_priv { - struct nouveau_fb base; -}; - -int -nv44_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 pfb474 = nv_rd32(pfb, 0x100474); - if (pfb474 & 0x00000004) - pfb->ram.type = NV_MEM_TYPE_GDDR3; - if (pfb474 & 0x00000002) - pfb->ram.type = NV_MEM_TYPE_DDR2; - if (pfb474 & 0x00000001) - pfb->ram.type = NV_MEM_TYPE_DDR1; - - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - return 0; -} - -static void -nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, - u32 flags, struct nouveau_fb_tile *tile) -{ - tile->addr = 0x00000001; /* mode = vram */ - tile->addr |= addr; - tile->limit = max(1u, addr + size) - 1; - tile->pitch = pitch; -} - -void -nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) -{ - nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); - nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); - nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); - nv_rd32(pfb, 0x100600 + (i * 0x10)); -} - -int -nv44_fb_init(struct nouveau_object *object) -{ - struct nv44_fb_priv *priv = (void *)object; - int ret; - - ret = nouveau_fb_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x100850, 0x80000000); - nv_wr32(priv, 0x100800, 0x00000001); - return 0; -} - -static int -nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv44_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv44_fb_vram_init; - priv->base.tile.regions = 12; - priv->base.tile.init = nv44_fb_tile_init; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv44_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv44_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x44), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv44_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv44_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c deleted file mode 100644 index 589b93ea2994..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c +++ /dev/null @@ -1,79 +0,0 @@ 
-/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv46_fb_priv { - struct nouveau_fb base; -}; - -void -nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, - u32 flags, struct nouveau_fb_tile *tile) -{ - /* for performance, select alternate bank offset for zeta */ - if (!(flags & 4)) tile->addr = (0 << 3); - else tile->addr = (1 << 3); - - tile->addr |= 0x00000001; /* mode = vram */ - tile->addr |= addr; - tile->limit = max(1u, addr + size) - 1; - tile->pitch = pitch; -} - -static int -nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv46_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv44_fb_vram_init; - priv->base.tile.regions = 15; - priv->base.tile.init = nv46_fb_tile_init; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv44_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv46_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x46), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv46_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv44_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c deleted file mode 100644 index 818bba35b368..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. 
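The nv46_fb_tile_init() removed above packs three things into the single tile->addr word: bit 0 flags "mode = vram", bit 3 selects the alternate bank offset used for zeta surfaces, and the remaining bits carry the region base address. A small standalone sketch of that packing (illustrative only; the helper name is made up, and the address is assumed to be suitably aligned as in the original caller):

#include <stdio.h>

/* Mirror of the tile->addr packing in nv46_fb_tile_init() above:
 * bit 0 = vram mode, bit 3 = alternate bank offset for zeta,
 * upper bits = region base address.
 */
static unsigned int nv46_tile_addr(unsigned int addr, int is_zeta)
{
	unsigned int v = is_zeta ? (1u << 3) : 0;

	v |= 0x00000001;	/* mode = vram */
	v |= addr;
	return v;
}

int main(void)
{
	printf("color @ 0x01000000 -> 0x%08x\n", nv46_tile_addr(0x01000000, 0));
	printf("zeta  @ 0x01000000 -> 0x%08x\n", nv46_tile_addr(0x01000000, 1));
	return 0;
}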
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv47_fb_priv { - struct nouveau_fb base; -}; - -static int -nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv47_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv41_fb_vram_init; - priv->base.tile.regions = 15; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv40_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv41_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv47_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x47), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv47_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv41_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c deleted file mode 100644 index 84a31af16ab4..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv49_fb_priv { - struct nouveau_fb base; -}; - -static int -nv49_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 pfb914 = nv_rd32(pfb, 0x100914); - - switch (pfb914 & 0x00000003) { - case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break; - case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break; - case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; - case 0x00000003: break; - } - - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - return nv_rd32(pfb, 0x100320); -} - -static int -nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv49_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv49_fb_vram_init; - priv->base.tile.regions = 15; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv40_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv41_fb_tile_prog; - - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv49_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x49), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv49_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv41_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c deleted file mode 100644 index 797fd558170b..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
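With the per-chipset nv41/nv44/nv46/nv47/nv49/nv4e files removed, the RAM-type probing they carried is consolidated into the nv40_fb_ctor() shown in the first nv40.c hunk of this patch: NV40 decodes PBUS 0x001218, NV49/NV4B decode PFB 0x100914, NV4E has no dedicated VRAM (stolen), and everything else decodes the PFB 0x100474 straps, with later bits overriding earlier ones exactly as in the original code. A compact standalone sketch of that decision tree (illustrative only; the enum and helper names are hypothetical, and register values are passed in rather than read from hardware):

#include <stdio.h>

enum ram_type { SDRAM, DDR1, DDR2, GDDR3, STOLEN, UNKNOWN };

/* Condensed view of the chipset handling in nv40_fb_ctor() earlier in
 * this patch.  The 0x100474 branch keeps the overwrite ordering of the
 * original, so DDR1 wins if several strap bits are set.
 */
static enum ram_type
nv40_ram_type(unsigned int chipset, unsigned int pbus1218,
	      unsigned int pfb914, unsigned int pfb474)
{
	enum ram_type type = UNKNOWN;

	if (chipset == 0x40) {
		switch (pbus1218 & 0x00000300) {
		case 0x00000000: type = SDRAM; break;
		case 0x00000100: type = DDR1;  break;
		case 0x00000200: type = GDDR3; break;
		case 0x00000300: type = DDR2;  break;
		}
	} else if (chipset == 0x49 || chipset == 0x4b) {
		switch (pfb914 & 0x00000003) {
		case 0x00000000: type = DDR1;  break;
		case 0x00000001: type = DDR2;  break;
		case 0x00000002: type = GDDR3; break;
		case 0x00000003: break;
		}
	} else if (chipset != 0x4e) {
		if (pfb474 & 0x00000004) type = GDDR3;
		if (pfb474 & 0x00000002) type = DDR2;
		if (pfb474 & 0x00000001) type = DDR1;
	} else {
		type = STOLEN;
	}
	return type;
}

int main(void)
{
	printf("NV4B, pfb914=0x2 -> %d\n", nv40_ram_type(0x4b, 0, 0x2, 0));
	printf("NV4E           -> %d\n", nv40_ram_type(0x4e, 0, 0, 0));
	return 0;
}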
- * - */ - -#include - -struct nv4e_fb_priv { - struct nouveau_fb base; -}; - -static int -nv4e_fb_vram_init(struct nouveau_fb *pfb) -{ - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram.type = NV_MEM_TYPE_STOLEN; - return 0; -} - -static int -nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv4e_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv4e_fb_vram_init; - priv->base.tile.regions = 12; - priv->base.tile.init = nv46_fb_tile_init; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv44_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv4e_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x4e), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv4e_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv44_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 487cb8c6c204..5f570806143a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c @@ -51,101 +51,6 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype) return types[(memtype & 0xff00) >> 8] != 0; } -static u32 -nv50_fb_vram_rblock(struct nouveau_fb *pfb) -{ - int i, parts, colbits, rowbitsa, rowbitsb, banks; - u64 rowsize, predicted; - u32 r0, r4, rt, ru, rblock_size; - - r0 = nv_rd32(pfb, 0x100200); - r4 = nv_rd32(pfb, 0x100204); - rt = nv_rd32(pfb, 0x100250); - ru = nv_rd32(pfb, 0x001540); - nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); - - for (i = 0, parts = 0; i < 8; i++) { - if (ru & (0x00010000 << i)) - parts++; - } - - colbits = (r4 & 0x0000f000) >> 12; - rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; - rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; - banks = 1 << (((r4 & 0x03000000) >> 24) + 2); - - rowsize = parts * banks * (1 << colbits) * 8; - predicted = rowsize << rowbitsa; - if (r0 & 0x00000004) - predicted += rowsize << rowbitsb; - - if (predicted != pfb->ram.size) { - nv_warn(pfb, "memory controller reports %d MiB VRAM\n", - (u32)(pfb->ram.size >> 20)); - } - - rblock_size = rowsize; - if (rt & 1) - rblock_size *= 3; - - nv_debug(pfb, "rblock %d bytes\n", rblock_size); - return rblock_size; -} - -static int -nv50_fb_vram_init(struct nouveau_fb *pfb) -{ - struct nouveau_device *device = nv_device(pfb); - struct nouveau_bios *bios = nouveau_bios(device); - const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ - const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ - u32 size; - int ret; - - pfb->ram.size = nv_rd32(pfb, 0x10020c); - pfb->ram.size = (pfb->ram.size & 0xffffff00) | - ((pfb->ram.size & 0x000000ff) << 32); - - size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail; - switch (device->chipset) { - case 0xaa: - case 0xac: - case 0xaf: /* IGPs, no reordering, no real VRAM */ - ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1); - if (ret) - return ret; - - pfb->ram.type = NV_MEM_TYPE_STOLEN; - pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; - break; - default: - switch (nv_rd32(pfb, 0x100714) & 0x00000007) { - case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break; - case 1: - if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3) - pfb->ram.type = 
NV_MEM_TYPE_DDR3; - else - pfb->ram.type = NV_MEM_TYPE_DDR2; - break; - case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; - case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break; - case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break; - default: - break; - } - - ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, - nv50_fb_vram_rblock(pfb) >> 12); - if (ret) - return ret; - - pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; - break; - } - - return nv_rd32(pfb, 0x100320); -} - static int nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, u32 memtype, struct nouveau_mem **pmem) @@ -235,6 +140,195 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem) kfree(mem); } +static u32 +nv50_vram_rblock(struct nv50_fb_priv *priv) +{ + int i, parts, colbits, rowbitsa, rowbitsb, banks; + u64 rowsize, predicted; + u32 r0, r4, rt, ru, rblock_size; + + r0 = nv_rd32(priv, 0x100200); + r4 = nv_rd32(priv, 0x100204); + rt = nv_rd32(priv, 0x100250); + ru = nv_rd32(priv, 0x001540); + nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); + + for (i = 0, parts = 0; i < 8; i++) { + if (ru & (0x00010000 << i)) + parts++; + } + + colbits = (r4 & 0x0000f000) >> 12; + rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; + rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; + banks = 1 << (((r4 & 0x03000000) >> 24) + 2); + + rowsize = parts * banks * (1 << colbits) * 8; + predicted = rowsize << rowbitsa; + if (r0 & 0x00000004) + predicted += rowsize << rowbitsb; + + if (predicted != priv->base.ram.size) { + nv_warn(priv, "memory controller reports %d MiB VRAM\n", + (u32)(priv->base.ram.size >> 20)); + } + + rblock_size = rowsize; + if (rt & 1) + rblock_size *= 3; + + nv_debug(priv, "rblock %d bytes\n", rblock_size); + return rblock_size; +} + +static int +nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nouveau_bios *bios = nouveau_bios(device); + const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ + const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + struct nv50_fb_priv *priv; + u32 tags; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + switch (nv_rd32(priv, 0x100714) & 0x00000007) { + case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 1: + if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3) + priv->base.ram.type = NV_MEM_TYPE_DDR3; + else + priv->base.ram.type = NV_MEM_TYPE_DDR2; + break; + case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break; + case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break; + default: + break; + } + + priv->base.ram.size = nv_rd32(priv, 0x10020c); + priv->base.ram.size = (priv->base.ram.size & 0xffffff00) | + ((priv->base.ram.size & 0x000000ff) << 32); + + tags = nv_rd32(priv, 0x100320); + ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1); + if (ret) + return ret; + + nv_debug(priv, "%d compression tags\n", tags); + + size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail; + switch (device->chipset) { + case 0xaa: + case 0xac: + case 0xaf: /* IGPs, no reordering, no real VRAM */ + ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1); + if (ret) + return ret; + + priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12; + priv->base.ram.type = NV_MEM_TYPE_STOLEN; + break; + default: + ret = 
nouveau_mm_init(&priv->base.vram, rsvd_head, size, + nv50_vram_rblock(priv) >> 12); + if (ret) + return ret; + + priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1; + break; + } + + priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (priv->r100c08_page) { + priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page, + 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(device->pdev, priv->r100c08)) + nv_warn(priv, "failed 0x100c08 page map\n"); + } else { + nv_warn(priv, "failed 0x100c08 page alloc\n"); + } + + priv->base.memtype_valid = nv50_fb_memtype_valid; + priv->base.ram.get = nv50_fb_vram_new; + priv->base.ram.put = nv50_fb_vram_del; + return nouveau_fb_created(&priv->base); +} + +static void +nv50_fb_dtor(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nv50_fb_priv *priv = (void *)object; + + if (priv->r100c08_page) { + pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->r100c08_page); + } + + nouveau_fb_destroy(&priv->base); +} + +static int +nv50_fb_init(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nv50_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + /* Not a clue what this is exactly. Without pointing it at a + * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) + * cause IOMMU "read from address 0" errors (rh#561267) + */ + nv_wr32(priv, 0x100c08, priv->r100c08 >> 8); + + /* This is needed to get meaningful information from 100c90 + * on traps. No idea what these values mean exactly. */ + switch (device->chipset) { + case 0x50: + nv_wr32(priv, 0x100c90, 0x000707ff); + break; + case 0xa3: + case 0xa5: + case 0xa8: + nv_wr32(priv, 0x100c90, 0x000d0fff); + break; + case 0xaf: + nv_wr32(priv, 0x100c90, 0x089d1fff); + break; + default: + nv_wr32(priv, 0x100c90, 0x001d07ff); + break; + } + + return 0; +} + +struct nouveau_oclass +nv50_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_fb_ctor, + .dtor = nv50_fb_dtor, + .init = nv50_fb_init, + .fini = _nouveau_fb_fini, + }, +}; + static const struct nouveau_enum vm_dispatch_subclients[] = { { 0x00000000, "GRCTX", NULL }, { 0x00000001, "NOTIFY", NULL }, @@ -330,11 +424,11 @@ static const struct nouveau_enum vm_fault[] = { {} }; -static void -nv50_fb_intr(struct nouveau_subdev *subdev) +void +nv50_fb_trap(struct nouveau_fb *pfb, int display) { - struct nouveau_device *device = nv_device(subdev); - struct nv50_fb_priv *priv = (void *)subdev; + struct nouveau_device *device = nv_device(pfb); + struct nv50_fb_priv *priv = (void *)pfb; const struct nouveau_enum *en, *cl; u32 trap[6], idx, chan; u8 st0, st1, st2, st3; @@ -351,6 +445,9 @@ nv50_fb_intr(struct nouveau_subdev *subdev) } nv_wr32(priv, 0x100c90, idx | 0x80000000); + if (!display) + return; + /* decode status bits into something more useful */ if (device->chipset < 0xa3 || device->chipset == 0xaa || device->chipset == 0xac) { @@ -397,101 +494,3 @@ nv50_fb_intr(struct nouveau_subdev *subdev) else printk("0x%08x\n", st1); } - -static int -nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nouveau_device *device = nv_device(parent); - struct nv50_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = 
nv_object(priv); - if (ret) - return ret; - - priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (priv->r100c08_page) { - priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page, - 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(device->pdev, priv->r100c08)) - nv_warn(priv, "failed 0x100c08 page map\n"); - } else { - nv_warn(priv, "failed 0x100c08 page alloc\n"); - } - - priv->base.memtype_valid = nv50_fb_memtype_valid; - priv->base.ram.init = nv50_fb_vram_init; - priv->base.ram.get = nv50_fb_vram_new; - priv->base.ram.put = nv50_fb_vram_del; - nv_subdev(priv)->intr = nv50_fb_intr; - return nouveau_fb_preinit(&priv->base); -} - -static void -nv50_fb_dtor(struct nouveau_object *object) -{ - struct nouveau_device *device = nv_device(object); - struct nv50_fb_priv *priv = (void *)object; - - if (priv->r100c08_page) { - pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - __free_page(priv->r100c08_page); - } - - nouveau_fb_destroy(&priv->base); -} - -static int -nv50_fb_init(struct nouveau_object *object) -{ - struct nouveau_device *device = nv_device(object); - struct nv50_fb_priv *priv = (void *)object; - int ret; - - ret = nouveau_fb_init(&priv->base); - if (ret) - return ret; - - /* Not a clue what this is exactly. Without pointing it at a - * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) - * cause IOMMU "read from address 0" errors (rh#561267) - */ - nv_wr32(priv, 0x100c08, priv->r100c08 >> 8); - - /* This is needed to get meaningful information from 100c90 - * on traps. No idea what these values mean exactly. */ - switch (device->chipset) { - case 0x50: - nv_wr32(priv, 0x100c90, 0x000707ff); - break; - case 0xa3: - case 0xa5: - case 0xa8: - nv_wr32(priv, 0x100c90, 0x000d0fff); - break; - case 0xaf: - nv_wr32(priv, 0x100c90, 0x089d1fff); - break; - default: - nv_wr32(priv, 0x100c90, 0x001d07ff); - break; - } - - return 0; -} - -struct nouveau_oclass -nv50_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x50), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv50_fb_ctor, - .dtor = nv50_fb_dtor, - .init = nv50_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c index 306bdf121452..9f59f2bf0079 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c @@ -61,65 +61,6 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) return likely((types[memtype] == 1)); } -static int -nvc0_fb_vram_init(struct nouveau_fb *pfb) -{ - struct nouveau_bios *bios = nouveau_bios(pfb); - const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ - const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ - u32 parts = nv_rd32(pfb, 0x022438); - u32 pmask = nv_rd32(pfb, 0x022554); - u32 bsize = nv_rd32(pfb, 0x10f20c); - u32 offset, length; - bool uniform = true; - int ret, part; - - nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800)); - nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask); - - pfb->ram.type = nouveau_fb_bios_memtype(bios); - pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 
2 : 1; - - /* read amount of vram attached to each memory controller */ - for (part = 0; part < parts; part++) { - if (!(pmask & (1 << part))) { - u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000)); - if (psize != bsize) { - if (psize < bsize) - bsize = psize; - uniform = false; - } - - nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize); - pfb->ram.size += (u64)psize << 20; - } - } - - /* if all controllers have the same amount attached, there's no holes */ - if (uniform) { - offset = rsvd_head; - length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail; - return nouveau_mm_init(&pfb->vram, offset, length, 1); - } - - /* otherwise, address lowest common amount from 0GiB */ - ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1); - if (ret) - return ret; - - /* and the rest starting from (8GiB + common_size) */ - offset = (0x0200000000ULL >> 12) + (bsize << 8); - length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail; - - ret = nouveau_mm_init(&pfb->vram, offset, length, 0); - if (ret) { - nouveau_mm_fini(&pfb->vram); - return ret; - } - - return 0; -} - static int nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, u32 memtype, struct nouveau_mem **pmem) @@ -197,6 +138,66 @@ nvc0_fb_dtor(struct nouveau_object *object) nouveau_fb_destroy(&priv->base); } +static int +nvc0_vram_detect(struct nvc0_fb_priv *priv) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + struct nouveau_fb *pfb = &priv->base; + const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ + const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + u32 parts = nv_rd32(priv, 0x022438); + u32 pmask = nv_rd32(priv, 0x022554); + u32 bsize = nv_rd32(priv, 0x10f20c); + u32 offset, length; + bool uniform = true; + int ret, part; + + nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800)); + nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask); + + priv->base.ram.type = nouveau_fb_bios_memtype(bios); + priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 
2 : 1; + + /* read amount of vram attached to each memory controller */ + for (part = 0; part < parts; part++) { + if (!(pmask & (1 << part))) { + u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000)); + if (psize != bsize) { + if (psize < bsize) + bsize = psize; + uniform = false; + } + + nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize); + priv->base.ram.size += (u64)psize << 20; + } + } + + /* if all controllers have the same amount attached, there's no holes */ + if (uniform) { + offset = rsvd_head; + length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail; + return nouveau_mm_init(&pfb->vram, offset, length, 1); + } + + /* otherwise, address lowest common amount from 0GiB */ + ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1); + if (ret) + return ret; + + /* and the rest starting from (8GiB + common_size) */ + offset = (0x0200000000ULL >> 12) + (bsize << 8); + length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail; + + ret = nouveau_mm_init(&pfb->vram, offset, length, 0); + if (ret) { + nouveau_mm_fini(&pfb->vram); + return ret; + } + + return 0; +} + static int nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -212,10 +213,13 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.memtype_valid = nvc0_fb_memtype_valid; - priv->base.ram.init = nvc0_fb_vram_init; priv->base.ram.get = nvc0_fb_vram_new; priv->base.ram.put = nv50_fb_vram_del; + ret = nvc0_vram_detect(priv); + if (ret) + return ret; + priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!priv->r100c10_page) return -ENOMEM; @@ -225,7 +229,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (pci_dma_mapping_error(device->pdev, priv->r100c10)) return -EFAULT; - return nouveau_fb_preinit(&priv->base); + return nouveau_fb_created(&priv->base); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c index dc27e794a851..fe1ebf199ba9 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c @@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch) ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); udelay(1); if (!timeout--) { - AUX_ERR("begin idle timeout 0x%08x\n", ctrl); + AUX_ERR("begin idle timeout 0x%08x", ctrl); return -EBUSY; } } while (ctrl & 0x03010000); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c index f5bbd3834116..ba4d28b50368 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c @@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object) } static u32 -nv04_instobj_rd32(struct nouveau_object *object, u64 addr) +nv04_instobj_rd32(struct nouveau_object *object, u32 addr) { struct nv04_instobj_priv *node = (void *)object; return nv_ro32(object->engine, node->mem->offset + addr); } static void -nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nv04_instobj_priv *node = (void *)object; nv_wo32(object->engine, node->mem->offset + addr, data); @@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object) } static u32 -nv04_instmem_rd32(struct nouveau_object *object, u64 addr) +nv04_instmem_rd32(struct nouveau_object *object, 
u32 addr) { return nv_rd32(object, 0x700000 + addr); } static void -nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) +nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data) { return nv_wr32(object, 0x700000 + addr, data); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c index da64253201ef..73c52ebd5932 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c @@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, } static u32 -nv40_instmem_rd32(struct nouveau_object *object, u64 addr) +nv40_instmem_rd32(struct nouveau_object *object, u32 addr) { struct nv04_instmem_priv *priv = (void *)object; return ioread32_native(priv->iomem + addr); } static void -nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) +nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nv04_instmem_priv *priv = (void *)object; iowrite32_native(data, priv->iomem + addr); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c index cfc7e31461de..27ef0891d10b 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c @@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object) } static u32 -nv50_instobj_rd32(struct nouveau_object *object, u64 offset) +nv50_instobj_rd32(struct nouveau_object *object, u32 offset) { struct nv50_instmem_priv *priv = (void *)object->engine; struct nv50_instobj_priv *node = (void *)object; @@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u64 offset) } static void -nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data) +nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data) { struct nv50_instmem_priv *priv = (void *)object->engine; struct nv50_instobj_priv *node = (void *)object; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 8379aafa6e1b..de5721cfc4c2 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/base.c @@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev) struct nouveau_mc *pmc = nouveau_mc(subdev); const struct nouveau_mc_intr *map = pmc->intr_map; struct nouveau_subdev *unit; - u32 stat, intr; + u32 stat; - intr = stat = nv_rd32(pmc, 0x000100); + stat = nv_rd32(pmc, 0x000100); while (stat && map->stat) { if (stat & map->stat) { unit = nouveau_subdev(subdev, map->unit); if (unit && unit->intr) unit->intr(unit); - intr &= ~map->stat; + stat &= ~map->stat; } map++; } - if (intr) { + if (stat) { nv_error(pmc, "unknown intr 0x%08x\n", stat); } } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c index 8d759f830323..cedf33b02977 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c @@ -39,7 +39,6 @@ nv50_mc_intr[] = { { 0x00200000, NVDEV_SUBDEV_GPIO }, { 0x04000000, NVDEV_ENGINE_DISP }, { 0x80000000, NVDEV_ENGINE_SW }, - { 0x0000d101, NVDEV_SUBDEV_FB }, {}, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c index ceb5c83f9459..a001e4c4d38d 100644 --- 
a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c @@ -40,7 +40,6 @@ nv98_mc_intr[] = { { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */ { 0x04000000, NVDEV_ENGINE_DISP }, { 0x80000000, NVDEV_ENGINE_SW }, - { 0x0040d101, NVDEV_SUBDEV_FB }, {}, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index 92796682722d..c2b81e30a17d 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c @@ -36,7 +36,6 @@ nvc0_mc_intr[] = { { 0x00000100, NVDEV_ENGINE_FIFO }, { 0x00001000, NVDEV_ENGINE_GR }, { 0x00008000, NVDEV_ENGINE_BSP }, - { 0x00020000, NVDEV_ENGINE_VP }, { 0x00100000, NVDEV_SUBDEV_TIMER }, { 0x00200000, NVDEV_SUBDEV_GPIO }, { 0x02000000, NVDEV_SUBDEV_LTCG }, diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c index 49050d991e75..9474cfca6e4c 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c @@ -67,7 +67,7 @@ nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) static void nv41_vm_flush(struct nouveau_vm *vm) { - struct nv04_vm_priv *priv = (void *)vm->vmm; + struct nv04_vmmgr_priv *priv = (void *)vm->vmm; mutex_lock(&nv_subdev(priv)->mutex); nv_wr32(priv, 0x100810, 0x00000022); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c index d48c02a36527..cbf1fc60a386 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -241,27 +241,19 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (unlikely(!abi16)) return -ENOMEM; - client = nv_client(abi16->client); - device = nv_device(abi16->device); - imem = nouveau_instmem(device); - pfb = nouveau_fb(device); - /* hack to allow channel engine type specification on kepler */ - if (device->card_type >= NV_E0) { - if (init->fb_ctxdma_handle != ~0) - init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; - else - init->fb_ctxdma_handle = init->tt_ctxdma_handle; + if (!drm->channel) + return nouveau_abi16_put(abi16, -ENODEV); - /* allow flips to be executed if this is a graphics channel */ - init->tt_ctxdma_handle = 0; - if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR) - init->tt_ctxdma_handle = 1; - } + client = nv_client(abi16->client); if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) return nouveau_abi16_put(abi16, -EINVAL); + device = nv_device(abi16->device); + imem = nouveau_instmem(device); + pfb = nouveau_fb(device); + /* allocate "abi16 channel" data and make up a handle for it */ init->channel = ffsll(~abi16->handles); if (!init->channel--) @@ -276,6 +268,11 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) abi16->handles |= (1 << init->channel); /* create channel object and initialise dma and fence management */ + if (device->card_type >= NV_E0) { + init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; + init->tt_ctxdma_handle = 0; + } + ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | init->channel, init->fb_ctxdma_handle, init->tt_ctxdma_handle, &chan->chan); @@ -385,7 +382,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) struct nouveau_abi16_chan *chan, *temp; struct nouveau_abi16_ntfy *ntfy; struct nouveau_object *object; - struct nv_dma_class args = {}; + struct nv_dma_class args; int ret; if (unlikely(!abi16)) diff --git 
a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c index d97f20069d3e..48783e14114c 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -35,14 +35,6 @@ static struct nouveau_dsm_priv { acpi_handle rom_handle; } nouveau_dsm_priv; -bool nouveau_is_optimus(void) { - return nouveau_dsm_priv.optimus_detected; -} - -bool nouveau_is_v1_dsm(void) { - return nouveau_dsm_priv.dsm_detected; -} - #define NOUVEAU_DSM_HAS_MUX 0x1 #define NOUVEAU_DSM_HAS_OPT 0x2 @@ -191,7 +183,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) { - if (!nouveau_dsm_priv.dsm_detected) + /* perhaps the _DSM functions are mutually exclusive, but prepare for + * the future */ + if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; if (id == VGA_SWITCHEROO_IGD) return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); @@ -207,7 +201,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, /* Optimus laptops have the card already disabled in * nouveau_switcheroo_set_state */ - if (!nouveau_dsm_priv.dsm_detected) + if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); @@ -289,24 +283,24 @@ static bool nouveau_dsm_detect(void) has_optimus = 1; } - /* find the optimus DSM or the old v1 DSM */ - if (has_optimus == 1) { + if (vga_count == 2 && has_dsm && guid_valid) { acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); - printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", + printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", acpi_method_name); - nouveau_dsm_priv.optimus_detected = true; + nouveau_dsm_priv.dsm_detected = true; ret = true; - } else if (vga_count == 2 && has_dsm && guid_valid) { + } + + if (has_optimus == 1) { acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); - printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", + printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", acpi_method_name); - nouveau_dsm_priv.dsm_detected = true; + nouveau_dsm_priv.optimus_detected = true; ret = true; } - return ret; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h index d0da230d7706..08af67722b57 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h @@ -4,8 +4,6 @@ #define ROM_BIOS_PAGE 4096 #if defined(CONFIG_ACPI) -bool nouveau_is_optimus(void); -bool nouveau_is_v1_dsm(void); void nouveau_register_dsm_handler(void); void nouveau_unregister_dsm_handler(void); void nouveau_switcheroo_optimus_dsm(void); @@ -13,8 +11,6 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); bool nouveau_acpi_rom_supported(struct pci_dev *pdev); void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *); #else -static inline bool nouveau_is_optimus(void) { return false; }; -static inline bool nouveau_is_v1_dsm(void) { return false; }; static inline void nouveau_register_dsm_handler(void) {} static inline void nouveau_unregister_dsm_handler(void) {} static inline void nouveau_switcheroo_optimus_dsm(void) {} diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c index 
865eddfa30a7..09fdef235882 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -624,6 +624,206 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b return 0; } +/* BIT 'U'/'d' table encoder subtables have hashes matching them to + * a particular set of encoders. + * + * This function returns true if a particular DCB entry matches. + */ +bool +bios_encoder_match(struct dcb_output *dcb, u32 hash) +{ + if ((hash & 0x000000f0) != (dcb->location << 4)) + return false; + if ((hash & 0x0000000f) != dcb->type) + return false; + if (!(hash & (dcb->or << 16))) + return false; + + switch (dcb->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + if (hash & 0x00c00000) { + if (!(hash & (dcb->sorconf.link << 22))) + return false; + } + default: + return true; + } +} + +int +nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk, + struct dcb_output *dcbent, int crtc) +{ + /* + * The display script table is located by the BIT 'U' table. + * + * It contains an array of pointers to various tables describing + * a particular output type. The first 32-bits of the output + * tables contains similar information to a DCB entry, and is + * used to decide whether that particular table is suitable for + * the output you want to access. + * + * The "record header length" field here seems to indicate the + * offset of the first configuration entry in the output tables. + * This is 10 on most cards I've seen, but 12 has been witnessed + * on DP cards, and there's another script pointer within the + * header. + * + * offset + 0 ( 8 bits): version + * offset + 1 ( 8 bits): header length + * offset + 2 ( 8 bits): record length + * offset + 3 ( 8 bits): number of records + * offset + 4 ( 8 bits): record header length + * offset + 5 (16 bits): pointer to first output script table + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + uint8_t *table = &bios->data[bios->display.script_table_ptr]; + uint8_t *otable = NULL; + uint16_t script; + int i; + + if (!bios->display.script_table_ptr) { + NV_ERROR(drm, "No pointer to output script table\n"); + return 1; + } + + /* + * Nothing useful has been in any of the pre-2.0 tables I've seen, + * so until they are, we really don't need to care. + */ + if (table[0] < 0x20) + return 1; + + if (table[0] != 0x20 && table[0] != 0x21) { + NV_ERROR(drm, "Output script table version 0x%02x unknown\n", + table[0]); + return 1; + } + + /* + * The output script tables describing a particular output type + * look as follows: + * + * offset + 0 (32 bits): output this table matches (hash of DCB) + * offset + 4 ( 8 bits): unknown + * offset + 5 ( 8 bits): number of configurations + * offset + 6 (16 bits): pointer to some script + * offset + 8 (16 bits): pointer to some script + * + * headerlen == 10 + * offset + 10 : configuration 0 + * + * headerlen == 12 + * offset + 10 : pointer to some script + * offset + 12 : configuration 0 + * + * Each config entry is as follows: + * + * offset + 0 (16 bits): unknown, assumed to be a match value + * offset + 2 (16 bits): pointer to script table (clock set?) + * offset + 4 (16 bits): pointer to script table (reset?) + * + * There doesn't appear to be a count value to say how many + * entries exist in each script table, instead, a 0 value in + * the first 16-bit word seems to indicate both the end of the + * list and the default entry. 
The second 16-bit word in the + * script tables is a pointer to the script to execute. + */ + + NV_DEBUG(drm, "Searching for output entry for %d %d %d\n", + dcbent->type, dcbent->location, dcbent->or); + for (i = 0; i < table[3]; i++) { + otable = ROMPTR(dev, table[table[1] + (i * table[2])]); + if (otable && bios_encoder_match(dcbent, ROM32(otable[0]))) + break; + } + + if (!otable) { + NV_DEBUG(drm, "failed to match any output table\n"); + return 1; + } + + if (pclk < -2 || pclk > 0) { + /* Try to find matching script table entry */ + for (i = 0; i < otable[5]; i++) { + if (ROM16(otable[table[4] + i*6]) == type) + break; + } + + if (i == otable[5]) { + NV_ERROR(drm, "Table 0x%04x not found for %d/%d, " + "using first\n", + type, dcbent->type, dcbent->or); + i = 0; + } + } + + if (pclk == 0) { + script = ROM16(otable[6]); + if (!script) { + NV_DEBUG(drm, "output script 0 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } else + if (pclk == -1) { + script = ROM16(otable[8]); + if (!script) { + NV_DEBUG(drm, "output script 1 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } else + if (pclk == -2) { + if (table[4] >= 12) + script = ROM16(otable[10]); + else + script = 0; + if (!script) { + NV_DEBUG(drm, "output script 2 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } else + if (pclk > 0) { + script = ROM16(otable[table[4] + i*6 + 2]); + if (script) + script = clkcmptable(bios, script, pclk); + if (!script) { + NV_DEBUG(drm, "clock script 0 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } else + if (pclk < 0) { + script = ROM16(otable[table[4] + i*6 + 4]); + if (script) + script = clkcmptable(bios, script, -pclk); + if (!script) { + NV_DEBUG(drm, "clock script 1 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } + + return 0; +} + + int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk) { /* @@ -1012,6 +1212,31 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, return 0; } +static int +parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios, + struct bit_entry *bitentry) +{ + /* + * Parses the pointer to the G80 output script tables + * + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): output script table pointer + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + uint16_t outputscripttableptr; + + if (bitentry->length != 3) { + NV_ERROR(drm, "Do not understand BIT U table\n"); + return -EINVAL; + } + + outputscripttableptr = ROM16(bios->data[bitentry->offset]); + bios->display.script_table_ptr = outputscripttableptr; + return 0; +} + struct bit_table { const char id; int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); @@ -1088,6 +1313,7 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset) parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? 
*/ parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds)); parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds)); + parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U)); return 0; } @@ -2098,7 +2324,7 @@ nouveau_run_vbios_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; - int ret = 0; + int i, ret = 0; /* Reset the BIOS head to 0. */ bios->state.crtchead = 0; @@ -2111,6 +2337,13 @@ nouveau_run_vbios_init(struct drm_device *dev) bios->fp.lvds_init_run = false; } + if (nv_device(drm->device)->card_type >= NV_50) { + for (i = 0; bios->execute && i < bios->dcb.entries; i++) { + nouveau_bios_run_display_table(dev, 0, 0, + &bios->dcb.entry[i], -1); + } + } + return ret; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h index f68c54ca422f..3befbb821a56 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -127,6 +127,12 @@ struct nvbios { int crtchead; } state; + struct { + struct dcb_output *output; + int crtc; + uint16_t script_table_ptr; + } display; + struct { uint16_t fptablepointer; /* also used by tmds */ uint16_t fpxlatetableptr; @@ -179,6 +185,8 @@ void nouveau_bios_takedown(struct drm_device *dev); int nouveau_run_vbios_init(struct drm_device *); struct dcb_connector_table_entry * nouveau_bios_connector_entry(struct drm_device *, int index); +int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk, + struct dcb_output *, int crtc); bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); uint8_t *nouveau_bios_embedded_edid(struct drm_device *); int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, @@ -187,5 +195,6 @@ int run_tmds_table(struct drm_device *, struct dcb_output *, int head, int pxclk); int call_lvds_script(struct drm_device *, struct dcb_output *, int head, enum LVDS_script, int pxclk); +bool bios_encoder_match(struct dcb_output *, u32 hash); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c index 4c950b4cf416..35ac57f0aab6 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -225,7 +225,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, type, &nvbo->placement, - align >> PAGE_SHIFT, false, NULL, acc_size, sg, + align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg, nouveau_bo_del_ttm); if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. 
*/ @@ -566,7 +566,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, if (ret) return ret; - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict, + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); nouveau_fence_unref(&fence); return ret; @@ -1472,19 +1472,19 @@ nouveau_bo_fence_ref(void *sync_obj) } static bool -nouveau_bo_fence_signalled(void *sync_obj) +nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg) { return nouveau_fence_done(sync_obj); } static int -nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr) +nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) { return nouveau_fence_wait(sync_obj, lazy, intr); } static int -nouveau_bo_fence_flush(void *sync_obj) +nouveau_bo_fence_flush(void *sync_obj, void *sync_arg) { return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_chan.c b/trunk/drivers/gpu/drm/nouveau/nouveau_chan.c index 174300b6a02e..c1d7301c0e9c 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -76,8 +76,6 @@ nouveau_channel_del(struct nouveau_channel **pchan) nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle); nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); nouveau_bo_unmap(chan->push.buffer); - if (chan->push.buffer && chan->push.buffer->pin_refcnt) - nouveau_bo_unpin(chan->push.buffer); nouveau_bo_ref(NULL, &chan->push.buffer); kfree(chan); } @@ -269,7 +267,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) struct nouveau_fb *pfb = nouveau_fb(device); struct nouveau_software_chan *swch; struct nouveau_object *object; - struct nv_dma_class args = {}; + struct nv_dma_class args; int ret, i; /* allocate dma objects to cover all allowed vram, and gart */ @@ -348,7 +346,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) /* allocate software object class (used for fences on <= nv05, and * to signal flip completion), bind it to a subchannel. */ - if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) { + if (chan != chan->drm->cechan) { ret = nouveau_object_new(nv_object(client), chan->handle, NvSw, nouveau_abi16_swclass(chan->drm), NULL, 0, &object); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c index 29a913460fed..d3595b23434a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -110,6 +110,7 @@ nouveau_connector_destroy(struct drm_connector *connector) dev = nv_connector->base.dev; drm = nouveau_drm(dev); gpio = nouveau_gpio(drm->device); + NV_DEBUG(drm, "\n"); if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) { gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff, @@ -220,7 +221,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, } if (nv_connector->type == DCB_CONNECTOR_DVI_I) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, dev->mode_config.dvi_i_subconnector_property, nv_encoder->dcb->type == DCB_OUTPUT_TMDS ? 
DRM_MODE_SUBCONNECTOR_DVID : @@ -354,7 +355,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) * valid - it's not (rh#613284) */ if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { - if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) { + if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) { status = connector_status_connected; goto out; } @@ -928,6 +929,8 @@ nouveau_connector_create(struct drm_device *dev, int index) int type, ret = 0; bool dummy; + NV_DEBUG(drm, "\n"); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { nv_connector = nouveau_connector(connector); if (nv_connector->index == index) @@ -1040,7 +1043,7 @@ nouveau_connector_create(struct drm_device *dev, int index) /* Init DVI-I specific properties */ if (nv_connector->type == DCB_CONNECTOR_DVI_I) - drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0); + drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); /* Add overscan compensation options to digital outputs */ if (disp->underscan_property && @@ -1048,31 +1051,31 @@ nouveau_connector_create(struct drm_device *dev, int index) type == DRM_MODE_CONNECTOR_DVII || type == DRM_MODE_CONNECTOR_HDMIA || type == DRM_MODE_CONNECTOR_DisplayPort)) { - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->underscan_hborder_property, 0); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->underscan_vborder_property, 0); } /* Add hue and saturation options */ if (disp->vibrant_hue_property) - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->vibrant_hue_property, 90); if (disp->color_vibrance_property) - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->color_vibrance_property, 150); switch (nv_connector->type) { case DCB_CONNECTOR_VGA: if (nv_device(drm->device)->card_type >= NV_50) { - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, nv_connector->scaling_mode); } @@ -1085,18 +1088,18 @@ nouveau_connector_create(struct drm_device *dev, int index) default: nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, nv_connector->scaling_mode); if (disp->dithering_mode) { nv_connector->dithering_mode = DITHERING_MODE_AUTO; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->dithering_mode, nv_connector->dithering_mode); } if (disp->dithering_depth) { nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->dithering_depth, nv_connector->dithering_depth); } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.h b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.h index 20eb84cce9e6..ebdb87670a8f 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -28,7 +28,6 @@ #define __NOUVEAU_CONNECTOR_H__ #include -#include "nouveau_crtc.h" struct nouveau_i2c_port; @@ -81,21 +80,6 @@ static inline struct nouveau_connector *nouveau_connector( 
return container_of(con, struct nouveau_connector, base); } -static inline struct nouveau_connector * -nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) -{ - struct drm_device *dev = nv_crtc->base.dev; - struct drm_connector *connector; - struct drm_crtc *crtc = to_drm_crtc(nv_crtc); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder && connector->encoder->crtc == crtc) - return nouveau_connector(connector); - } - - return NULL; -} - struct drm_connector * nouveau_connector_create(struct drm_device *, int index); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h b/trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h index d1e5890784d7..e6d0d1eb0133 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h @@ -82,6 +82,16 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc) return &crtc->base; } +int nv50_crtc_create(struct drm_device *dev, int index); +int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv, + uint32_t buffer_handle, uint32_t width, + uint32_t height); +int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y); + int nv04_cursor_init(struct nouveau_crtc *); +int nv50_cursor_init(struct nouveau_crtc *); + +struct nouveau_connector * +nouveau_crtc_connector_get(struct nouveau_crtc *crtc); #endif /* __NOUVEAU_CRTC_H__ */ diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_display.c b/trunk/drivers/gpu/drm/nouveau/nouveau_display.c index e4188f24fc75..86124b131f4f 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_display.c @@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev, nv_fb->r_dma = NvEvoVRAM_LP; switch (fb->depth) { - case 8: nv_fb->r_format = 0x1e00; break; - case 15: nv_fb->r_format = 0xe900; break; - case 16: nv_fb->r_format = 0xe800; break; + case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break; + case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break; + case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break; case 24: - case 32: nv_fb->r_format = 0xcf00; break; - case 30: nv_fb->r_format = 0xd100; break; + case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break; + case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break; default: NV_ERROR(drm, "unknown depth %d\n", fb->depth); return -EINVAL; @@ -324,7 +324,7 @@ nouveau_display_create(struct drm_device *dev) disp->underscan_vborder_property = drm_property_create_range(dev, 0, "underscan vborder", 0, 128); - if (gen >= 1) { + if (gen == 1) { disp->vibrant_hue_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "vibrant hue", 2); @@ -366,7 +366,10 @@ nouveau_display_create(struct drm_device *dev) if (nv_device(drm->device)->card_type < NV_50) ret = nv04_display_create(dev); else + if (nv_device(drm->device)->card_type < NV_D0) ret = nv50_display_create(dev); + else + ret = nvd0_display_create(dev); if (ret) goto disp_create_err; @@ -397,12 +400,11 @@ nouveau_display_destroy(struct drm_device *dev) nouveau_backlight_exit(dev); drm_vblank_cleanup(dev); - drm_kms_helper_poll_fini(dev); - drm_mode_config_cleanup(dev); - if (disp->dtor) disp->dtor(dev); + drm_kms_helper_poll_fini(dev); + drm_mode_config_cleanup(dev); nouveau_drm(dev)->display = NULL; kfree(disp); } @@ -657,7 +659,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* Emit a page flip */ if (nv_device(drm->device)->card_type >= NV_50) { - ret = nv50_display_flip_next(crtc, 
fb, chan, 0); + if (nv_device(drm->device)->card_type >= NV_D0) + ret = nvd0_display_flip_next(crtc, fb, chan, 0); + else + ret = nv50_display_flip_next(crtc, fb, chan); if (ret) { mutex_unlock(&chan->cli->mutex); goto fail_unreserve; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c b/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c index 59838651ee8f..978a108ba7a1 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -30,17 +30,60 @@ #include "nouveau_encoder.h" #include "nouveau_crtc.h" -#include - #include #include +u8 * +nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct bit_entry d; + u8 *table; + int i; + + if (bit_table(dev, 'd', &d)) { + NV_ERROR(drm, "BIT 'd' table not found\n"); + return NULL; + } + + if (d.version != 1) { + NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version); + return NULL; + } + + table = ROMPTR(dev, d.data[0]); + if (!table) { + NV_ERROR(drm, "displayport table pointer invalid\n"); + return NULL; + } + + switch (table[0]) { + case 0x20: + case 0x21: + case 0x30: + case 0x40: + break; + default: + NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]); + return NULL; + } + + for (i = 0; i < table[3]; i++) { + *entry = ROMPTR(dev, table[table[1] + (i * table[2])]); + if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0]))) + return table; + } + + NV_ERROR(drm, "displayport encoder table not found\n"); + return NULL; +} + /****************************************************************************** * link training *****************************************************************************/ struct dp_state { struct nouveau_i2c_port *auxch; - struct nouveau_object *core; + struct dp_train_func *func; struct dcb_output *dcb; int crtc; u8 *dpcd; @@ -54,20 +97,13 @@ static void dp_set_link_config(struct drm_device *dev, struct dp_state *dp) { struct nouveau_drm *drm = nouveau_drm(dev); - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; u8 sink[2]; - u32 data; NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); /* set desired link configuration on the source */ - data = ((dp->link_bw / 27000) << 8) | dp->link_nr; - if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) - data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH; - - nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data); + dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw, + dp->dpcd[2] & DP_ENHANCED_FRAME_CAP); /* inform the sink of the new configuration */ sink[0] = dp->link_bw / 27000; @@ -82,14 +118,11 @@ static void dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern) { struct nouveau_drm *drm = nouveau_drm(dev); - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; u8 sink_tp; NV_DEBUG(drm, "training pattern %d\n", pattern); - nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern); + dp->func->train_set(dev, dp->dcb, pattern); nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1); sink_tp &= ~DP_TRAINING_PATTERN_MASK; @@ -101,9 +134,6 @@ static int dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) { struct nouveau_drm *drm = nouveau_drm(dev); - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = 
(dp->crtc << 3) | (link << 2) | or; int i; for (i = 0; i < dp->link_nr; i++) { @@ -118,8 +148,7 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]); - - nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre); + dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre); } return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4); @@ -205,32 +234,59 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp) } static void -dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread) +dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable) { - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; - - nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ? - NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON : - NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) | - NV94_DISP_SOR_DP_TRAIN_OP_INIT); + u16 script = 0x0000; + u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); + if (table) { + if (table[0] >= 0x20 && table[0] <= 0x30) { + if (enable) script = ROM16(entry[12]); + else script = ROM16(entry[14]); + } else + if (table[0] == 0x40) { + if (enable) script = ROM16(entry[11]); + else script = ROM16(entry[13]); + } + } + + nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); +} + +static void +dp_link_train_init(struct drm_device *dev, struct dp_state *dp) +{ + u16 script = 0x0000; + u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); + if (table) { + if (table[0] >= 0x20 && table[0] <= 0x30) + script = ROM16(entry[6]); + else + if (table[0] == 0x40) + script = ROM16(entry[5]); + } + + nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); } static void dp_link_train_fini(struct drm_device *dev, struct dp_state *dp) { - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; + u16 script = 0x0000; + u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); + if (table) { + if (table[0] >= 0x20 && table[0] <= 0x30) + script = ROM16(entry[8]); + else + if (table[0] == 0x40) + script = ROM16(entry[7]); + } - nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, - NV94_DISP_SOR_DP_TRAIN_OP_FINI); + nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); } static bool nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, - struct nouveau_object *core) + struct dp_train_func *func) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); @@ -248,7 +304,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, if (!dp.auxch) return false; - dp.core = core; + dp.func = func; dp.dcb = nv_encoder->dcb; dp.crtc = nv_crtc->index; dp.dpcd = nv_encoder->dp.dpcd; @@ -262,8 +318,11 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, */ gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false); - /* enable down-spreading and execute pre-train script from vbios */ - dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1); + /* enable down-spreading, if possible */ + dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1); + + /* execute pre-train script from vbios */ + dp_link_train_init(dev, &dp); /* start off at highest link rate supported by encoder and display */ while (*link_bw > nv_encoder->dp.link_bw) @@ 
-306,7 +365,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, void nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate, - struct nouveau_object *core) + struct dp_train_func *func) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_drm *drm = nouveau_drm(encoder->dev); @@ -326,7 +385,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate, nv_wraux(auxch, DP_SET_POWER, &status, 1); if (mode == DRM_MODE_DPMS_ON) - nouveau_dp_link_train(encoder, datarate, core); + nouveau_dp_link_train(encoder, datarate, func); } static void diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c index a7529b372381..8503b2ea570a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -49,6 +49,8 @@ #include "nouveau_fbcon.h" #include "nouveau_fence.h" +#include "nouveau_ttm.h" + MODULE_PARM_DESC(config, "option string to pass to driver core"); static char *nouveau_config; module_param_named(config, nouveau_config, charp, 0400); @@ -127,7 +129,8 @@ nouveau_accel_init(struct nouveau_drm *drm) /* initialise synchronisation routines */ if (device->card_type < NV_10) ret = nv04_fence_create(drm); - else if (device->chipset < 0x84) ret = nv10_fence_create(drm); + else if (device->card_type < NV_50) ret = nv10_fence_create(drm); + else if (device->chipset < 0x84) ret = nv50_fence_create(drm); else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); else ret = nvc0_fence_create(drm); if (ret) { @@ -146,7 +149,7 @@ nouveau_accel_init(struct nouveau_drm *drm) NV_ERROR(drm, "failed to create ce channel, %d\n", ret); arg0 = NVE0_CHANNEL_IND_ENGINE_GR; - arg1 = 1; + arg1 = 0; } else { arg0 = NvDmaFB; arg1 = NvDmaTT; @@ -221,7 +224,6 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent) boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif remove_conflicting_framebuffers(aper, "nouveaufb", boot); - kfree(aper); ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev), nouveau_config, nouveau_debug, &device); @@ -393,12 +395,17 @@ nouveau_drm_remove(struct pci_dev *pdev) } int -nouveau_do_suspend(struct drm_device *dev) +nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state) { + struct drm_device *dev = pci_get_drvdata(pdev); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; int ret; + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF || + pm_state.event == PM_EVENT_PRETHAW) + return 0; + if (dev->mode_config.num_crtc) { NV_INFO(drm, "suspending fbcon...\n"); nouveau_fbcon_set_suspend(dev, 1); @@ -429,6 +436,13 @@ nouveau_do_suspend(struct drm_device *dev) goto fail_client; nouveau_agp_fini(drm); + + pci_save_state(pdev); + if (pm_state.event == PM_EVENT_SUSPEND) { + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + } + return 0; fail_client: @@ -443,33 +457,24 @@ nouveau_do_suspend(struct drm_device *dev) return ret; } -int nouveau_pmops_suspend(struct device *dev) +int +nouveau_drm_resume(struct pci_dev *pdev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_cli *cli; int ret; - if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - ret = nouveau_do_suspend(drm_dev); + NV_INFO(drm, "re-enabling 
device...\n"); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); if (ret) return ret; - - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -int -nouveau_do_resume(struct drm_device *dev) -{ - struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_cli *cli; - - NV_INFO(drm, "re-enabling device...\n"); + pci_set_master(pdev); nouveau_agp_reset(drm); @@ -495,42 +500,6 @@ nouveau_do_resume(struct drm_device *dev) return 0; } -int nouveau_pmops_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - int ret; - - if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) - return 0; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) - return ret; - pci_set_master(pdev); - - return nouveau_do_resume(drm_dev); -} - -static int nouveau_pmops_freeze(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - - return nouveau_do_suspend(drm_dev); -} - -static int nouveau_pmops_thaw(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - - return nouveau_do_resume(drm_dev); -} - - static int nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) { @@ -683,22 +652,14 @@ nouveau_drm_pci_table[] = { {} }; -static const struct dev_pm_ops nouveau_pm_ops = { - .suspend = nouveau_pmops_suspend, - .resume = nouveau_pmops_resume, - .freeze = nouveau_pmops_freeze, - .thaw = nouveau_pmops_thaw, - .poweroff = nouveau_pmops_freeze, - .restore = nouveau_pmops_resume, -}; - static struct pci_driver nouveau_drm_pci_driver = { .name = "nouveau", .id_table = nouveau_drm_pci_table, .probe = nouveau_drm_probe, .remove = nouveau_drm_remove, - .driver.pm = &nouveau_pm_ops, + .suspend = nouveau_drm_suspend, + .resume = nouveau_drm_resume, }; static int __init diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h index aa89eb938b47..a10169927086 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev) return nv_device(nouveau_drm(dev)->device); } -int nouveau_pmops_suspend(struct device *); -int nouveau_pmops_resume(struct device *); +int nouveau_drm_suspend(struct pci_dev *, pm_message_t); +int nouveau_drm_resume(struct pci_dev *); #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) #define NV_ERROR(cli, fmt, args...) 
nv_error((cli), fmt, ##args) diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_encoder.h b/trunk/drivers/gpu/drm/nouveau/nouveau_encoder.h index d0d95bd511ab..6a17bf2ba9a4 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -93,9 +93,14 @@ get_slave_funcs(struct drm_encoder *enc) /* nouveau_dp.c */ bool nouveau_dp_detect(struct drm_encoder *); void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate, - struct nouveau_object *); + struct dp_train_func *); +u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **); struct nouveau_connector * nouveau_encoder_connector_get(struct nouveau_encoder *encoder); +int nv50_sor_create(struct drm_connector *, struct dcb_output *); +void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32); +int nv50_dac_create(struct drm_connector *, struct dcb_output *); + #endif /* __NOUVEAU_ENCODER_H__ */ diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c new file mode 100644 index 000000000000..2c672cebc889 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c @@ -0,0 +1,261 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include +#include "nouveau_drm.h" +#include "nouveau_connector.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" + +static bool +hdmi_sor(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + if (nv_device(drm->device)->chipset < 0xa3 || + nv_device(drm->device)->chipset == 0xaa || + nv_device(drm->device)->chipset == 0xac) + return false; + return true; +} + +static inline u32 +hdmi_base(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); + if (!hdmi_sor(encoder)) + return 0x616500 + (nv_crtc->index * 0x800); + return 0x61c500 + (nv_encoder->or * 0x800); +} + +static void +hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + nv_wr32(device, hdmi_base(encoder) + reg, val); +} + +static u32 +hdmi_rd32(struct drm_encoder *encoder, u32 reg) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + return nv_rd32(device, hdmi_base(encoder) + reg); +} + +static u32 +hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val) +{ + u32 tmp = hdmi_rd32(encoder, reg); + hdmi_wr32(encoder, reg, (tmp & ~mask) | val); + return tmp; +} + +static void +nouveau_audio_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_device *device = nouveau_dev(encoder->dev); + u32 or = nv_encoder->or * 0x800; + + if (hdmi_sor(encoder)) + nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000); +} + +static void +nouveau_audio_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_connector *nv_connector; + u32 or = nv_encoder->or * 0x800; + int i; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!drm_detect_monitor_audio(nv_connector->edid)) { + nouveau_audio_disconnect(encoder); + return; + } + + if (hdmi_sor(encoder)) { + nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001); + + drm_edid_to_eld(&nv_connector->base, nv_connector->edid); + if (nv_connector->base.eld[0]) { + u8 *eld = nv_connector->base.eld; + for (i = 0; i < eld[2] * 4; i++) + nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]); + for (i = eld[2] * 4; i < 0x60; i++) + nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00); + nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002); + } + } +} + +static void +nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame) +{ + /* calculate checksum for the infoframe */ + u8 sum = 0, i; + for (i = 0; i < frame[2]; i++) + sum += frame[i]; + frame[3] = 256 - sum; + + /* disable infoframe, and write header */ + hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000); + hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff); + + /* register scans tell me the audio infoframe has only one set of + * subpack regs, according to tegra (gee nvidia, it'd be nice if we + * could get those docs too!), the hdmi block pads out the rest of + * the packet on its own. 
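
Editor's note: the frame[3] = 256 - sum computation above implements the usual CEA-861 InfoFrame checksum rule, i.e. the checksum byte is chosen so that the header, checksum slot and payload bytes sum to zero modulo 256 (summing only the first frame[2] bytes works here because the trailing payload bytes are zero). The standalone sketch below is not part of the patch; infoframe_checksum() is a hypothetical helper used purely to demonstrate the zero-sum property for an AVI header like the one nouveau_hdmi_video_infoframe() builds below.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Checksum over the 3 header bytes, the checksum slot and payload_len
 * payload bytes, skipping the checksum slot itself. */
static uint8_t infoframe_checksum(const uint8_t *frame, size_t payload_len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < 3 + 1 + payload_len; i++) {
		if (i != 3)
			sum += frame[i];
	}
	return (uint8_t)(0x100 - sum);
}

int main(void)
{
	uint8_t avi[3 + 1 + 0x0d];
	uint8_t total = 0;
	size_t i;

	memset(avi, 0, sizeof(avi));
	avi[0] = 0x82;	/* AVI InfoFrame type */
	avi[1] = 0x02;	/* version */
	avi[2] = 0x0d;	/* payload length */
	avi[3] = infoframe_checksum(avi, avi[2]);

	for (i = 0; i < sizeof(avi); i++)
		total += avi[i];
	printf("checksum=0x%02x, total mod 256=0x%02x\n", avi[3], total);
	/* prints checksum=0x6f, total mod 256=0x00 */
	return 0;
}
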
+ */ + if (ctrl == 0x020) + frame[2] = 6; + + /* write out checksum and data, weird weird 7 byte register pairs */ + for (i = 0; i < frame[2] + 1; i += 7) { + u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8); + u32 *subpack = (u32 *)&frame[3 + i]; + hdmi_wr32(encoder, rsubpack + 0, subpack[0]); + hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff); + } + + /* enable the infoframe */ + hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001); +} + +static void +nouveau_hdmi_video_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0; + const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0; + const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0; + u8 frame[20]; + + frame[0x00] = 0x82; /* AVI infoframe */ + frame[0x01] = 0x02; /* version */ + frame[0x02] = 0x0d; /* length */ + frame[0x03] = 0x00; + frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S; + frame[0x05] = (C << 6) | (M << 4) | R; + frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC; + frame[0x07] = VIC; + frame[0x08] = PR; + frame[0x09] = bar_top & 0xff; + frame[0x0a] = bar_top >> 8; + frame[0x0b] = bar_bottom & 0xff; + frame[0x0c] = bar_bottom >> 8; + frame[0x0d] = bar_left & 0xff; + frame[0x0e] = bar_left >> 8; + frame[0x0f] = bar_right & 0xff; + frame[0x10] = bar_right >> 8; + frame[0x11] = 0x00; + frame[0x12] = 0x00; + frame[0x13] = 0x00; + + nouveau_hdmi_infoframe(encoder, 0x020, frame); +} + +static void +nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00; + const u8 CA = 0x00, DM_INH = 0, LSV = 0x00; + u8 frame[12]; + + frame[0x00] = 0x84; /* Audio infoframe */ + frame[0x01] = 0x01; /* version */ + frame[0x02] = 0x0a; /* length */ + frame[0x03] = 0x00; + frame[0x04] = (CT << 4) | CC; + frame[0x05] = (SF << 2) | ceaSS; + frame[0x06] = FMT; + frame[0x07] = CA; + frame[0x08] = (DM_INH << 7) | (LSV << 3); + frame[0x09] = 0x00; + frame[0x0a] = 0x00; + frame[0x0b] = 0x00; + + nouveau_hdmi_infoframe(encoder, 0x000, frame); +} + +static void +nouveau_hdmi_disconnect(struct drm_encoder *encoder) +{ + nouveau_audio_disconnect(encoder); + + /* disable audio and avi infoframes */ + hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000); + hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000); + + /* disable hdmi */ + hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000); +} + +void +nouveau_hdmi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + u32 max_ac_packet, rekey; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!mode || !nv_connector || !nv_connector->edid || + !drm_detect_hdmi_monitor(nv_connector->edid)) { + nouveau_hdmi_disconnect(encoder); + return; + } + + nouveau_hdmi_video_infoframe(encoder, mode); + nouveau_hdmi_audio_infoframe(encoder, mode); + + hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ + hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? 
*/ + hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ + + nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ + nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ + nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ + + /* value matches nvidia binary driver, and tegra constant */ + rekey = 56; + + max_ac_packet = mode->htotal - mode->hdisplay; + max_ac_packet -= rekey; + max_ac_packet -= 18; /* constant from tegra */ + max_ac_packet /= 32; + + /* enable hdmi */ + hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */ + 0x1f000000 | /* unknown */ + max_ac_packet << 16 | + rekey); + + nouveau_audio_mode_set(encoder, mode); +} diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_irq.c b/trunk/drivers/gpu/drm/nouveau/nouveau_irq.c index 1303680affd3..1d8cb506a28a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -60,6 +60,18 @@ nouveau_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; nv_subdev(pmc)->intr(nv_subdev(pmc)); + + if (dev->mode_config.num_crtc) { + if (device->card_type >= NV_D0) { + if (nv_rd32(device, 0x000100) & 0x04000000) + nvd0_display_intr(dev); + } else + if (device->card_type >= NV_50) { + if (nv_rd32(device, 0x000100) & 0x04000000) + nv50_display_intr(dev); + } + } + return IRQ_HANDLED; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_prime.c b/trunk/drivers/gpu/drm/nouveau/nouveau_prime.c index 3543fec2355e..366462cf8a2c 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -155,6 +155,10 @@ nouveau_prime_new(struct drm_device *dev, return ret; nvbo = *pnvbo; + /* we restrict allowed domains on nv50+ to only the types + * that were requested at creation time. not possibly on + * earlier chips without busting the ABI. 
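
Editor's note: the rekey/max_ac_packet arithmetic in nouveau_hdmi_mode_set() above is easier to follow with concrete numbers. The sketch below is not part of the patch; it simply replays that arithmetic for an assumed CEA-861 1920x1080@60 timing (htotal 2200, hdisplay 1920) and packs the result the way the masked 0x0a4 write does.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t htotal = 2200, hdisplay = 1920;	/* assumed mode */
	const uint32_t rekey = 56;			/* driver constant */

	uint32_t max_ac_packet = htotal - hdisplay;	/* h blanking: 280 */
	max_ac_packet -= rekey;				/* 224 */
	max_ac_packet -= 18;				/* 206 */
	max_ac_packet /= 32;				/* 6 */

	/* value programmed through the 0x5f1f003f mask in the patch */
	uint32_t ctrl = 0x40000000 | 0x1f000000 | (max_ac_packet << 16) | rekey;
	printf("max_ac_packet=%u ctrl=0x%08x\n", max_ac_packet, ctrl);
	/* prints max_ac_packet=6 ctrl=0x5f060038 */
	return 0;
}
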
+ */ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); if (!nvbo->gem) { diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_vga.c b/trunk/drivers/gpu/drm/nouveau/nouveau_vga.c index 25d3495725eb..6f0ac64873df 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -31,11 +31,12 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - nouveau_pmops_resume(&pdev->dev); + nouveau_drm_resume(pdev); drm_kms_helper_poll_enable(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { @@ -43,7 +44,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev, dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(dev); nouveau_switcheroo_optimus_dsm(); - nouveau_pmops_suspend(&pdev->dev); + nouveau_drm_suspend(pdev, pmm); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } diff --git a/trunk/drivers/gpu/drm/nouveau/nv04_crtc.c b/trunk/drivers/gpu/drm/nouveau/nv04_crtc.c index 6578cd28c556..82a0d9c6cda3 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/trunk/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -730,7 +730,6 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); nouveau_bo_unmap(nv_crtc->cursor.nvbo); - nouveau_bo_unpin(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); kfree(nv_crtc); } @@ -1057,11 +1056,8 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); - if (!ret) { + if (!ret) ret = nouveau_bo_map(nv_crtc->cursor.nvbo); - if (ret) - nouveau_bo_unpin(nv_crtc->cursor.nvbo); - } if (ret) nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); } diff --git a/trunk/drivers/gpu/drm/nouveau/nv04_display.c b/trunk/drivers/gpu/drm/nouveau/nv04_display.c index 2cd6fb8c548e..846050f04c23 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv04_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nv04_display.c @@ -60,6 +60,8 @@ nv04_display_create(struct drm_device *dev) struct nv04_display *disp; int i, ret; + NV_DEBUG(drm, "\n"); + disp = kzalloc(sizeof(*disp), GFP_KERNEL); if (!disp) return -ENOMEM; @@ -130,10 +132,13 @@ nv04_display_create(struct drm_device *dev) void nv04_display_destroy(struct drm_device *dev) { + struct nouveau_drm *drm = nouveau_drm(dev); struct nv04_display *disp = nv04_display(dev); struct drm_encoder *encoder; struct drm_crtc *crtc; + NV_DEBUG(drm, "\n"); + /* Turn every CRTC off. 
*/ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct drm_mode_set modeset = { diff --git a/trunk/drivers/gpu/drm/nouveau/nv10_fence.c b/trunk/drivers/gpu/drm/nouveau/nv10_fence.c index 7ae7f97a6d4d..ce752bf5cc4e 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/trunk/drivers/gpu/drm/nouveau/nv10_fence.c @@ -155,8 +155,6 @@ nv10_fence_destroy(struct nouveau_drm *drm) { struct nv10_fence_priv *priv = drm->fence; nouveau_bo_unmap(priv->bo); - if (priv->bo) - nouveau_bo_unpin(priv->bo); nouveau_bo_ref(NULL, &priv->bo); drm->fence = NULL; kfree(priv); @@ -185,11 +183,8 @@ nv10_fence_create(struct nouveau_drm *drm) 0, 0x0000, NULL, &priv->bo); if (!ret) { ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); - if (!ret) { + if (!ret) ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } if (ret) nouveau_bo_ref(NULL, &priv->bo); } diff --git a/trunk/drivers/gpu/drm/nouveau/nv17_tv.c b/trunk/drivers/gpu/drm/nouveau/nv17_tv.c index 2ca276ada507..897b63621e2d 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/trunk/drivers/gpu/drm/nouveau/nv17_tv.c @@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) break; } - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, conf->tv_subconnector_property, tv_enc->subconnector); @@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder, drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_select_subconnector_property, tv_enc->select_subconnector); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_subconnector_property, tv_enc->subconnector); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_mode_property, tv_enc->tv_norm); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, tv_enc->flicker); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_saturation_property, tv_enc->saturation); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_hue_property, tv_enc->hue); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_overscan_property, tv_enc->overscan); diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c b/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c new file mode 100644 index 000000000000..222de77d6269 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -0,0 +1,764 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include + +#include "nouveau_reg.h" +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_gem.h" +#include "nouveau_hw.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "nouveau_connector.h" +#include "nv50_display.h" + +#include + +static void +nv50_crtc_lut_load(struct drm_crtc *crtc) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); + int i; + + NV_DEBUG(drm, "\n"); + + for (i = 0; i < 256; i++) { + writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0); + writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2); + writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4); + } + + if (nv_crtc->lut.depth == 30) { + writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0); + writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2); + writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4); + } +} + +int +nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int index = nv_crtc->index, ret; + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked"); + + if (blanked) { + nv_crtc->cursor.hide(nv_crtc, false); + + ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5); + if (ret) { + NV_ERROR(drm, "no space while blanking crtc\n"); + return ret; + } + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); + OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK); + OUT_RING(evo, 0); + if (nv_device(drm->device)->chipset != 0x50) { + BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); + OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE); + } + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); + OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE); + } else { + if (nv_crtc->cursor.visible) + nv_crtc->cursor.show(nv_crtc, false); + else + nv_crtc->cursor.hide(nv_crtc, false); + + ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8); + if (ret) { + NV_ERROR(drm, "no space while unblanking crtc\n"); + return ret; + } + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); + OUT_RING(evo, nv_crtc->lut.depth == 8 ? 
+ NV50_EVO_CRTC_CLUT_MODE_OFF : + NV50_EVO_CRTC_CLUT_MODE_ON); + OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8); + if (nv_device(drm->device)->chipset != 0x50) { + BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); + OUT_RING(evo, NvEvoVRAM); + } + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2); + OUT_RING(evo, nv_crtc->fb.offset >> 8); + OUT_RING(evo, 0); + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); + if (nv_device(drm->device)->chipset != 0x50) + if (nv_crtc->fb.tile_flags == 0x7a00 || + nv_crtc->fb.tile_flags == 0xfe00) + OUT_RING(evo, NvEvoFB32); + else + if (nv_crtc->fb.tile_flags == 0x7000) + OUT_RING(evo, NvEvoFB16); + else + OUT_RING(evo, NvEvoVRAM_LP); + else + OUT_RING(evo, NvEvoVRAM_LP); + } + + nv_crtc->fb.blanked = blanked; + return 0; +} + +static int +nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master; + struct nouveau_connector *nv_connector; + struct drm_connector *connector; + int head = nv_crtc->index, ret; + u32 mode = 0x00; + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + connector = &nv_connector->base; + if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { + if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) + mode = DITHERING_MODE_DYNAMIC2X2; + } else { + mode = nv_connector->dithering_mode; + } + + if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { + if (connector->display_info.bpc >= 8) + mode |= DITHERING_DEPTH_8BPC; + } else { + mode |= nv_connector->dithering_depth; + } + + ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); + if (ret == 0) { + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1); + OUT_RING (evo, mode); + if (update) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + FIRE_RING (evo); + } + } + + return ret; +} + +static int +nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + int adj; + u32 hue, vib; + + NV_DEBUG(drm, "vibrance = %i, hue = %i\n", + nv_crtc->color_vibrance, nv_crtc->vibrant_hue); + + ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); + if (ret) { + NV_ERROR(drm, "no space while setting color vibrance\n"); + return ret; + } + + adj = (nv_crtc->color_vibrance > 0) ? 50 : 0; + vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff; + + hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff; + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1); + OUT_RING (evo, (hue << 20) | (vib << 8)); + + if (update) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + FIRE_RING (evo); + } + + return 0; +} + +struct nouveau_connector * +nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct drm_connector *connector; + struct drm_crtc *crtc = to_drm_crtc(nv_crtc); + + /* The safest approach is to find an encoder with the right crtc, that + * is also linked to a connector. 
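
Editor's note: the hue/vibrance packing in nv50_crtc_set_color_vibrance() above maps the percentage-style properties onto two 12-bit fields of a single EVO COLOR_CTRL word. The sketch below is not part of the patch; nv50_color_ctrl_word() is a hypothetical standalone copy of that arithmetic, evaluated for the initial values nv50_crtc_create() uses later in this file (color_vibrance 50, vibrant_hue 0).

#include <stdint.h>
#include <stdio.h>

static uint32_t nv50_color_ctrl_word(int color_vibrance, int vibrant_hue)
{
	/* scale by 2047/100 (positive vibrance rounded to the nearest step
	 * by adding 50 before the divide) and truncate to the 12-bit field,
	 * exactly as nv50_crtc_set_color_vibrance() does above */
	int adj = (color_vibrance > 0) ? 50 : 0;
	uint32_t vib = (uint32_t)((color_vibrance * 2047 + adj) / 100) & 0xfff;
	uint32_t hue = (uint32_t)((vibrant_hue * 2047) / 100) & 0xfff;

	return (hue << 20) | (vib << 8);
}

int main(void)
{
	/* defaults set in nv50_crtc_create(): vibrance 50, hue 0 */
	printf("COLOR_CTRL = 0x%08x\n", nv50_color_ctrl_word(50, 0));
	/* prints COLOR_CTRL = 0x00040000 (vib field 0x400, hue field 0) */
	return 0;
}
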
*/ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder) + if (connector->encoder->crtc == crtc) + return nouveau_connector(connector); + } + + return NULL; +} + +static int +nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nouveau_connector *nv_connector; + struct drm_crtc *crtc = &nv_crtc->base; + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + struct drm_display_mode *umode = &crtc->mode; + struct drm_display_mode *omode; + int scaling_mode, ret; + u32 ctrl = 0, oX, oY; + + NV_DEBUG(drm, "\n"); + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + if (!nv_connector || !nv_connector->native_mode) { + NV_ERROR(drm, "no native mode, forcing panel scaling\n"); + scaling_mode = DRM_MODE_SCALE_NONE; + } else { + scaling_mode = nv_connector->scaling_mode; + } + + /* start off at the resolution we programmed the crtc for, this + * effectively handles NONE/FULL scaling + */ + if (scaling_mode != DRM_MODE_SCALE_NONE) + omode = nv_connector->native_mode; + else + omode = umode; + + oX = omode->hdisplay; + oY = omode->vdisplay; + if (omode->flags & DRM_MODE_FLAG_DBLSCAN) + oY *= 2; + + /* add overscan compensation if necessary, will keep the aspect + * ratio the same as the backend mode unless overridden by the + * user setting both hborder and vborder properties. + */ + if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || + (nv_connector->underscan == UNDERSCAN_AUTO && + nv_connector->edid && + drm_detect_hdmi_monitor(nv_connector->edid)))) { + u32 bX = nv_connector->underscan_hborder; + u32 bY = nv_connector->underscan_vborder; + u32 aspect = (oY << 19) / oX; + + if (bX) { + oX -= (bX * 2); + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } else { + oX -= (oX >> 4) + 32; + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + } + + /* handle CENTER/ASPECT scaling, taking into account the areas + * removed already for overscan compensation + */ + switch (scaling_mode) { + case DRM_MODE_SCALE_CENTER: + oX = min((u32)umode->hdisplay, oX); + oY = min((u32)umode->vdisplay, oY); + /* fall-through */ + case DRM_MODE_SCALE_ASPECT: + if (oY < oX) { + u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; + oX = ((oY * aspect) + (aspect / 2)) >> 19; + } else { + u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; + oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + break; + default: + break; + } + + if (umode->hdisplay != oX || umode->vdisplay != oY || + umode->flags & DRM_MODE_FLAG_INTERLACE || + umode->flags & DRM_MODE_FLAG_DBLSCAN) + ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE; + + ret = RING_SPACE(evo, 5); + if (ret) + return ret; + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1); + OUT_RING (evo, ctrl); + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2); + OUT_RING (evo, oY << 16 | oX); + OUT_RING (evo, oY << 16 | oX); + + if (update) { + nv50_display_flip_stop(crtc); + nv50_display_sync(dev); + nv50_display_flip_next(crtc, crtc->fb, NULL); + } + + return 0; +} + +int +nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_clock *clk = nouveau_clock(device); + + return clk->pll_set(clk, PLL_VPLL0 + head, pclk); +} + +static void +nv50_crtc_destroy(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_drm *drm = 
nouveau_drm(crtc->dev); + + NV_DEBUG(drm, "\n"); + + nouveau_bo_unmap(nv_crtc->lut.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + drm_crtc_cleanup(&nv_crtc->base); + kfree(nv_crtc); +} + +int +nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t buffer_handle, uint32_t width, uint32_t height) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_bo *cursor = NULL; + struct drm_gem_object *gem; + int ret = 0, i; + + if (!buffer_handle) { + nv_crtc->cursor.hide(nv_crtc, true); + return 0; + } + + if (width != 64 || height != 64) + return -EINVAL; + + gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); + if (!gem) + return -ENOENT; + cursor = nouveau_gem_object(gem); + + ret = nouveau_bo_map(cursor); + if (ret) + goto out; + + /* The simple will do for now. */ + for (i = 0; i < 64 * 64; i++) + nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i)); + + nouveau_bo_unmap(cursor); + + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset); + nv_crtc->cursor.show(nv_crtc, true); + +out: + drm_gem_object_unreference_unlocked(gem); + return ret; +} + +int +nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->cursor.set_pos(nv_crtc, x, y); + return 0; +} + +static void +nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t start, uint32_t size) +{ + int end = (start + size > 256) ? 256 : start + size, i; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + for (i = start; i < end; i++) { + nv_crtc->lut.r[i] = r[i]; + nv_crtc->lut.g[i] = g[i]; + nv_crtc->lut.b[i] = b[i]; + } + + /* We need to know the depth before we upload, but it's possible to + * get called before a framebuffer is bound. 
If this is the case, + * mark the lut values as dirty by setting depth==0, and it'll be + * uploaded on the first mode_set_base() + */ + if (!nv_crtc->base.fb) { + nv_crtc->lut.depth = 0; + return; + } + + nv50_crtc_lut_load(crtc); +} + +static void +nv50_crtc_save(struct drm_crtc *crtc) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + NV_ERROR(drm, "!!\n"); +} + +static void +nv50_crtc_restore(struct drm_crtc *crtc) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + NV_ERROR(drm, "!!\n"); +} + +static const struct drm_crtc_funcs nv50_crtc_funcs = { + .save = nv50_crtc_save, + .restore = nv50_crtc_restore, + .cursor_set = nv50_crtc_cursor_set, + .cursor_move = nv50_crtc_cursor_move, + .gamma_set = nv50_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .page_flip = nouveau_crtc_page_flip, + .destroy = nv50_crtc_destroy, +}; + +static void +nv50_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static void +nv50_crtc_prepare(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + + nv50_display_flip_stop(crtc); + drm_vblank_pre_modeset(dev, nv_crtc->index); + nv50_crtc_blank(nv_crtc, true); +} + +static void +nv50_crtc_commit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + + nv50_crtc_blank(nv_crtc, false); + drm_vblank_post_modeset(dev, nv_crtc->index); + nv50_display_sync(dev); + nv50_display_flip_next(crtc, crtc->fb, NULL); +} + +static bool +nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int +nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *passed_fb, + int x, int y, bool atomic) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + struct drm_framebuffer *drm_fb; + struct nouveau_framebuffer *fb; + int ret; + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + + /* no fb bound */ + if (!atomic && !crtc->fb) { + NV_DEBUG(drm, "No FB bound\n"); + return 0; + } + + /* If atomic, we want to switch to the fb we were passed, so + * now we update pointers to do that. (We don't pin; just + * assume we're already pinned and update the base address.) + */ + if (atomic) { + drm_fb = passed_fb; + fb = nouveau_framebuffer(passed_fb); + } else { + drm_fb = crtc->fb; + fb = nouveau_framebuffer(crtc->fb); + /* If not atomic, we can go ahead and pin, and unpin the + * old fb we were passed. 
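
Editor's note: the non-atomic path above pins the new framebuffer before unpinning the one it was handed, which means a failed pin leaves the previously pinned buffer untouched. The sketch below is not part of the patch; demo_bo and the demo_* helpers are hypothetical stand-ins for the nouveau_bo pin/unpin calls, shown only to illustrate that ordering.

#include <stdio.h>

struct demo_bo {
	const char *name;
	int pin_refcnt;
};

static int demo_bo_pin(struct demo_bo *bo)
{
	bo->pin_refcnt++;
	return 0;	/* a real pin can fail; callers must check */
}

static void demo_bo_unpin(struct demo_bo *bo)
{
	if (bo->pin_refcnt > 0)
		bo->pin_refcnt--;
}

/* Pin the incoming buffer first; only drop the old pin once that succeeded. */
static int demo_set_base(struct demo_bo *new_fb, struct demo_bo *old_fb)
{
	int ret = demo_bo_pin(new_fb);
	if (ret)
		return ret;	/* old_fb keeps its pin reference */
	if (old_fb)
		demo_bo_unpin(old_fb);
	return 0;
}

int main(void)
{
	struct demo_bo oldfb = { "old", 1 };
	struct demo_bo newfb = { "new", 0 };

	demo_set_base(&newfb, &oldfb);
	printf("old pin=%d, new pin=%d\n", oldfb.pin_refcnt, newfb.pin_refcnt);
	/* prints old pin=0, new pin=1 */
	return 0;
}
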
+ */ + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (passed_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); + nouveau_bo_unpin(ofb->nvbo); + } + } + + nv_crtc->fb.offset = fb->nvbo->bo.offset; + nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); + nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; + if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) { + ret = RING_SPACE(evo, 2); + if (ret) + return ret; + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1); + OUT_RING (evo, fb->r_dma); + } + + ret = RING_SPACE(evo, 12); + if (ret) + return ret; + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5); + OUT_RING (evo, nv_crtc->fb.offset >> 8); + OUT_RING (evo, 0); + OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width); + OUT_RING (evo, fb->r_pitch); + OUT_RING (evo, fb->r_format); + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1); + OUT_RING (evo, fb->base.depth == 8 ? + NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1); + OUT_RING (evo, (y << 16) | x); + + if (nv_crtc->lut.depth != fb->base.depth) { + nv_crtc->lut.depth = fb->base.depth; + nv50_crtc_lut_load(crtc); + } + + return 0; +} + +static int +nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, + struct drm_display_mode *mode, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_channel *evo = nv50_display(dev)->master; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 head = nv_crtc->index * 0x400; + u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; + u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1; + u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; + u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; + u32 vblan2e = 0, vblan2s = 1; + int ret; + + /* hw timing description looks like this: + * + * <---------display---------> + * ______ + * |____________|---------------------------|____________| + * + * ^ synce ^ blanke ^ blanks ^ active + * + * interlaced modes also have 2 additional values pointing at the end + * and start of the next field's blanking period. + */ + + hactive = mode->htotal; + hsynce = mode->hsync_end - mode->hsync_start - 1; + hbackp = mode->htotal - mode->hsync_end; + hblanke = hsynce + hbackp; + hfrontp = mode->hsync_start - mode->hdisplay; + hblanks = mode->htotal - hfrontp - 1; + + vactive = mode->vtotal * vscan / ilace; + vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; + vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; + vblanke = vsynce + vbackp; + vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; + vblanks = vactive - vfrontp - 1; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + vblan2e = vactive + vsynce + vbackp; + vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); + vactive = (vactive * 2) + 1; + } + + ret = RING_SPACE(evo, 18); + if (ret == 0) { + BEGIN_NV04(evo, 0, 0x0804 + head, 2); + OUT_RING (evo, 0x00800000 | mode->clock); + OUT_RING (evo, (ilace == 2) ? 
2 : 0); + BEGIN_NV04(evo, 0, 0x0810 + head, 6); + OUT_RING (evo, 0x00000000); /* border colour */ + OUT_RING (evo, (vactive << 16) | hactive); + OUT_RING (evo, ( vsynce << 16) | hsynce); + OUT_RING (evo, (vblanke << 16) | hblanke); + OUT_RING (evo, (vblanks << 16) | hblanks); + OUT_RING (evo, (vblan2e << 16) | vblan2s); + BEGIN_NV04(evo, 0, 0x082c + head, 1); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x0900 + head, 1); + OUT_RING (evo, 0x00000311); /* makes sync channel work */ + BEGIN_NV04(evo, 0, 0x08c8 + head, 1); + OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay); + BEGIN_NV04(evo, 0, 0x08d4 + head, 1); + OUT_RING (evo, 0x00000000); /* screen position */ + } + + nv_crtc->set_dither(nv_crtc, false); + nv_crtc->set_scale(nv_crtc, false); + nv_crtc->set_color_vibrance(nv_crtc, false); + + return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); +} + +static int +nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + int ret; + + nv50_display_flip_stop(crtc); + ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); + if (ret) + return ret; + + ret = nv50_display_sync(crtc->dev); + if (ret) + return ret; + + return nv50_display_flip_next(crtc, crtc->fb, NULL); +} + +static int +nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + int ret; + + nv50_display_flip_stop(crtc); + ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true); + if (ret) + return ret; + + return nv50_display_sync(crtc->dev); +} + +static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { + .dpms = nv50_crtc_dpms, + .prepare = nv50_crtc_prepare, + .commit = nv50_crtc_commit, + .mode_fixup = nv50_crtc_mode_fixup, + .mode_set = nv50_crtc_mode_set, + .mode_set_base = nv50_crtc_mode_set_base, + .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, + .load_lut = nv50_crtc_lut_load, +}; + +int +nv50_crtc_create(struct drm_device *dev, int index) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = NULL; + int ret, i; + + NV_DEBUG(drm, "\n"); + + nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); + if (!nv_crtc) + return -ENOMEM; + + nv_crtc->index = index; + nv_crtc->set_dither = nv50_crtc_set_dither; + nv_crtc->set_scale = nv50_crtc_set_scale; + nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance; + nv_crtc->color_vibrance = 50; + nv_crtc->vibrant_hue = 0; + nv_crtc->lut.depth = 0; + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = i << 8; + nv_crtc->lut.g[i] = i << 8; + nv_crtc->lut.b[i] = i << 8; + } + + drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs); + drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); + drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); + + ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->lut.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->lut.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + } + + if (ret) + goto out; + + + ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + } + + if (ret) + goto out; + + nv50_cursor_init(nv_crtc); +out: + if (ret) + nv50_crtc_destroy(&nv_crtc->base); + return ret; +} diff --git 
a/trunk/drivers/gpu/drm/nouveau/nv50_cursor.c b/trunk/drivers/gpu/drm/nouveau/nv50_cursor.c new file mode 100644 index 000000000000..223da113ceee --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_cursor.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +static void +nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + + NV_DEBUG(drm, "\n"); + + if (update && nv_crtc->cursor.visible) + return; + + ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2); + if (ret) { + NV_ERROR(drm, "no space while unhiding cursor\n"); + return; + } + + if (nv_device(drm->device)->chipset != 0x50) { + BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); + OUT_RING(evo, NvEvoVRAM); + } + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2); + OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW); + OUT_RING(evo, nv_crtc->cursor.offset >> 8); + + if (update) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + nv_crtc->cursor.visible = true; + } +} + +static void +nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + + NV_DEBUG(drm, "\n"); + + if (update && !nv_crtc->cursor.visible) + return; + + ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 
5 : 3) + update * 2); + if (ret) { + NV_ERROR(drm, "no space while hiding cursor\n"); + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2); + OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE); + OUT_RING(evo, 0); + if (nv_device(drm->device)->chipset != 0x50) { + BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); + OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE); + } + + if (update) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + nv_crtc->cursor.visible = false; + } +} + +static void +nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) +{ + struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev); + + nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; + nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), + ((y & 0xFFFF) << 16) | (x & 0xFFFF)); + /* Needed to make the cursor move. */ + nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0); +} + +static void +nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) +{ + if (offset == nv_crtc->cursor.offset) + return; + + nv_crtc->cursor.offset = offset; + if (nv_crtc->cursor.visible) { + nv_crtc->cursor.visible = false; + nv_crtc->cursor.show(nv_crtc, true); + } +} + +int +nv50_cursor_init(struct nouveau_crtc *nv_crtc) +{ + nv_crtc->cursor.set_offset = nv50_cursor_set_offset; + nv_crtc->cursor.set_pos = nv50_cursor_set_pos; + nv_crtc->cursor.hide = nv50_cursor_hide; + nv_crtc->cursor.show = nv50_cursor_show; + return 0; +} diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_dac.c b/trunk/drivers/gpu/drm/nouveau/nv50_dac.c new file mode 100644 index 000000000000..6a30a1748573 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_dac.c @@ -0,0 +1,321 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include + +#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) +#include "nouveau_reg.h" +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +#include + +static void +nv50_dac_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + + if (!nv_encoder->crtc) + return; + nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); + + NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or); + + ret = RING_SPACE(evo, 4); + if (ret) { + NV_ERROR(drm, "no space while disconnecting DAC\n"); + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); + OUT_RING (evo, 0); + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + + nv_encoder->crtc = NULL; +} + +static enum drm_connector_status +nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + enum drm_connector_status status = connector_status_disconnected; + uint32_t dpms_state, load_pattern, load_state; + int or = nv_encoder->or; + + nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001); + dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)); + + nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), + 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); + return status; + } + + /* Use bios provided value if possible. 
*/ + if (drm->vbios.dactestval) { + load_pattern = drm->vbios.dactestval; + NV_DEBUG(drm, "Using bios provided load_pattern of %d\n", + load_pattern); + } else { + load_pattern = 340; + NV_DEBUG(drm, "Using default load_pattern of %d\n", + load_pattern); + } + + nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), + NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern); + mdelay(45); /* give it some time to process */ + load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or)); + + nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0); + nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + + if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) == + NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) + status = connector_status_connected; + + if (status == connector_status_connected) + NV_DEBUG(drm, "Load was detected on output with or %d\n", or); + else + NV_DEBUG(drm, "Load was not detected on output with or %d\n", or); + + return status; +} + +static void +nv50_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + uint32_t val; + int or = nv_encoder->or; + + NV_DEBUG(drm, "or %d mode %d\n", or, mode); + + /* wait for it to be done */ + if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); + return; + } + + val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F; + + if (mode != DRM_MODE_DPMS_ON) + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED; + + switch (mode) { + case DRM_MODE_DPMS_STANDBY: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF; + break; + case DRM_MODE_DPMS_SUSPEND: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF; + break; + case DRM_MODE_DPMS_OFF: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF; + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF; + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF; + break; + default: + break; + } + + nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); +} + +static void +nv50_dac_save(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + NV_ERROR(drm, "!!\n"); +} + +static void +nv50_dac_restore(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + NV_ERROR(drm, "!!\n"); +} + +static bool +nv50_dac_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *connector; + + NV_DEBUG(drm, "or %d\n", nv_encoder->or); + + connector = nouveau_encoder_connector_get(nv_encoder); + if (!connector) { + NV_ERROR(drm, "Encoder has no connector\n"); + return false; + } + + if (connector->scaling_mode != DRM_MODE_SCALE_NONE && + connector->native_mode) + drm_mode_copy(adjusted_mode, connector->native_mode); + + return true; +} + +static void +nv50_dac_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = 
nouveau_drm(encoder->dev); + struct drm_device *dev = encoder->dev; + struct nouveau_channel *evo = nv50_display(dev)->master; + struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); + uint32_t mode_ctl = 0, mode_ctl2 = 0; + int ret; + + NV_DEBUG(drm, "or %d type %d crtc %d\n", + nv_encoder->or, nv_encoder->dcb->type, crtc->index); + + nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); + + if (crtc->index == 1) + mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1; + else + mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0; + + /* Lacking a working tv-out, this is not a 100% sure. */ + if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG) + mode_ctl |= 0x40; + else + if (nv_encoder->dcb->type == DCB_OUTPUT_TV) + mode_ctl |= 0x100; + + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) + mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC; + + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) + mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC; + + ret = RING_SPACE(evo, 3); + if (ret) { + NV_ERROR(drm, "no space while connecting DAC\n"); + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); + OUT_RING(evo, mode_ctl); + OUT_RING(evo, mode_ctl2); + + nv_encoder->crtc = encoder->crtc; +} + +static struct drm_crtc * +nv50_dac_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; +} + +static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { + .dpms = nv50_dac_dpms, + .save = nv50_dac_save, + .restore = nv50_dac_restore, + .mode_fixup = nv50_dac_mode_fixup, + .prepare = nv50_dac_disconnect, + .commit = nv50_dac_commit, + .mode_set = nv50_dac_mode_set, + .get_crtc = nv50_dac_crtc_get, + .detect = nv50_dac_detect, + .disable = nv50_dac_disconnect +}; + +static void +nv50_dac_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + + if (!encoder) + return; + + NV_DEBUG(drm, "\n"); + + drm_encoder_cleanup(encoder); + kfree(nv_encoder); +} + +static const struct drm_encoder_funcs nv50_dac_encoder_funcs = { + .destroy = nv50_dac_destroy, +}; + +int +nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry) +{ + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs, + DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.c b/trunk/drivers/gpu/drm/nouveau/nv50_display.c index 35874085a61e..f97b42cbb6bb 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.c @@ -1,2058 +1,969 @@ - /* - * Copyright 2011 Red Hat Inc. +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. 
* - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* - * Authors: Ben Skeggs */ -#include - -#include -#include - #include "nouveau_drm.h" #include "nouveau_dma.h" -#include "nouveau_gem.h" -#include "nouveau_connector.h" -#include "nouveau_encoder.h" + +#include "nv50_display.h" #include "nouveau_crtc.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_fbcon.h" +#include #include "nouveau_fence.h" -#include "nv50_display.h" -#include #include -#include - #include -#include -#include - -#define EVO_DMA_NR 9 - -#define EVO_MASTER (0x00) -#define EVO_FLIP(c) (0x01 + (c)) -#define EVO_OVLY(c) (0x05 + (c)) -#define EVO_OIMM(c) (0x09 + (c)) -#define EVO_CURS(c) (0x0d + (c)) - -/* offsets in shared sync bo of various structures */ -#define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) -#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) -#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00) -#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10) - -#define EVO_CORE_HANDLE (0xd1500000) -#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i)) -#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff)) -#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \ - (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8)) - -/****************************************************************************** - * EVO channel - *****************************************************************************/ - -struct nv50_chan { - struct nouveau_object *user; - u32 handle; -}; - -static int -nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head, - void *data, u32 size, struct nv50_chan *chan) + +static void nv50_display_bh(unsigned long); + +static inline int +nv50_sor_nr(struct drm_device *dev) { - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - const u32 oclass = EVO_CHAN_OCLASS(bclass, core); - const u32 handle = EVO_CHAN_HANDLE(bclass, head); - int ret; + struct nouveau_device *device = nouveau_dev(dev); - ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle, - oclass, data, size, &chan->user); - if (ret) - return ret; + if (device->chipset < 0x90 || + device->chipset == 0x92 || + device->chipset == 0xa0) + return 2; - chan->handle = handle; - return 0; + return 4; } -static void -nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan) +u32 +nv50_display_active_crtcs(struct drm_device *dev) { - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - if (chan->handle) - nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle); -} + struct nouveau_device *device = nouveau_dev(dev); + u32 mask = 0; + int i; -/****************************************************************************** - * PIO EVO channel - *****************************************************************************/ + if (device->chipset < 0x90 || + device->chipset == 0x92 || + device->chipset == 0xa0) { + for (i = 0; i < 2; i++) + mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); + } else { + for (i = 0; i < 4; i++) + mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); + } -struct nv50_pioc { - struct nv50_chan base; -}; + for (i = 0; i < 3; i++) + mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); -static void -nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc) + return mask & 3; +} + +int +nv50_display_early_init(struct drm_device *dev) { - nv50_chan_destroy(core, &pioc->base); + return 0; } -static int -nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head, - void *data, u32 size, struct nv50_pioc *pioc) +void +nv50_display_late_takedown(struct drm_device *dev) { - 
return nv50_chan_create(core, bclass, head, data, size, &pioc->base); } -/****************************************************************************** - * DMA EVO channel - *****************************************************************************/ +int +nv50_display_sync(struct drm_device *dev) +{ + struct nv50_display *disp = nv50_display(dev); + struct nouveau_channel *evo = disp->master; + int ret; -struct nv50_dmac { - struct nv50_chan base; - dma_addr_t handle; - u32 *ptr; -}; + ret = RING_SPACE(evo, 6); + if (ret == 0) { + BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x80000000); + BEGIN_NV04(evo, 0, 0x0080, 1); + OUT_RING (evo, 0); + BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x00000000); -static void -nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac) -{ - if (dmac->ptr) { - struct pci_dev *pdev = nv_device(core)->pdev; - pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); + nv_wo32(disp->ramin, 0x2000, 0x00000000); + FIRE_RING (evo); + + if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000)) + return 0; } - nv50_chan_destroy(core, &dmac->base); + return 0; } -static int -nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent) +int +nv50_display_init(struct drm_device *dev) { - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_channel *evo; + int ret, i; + u32 val; - ret = nouveau_object_new(client, parent, NvEvoFB16, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + NV_DEBUG(drm, "\n"); - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - return ret; -} + nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004)); -static int -nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) -{ - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + /* + * I think the 0x006101XX range is some kind of main control area + * that enables things. + */ + /* CRTC? 
*/ + for (i = 0; i < 2; i++) { + val = nv_rd32(device, 0x00616100 + (i * 0x800)); + nv_wr32(device, 0x00610190 + (i * 0x10), val); + val = nv_rd32(device, 0x00616104 + (i * 0x800)); + nv_wr32(device, 0x00610194 + (i * 0x10), val); + val = nv_rd32(device, 0x00616108 + (i * 0x800)); + nv_wr32(device, 0x00610198 + (i * 0x10), val); + val = nv_rd32(device, 0x0061610c + (i * 0x800)); + nv_wr32(device, 0x0061019c + (i * 0x10), val); + } + + /* DAC */ + for (i = 0; i < 3; i++) { + val = nv_rd32(device, 0x0061a000 + (i * 0x800)); + nv_wr32(device, 0x006101d0 + (i * 0x04), val); + } + + /* SOR */ + for (i = 0; i < nv50_sor_nr(dev); i++) { + val = nv_rd32(device, 0x0061c000 + (i * 0x800)); + nv_wr32(device, 0x006101e0 + (i * 0x04), val); + } + + /* EXT */ + for (i = 0; i < 3; i++) { + val = nv_rd32(device, 0x0061e000 + (i * 0x800)); + nv_wr32(device, 0x006101f0 + (i * 0x04), val); + } + + for (i = 0; i < 3; i++) { + nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); + } - ret = nouveau_object_new(client, parent, NvEvoFB16, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, - }, sizeof(struct nv_dma_class), &object); + /* The precise purpose is unknown, i suspect it has something to do + * with text mode. + */ + if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) { + nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100); + nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1); + if (!nv_wait(device, 0x006194e8, 2, 0)) { + NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n"); + NV_ERROR(drm, "0x6194e8 = 0x%08x\n", + nv_rd32(device, 0x6194e8)); + return -EBUSY; + } + } + + for (i = 0; i < 2; i++) { + nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); + if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { + NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n"); + NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n", + nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + return -EBUSY; + } + + nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); + if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { + NV_ERROR(drm, "timeout: " + "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i); + NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i, + nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + return -EBUSY; + } + } + + nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000); + nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); + nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000); + nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); + nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, + NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 | + NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | + NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); + + ret = nv50_evo_init(dev); if (ret) return ret; + evo = nv50_display(dev)->master; - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, - }, sizeof(struct nv_dma_class), &object); - return ret; -} + nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9); 
-static int -nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) -{ - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVD0_DMA_CONF0_ENABLE | - NVD0_DMA_CONF0_PAGE_LP, - }, sizeof(struct nv_dma_class), &object); + ret = RING_SPACE(evo, 3); if (ret) return ret; + BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2); + OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED); + OUT_RING (evo, NvEvoSync); - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe | - NVD0_DMA_CONF0_PAGE_LP, - }, sizeof(struct nv_dma_class), &object); - return ret; + return nv50_display_sync(dev); } -static int -nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head, - void *data, u32 size, u64 syncbuf, - struct nv50_dmac *dmac) +void +nv50_display_fini(struct drm_device *dev) { - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - u32 pushbuf = *(u32 *)data; - int ret; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nv50_display *disp = nv50_display(dev); + struct nouveau_channel *evo = disp->master; + struct drm_crtc *drm_crtc; + int ret, i; - dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE, - &dmac->handle); - if (!dmac->ptr) - return -ENOMEM; + NV_DEBUG(drm, "\n"); - ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf, - NV_DMA_FROM_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_PCI_US | - NV_DMA_ACCESS_RD, - .start = dmac->handle + 0x0000, - .limit = dmac->handle + 0x0fff, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); - ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base); - if (ret) - return ret; + nv50_crtc_blank(crtc, true); + } - ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = syncbuf + 0x0000, - .limit = syncbuf + 0x0fff, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + ret = RING_SPACE(evo, 2); + if (ret == 0) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + } + FIRE_RING(evo); - ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + /* Almost like ack'ing a vblank interrupt, maybe in the spirit of + * cleaning up? 
+ */ + list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); + uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index); - if (nv_device(core)->card_type < NV_C0) - ret = nv50_dmac_create_fbdma(core, dmac->base.handle); - else - if (nv_device(core)->card_type < NV_D0) - ret = nvc0_dmac_create_fbdma(core, dmac->base.handle); - else - ret = nvd0_dmac_create_fbdma(core, dmac->base.handle); - return ret; -} + if (!crtc->base.enabled) + continue; -struct nv50_mast { - struct nv50_dmac base; -}; - -struct nv50_curs { - struct nv50_pioc base; -}; - -struct nv50_sync { - struct nv50_dmac base; - struct { - u32 offset; - u16 value; - } sem; -}; - -struct nv50_ovly { - struct nv50_dmac base; -}; - -struct nv50_oimm { - struct nv50_pioc base; -}; - -struct nv50_head { - struct nouveau_crtc base; - struct nv50_curs curs; - struct nv50_sync sync; - struct nv50_ovly ovly; - struct nv50_oimm oimm; -}; - -#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c)) -#define nv50_curs(c) (&nv50_head(c)->curs) -#define nv50_sync(c) (&nv50_head(c)->sync) -#define nv50_ovly(c) (&nv50_head(c)->ovly) -#define nv50_oimm(c) (&nv50_head(c)->oimm) -#define nv50_chan(c) (&(c)->base.base) -#define nv50_vers(c) nv_mclass(nv50_chan(c)->user) - -struct nv50_disp { - struct nouveau_object *core; - struct nv50_mast mast; - - u32 modeset; - - struct nouveau_bo *sync; -}; - -static struct nv50_disp * -nv50_disp(struct drm_device *dev) -{ - return nouveau_display(dev)->priv; -} + nv_wr32(device, NV50_PDISPLAY_INTR_1, mask); + if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) { + NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == " + "0x%08x\n", mask, mask); + NV_ERROR(drm, "0x610024 = 0x%08x\n", + nv_rd32(device, NV50_PDISPLAY_INTR_1)); + } + } + + for (i = 0; i < 2; i++) { + nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0); + if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { + NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n"); + NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n", + nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + } + } -#define nv50_mast(d) (&nv50_disp(d)->mast) + nv50_evo_fini(dev); -static struct drm_crtc * -nv50_display_crtc_get(struct drm_encoder *encoder) -{ - return nouveau_encoder(encoder)->crtc; + for (i = 0; i < 3; i++) { + if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i), + NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { + NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i); + NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i, + nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i))); + } + } + + /* disable interrupts. 
*/ + nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000); } -/****************************************************************************** - * EVO channel helpers - *****************************************************************************/ -static u32 * -evo_wait(void *evoc, int nr) +int +nv50_display_create(struct drm_device *dev) { - struct nv50_dmac *dmac = evoc; - u32 put = nv_ro32(dmac->base.user, 0x0000) / 4; + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_table *dcb = &drm->vbios.dcb; + struct drm_connector *connector, *ct; + struct nv50_display *priv; + int ret, i; + + NV_DEBUG(drm, "\n"); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + nouveau_display(dev)->priv = priv; + nouveau_display(dev)->dtor = nv50_display_destroy; + nouveau_display(dev)->init = nv50_display_init; + nouveau_display(dev)->fini = nv50_display_fini; + + /* Create CRTC objects */ + for (i = 0; i < 2; i++) { + ret = nv50_crtc_create(dev, i); + if (ret) + return ret; + } - if (put + nr >= (PAGE_SIZE / 4) - 8) { - dmac->ptr[put] = 0x20000000; + /* We setup the encoders from the BIOS table */ + for (i = 0 ; i < dcb->entries; i++) { + struct dcb_output *entry = &dcb->entry[i]; - nv_wo32(dmac->base.user, 0x0000, 0x00000000); - if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) { - NV_ERROR(dmac->base.user, "channel stalled\n"); - return NULL; + if (entry->location != DCB_LOC_ON_CHIP) { + NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n", + entry->type, ffs(entry->or) - 1); + continue; } - put = 0; + connector = nouveau_connector_create(dev, entry->connector); + if (IS_ERR(connector)) + continue; + + switch (entry->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + nv50_sor_create(connector, entry); + break; + case DCB_OUTPUT_ANALOG: + nv50_dac_create(connector, entry); + break; + default: + NV_WARN(drm, "DCB encoder %d unknown\n", entry->type); + continue; + } } - return dmac->ptr + put; -} + list_for_each_entry_safe(connector, ct, + &dev->mode_config.connector_list, head) { + if (!connector->encoder_ids[0]) { + NV_WARN(drm, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + connector->funcs->destroy(connector); + } + } -static void -evo_kick(u32 *push, void *evoc) -{ - struct nv50_dmac *dmac = evoc; - nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2); -} + tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev); -#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) -#define evo_data(p,d) *((p)++) = (d) + ret = nv50_evo_create(dev); + if (ret) { + nv50_display_destroy(dev); + return ret; + } -static bool -evo_sync_wait(void *data) -{ - return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000; + return 0; } -static int -evo_sync(struct drm_device *dev) +void +nv50_display_destroy(struct drm_device *dev) { - struct nouveau_device *device = nouveau_dev(dev); - struct nv50_disp *disp = nv50_disp(dev); - struct nv50_mast *mast = nv50_mast(dev); - u32 *push = evo_wait(mast, 8); - if (push) { - nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000); - evo_mthd(push, 0x0084, 1); - evo_data(push, 0x80000000 | EVO_MAST_NTFY); - evo_mthd(push, 0x0080, 2); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - evo_kick(push, mast); - if (nv_wait_cb(device, evo_sync_wait, disp->sync)) - return 0; - } + struct nv50_display *disp = nv50_display(dev); - return -EBUSY; + nv50_evo_destroy(dev); + kfree(disp); } -/****************************************************************************** - * Page 
flipping channel - *****************************************************************************/ struct nouveau_bo * nv50_display_crtc_sema(struct drm_device *dev, int crtc) { - return nv50_disp(dev)->sync; + return nv50_display(dev)->crtc[crtc].sem.bo; } void nv50_display_flip_stop(struct drm_crtc *crtc) { - struct nv50_sync *sync = nv50_sync(crtc); - u32 *push; - - push = evo_wait(sync, 8); - if (push) { - evo_mthd(push, 0x0084, 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0094, 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x00c0, 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, sync); + struct nv50_display *disp = nv50_display(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index]; + struct nouveau_channel *evo = dispc->sync; + int ret; + + ret = RING_SPACE(evo, 8); + if (ret) { + WARN_ON(1); + return; } + + BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x0094, 1); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x00c0, 1); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x0080, 1); + OUT_RING (evo, 0x00000000); + FIRE_RING (evo); } int nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct nouveau_channel *chan, u32 swap_interval) + struct nouveau_channel *chan) { + struct nouveau_drm *drm = nouveau_drm(crtc->dev); struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); - struct nv50_disp *disp = nv50_disp(crtc->dev); + struct nv50_display *disp = nv50_display(crtc->dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nv50_sync *sync = nv50_sync(crtc); - u32 *push; + struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index]; + struct nouveau_channel *evo = dispc->sync; int ret; - swap_interval <<= 4; - if (swap_interval == 0) - swap_interval |= 0x100; - - push = evo_wait(sync, 128); - if (unlikely(push == NULL)) - return -EBUSY; + ret = RING_SPACE(evo, chan ? 
25 : 27); + if (unlikely(ret)) + return ret; /* synchronise with the rendering channel, if necessary */ if (likely(chan)) { ret = RING_SPACE(chan, 10); - if (ret) + if (ret) { + WIND_RING(evo); return ret; + } - if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { - BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); + if (nv_device(drm->device)->chipset < 0xc0) { + BEGIN_NV04(chan, 0, 0x0060, 2); OUT_RING (chan, NvEvoSema0 + nv_crtc->index); - OUT_RING (chan, sync->sem.offset); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); - OUT_RING (chan, 0xf00d0000 | sync->sem.value); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2); - OUT_RING (chan, sync->sem.offset ^ 0x10); + OUT_RING (chan, dispc->sem.offset); + BEGIN_NV04(chan, 0, 0x006c, 1); + OUT_RING (chan, 0xf00d0000 | dispc->sem.value); + BEGIN_NV04(chan, 0, 0x0064, 2); + OUT_RING (chan, dispc->sem.offset ^ 0x10); OUT_RING (chan, 0x74b1e000); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); - if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS) + BEGIN_NV04(chan, 0, 0x0060, 1); + if (nv_device(drm->device)->chipset < 0x84) OUT_RING (chan, NvSema); else OUT_RING (chan, chan->vram); } else { u64 offset = nvc0_fence_crtc(chan, nv_crtc->index); - offset += sync->sem.offset; - - BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + offset += dispc->sem.offset; + BEGIN_NVC0(chan, 0, 0x0010, 4); OUT_RING (chan, upper_32_bits(offset)); OUT_RING (chan, lower_32_bits(offset)); - OUT_RING (chan, 0xf00d0000 | sync->sem.value); + OUT_RING (chan, 0xf00d0000 | dispc->sem.value); OUT_RING (chan, 0x1002); - BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + BEGIN_NVC0(chan, 0, 0x0010, 4); OUT_RING (chan, upper_32_bits(offset)); OUT_RING (chan, lower_32_bits(offset ^ 0x10)); OUT_RING (chan, 0x74b1e000); OUT_RING (chan, 0x1001); } - FIRE_RING (chan); } else { - nouveau_bo_wr32(disp->sync, sync->sem.offset / 4, - 0xf00d0000 | sync->sem.value); - evo_sync(crtc->dev); + nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4, + 0xf00d0000 | dispc->sem.value); } - /* queue the flip */ - evo_mthd(push, 0x0100, 1); - evo_data(push, 0xfffe0000); - evo_mthd(push, 0x0084, 1); - evo_data(push, swap_interval); - if (!(swap_interval & 0x00000100)) { - evo_mthd(push, 0x00e0, 1); - evo_data(push, 0x40000000); - } - evo_mthd(push, 0x0088, 4); - evo_data(push, sync->sem.offset); - evo_data(push, 0xf00d0000 | sync->sem.value); - evo_data(push, 0x74b1e000); - evo_data(push, NvEvoSync); - evo_mthd(push, 0x00a0, 2); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - evo_mthd(push, 0x00c0, 1); - evo_data(push, nv_fb->r_dma); - evo_mthd(push, 0x0110, 2); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) { - evo_mthd(push, 0x0800, 5); - evo_data(push, nv_fb->nvbo->bo.offset >> 8); - evo_data(push, 0); - evo_data(push, (fb->height << 16) | fb->width); - evo_data(push, nv_fb->r_pitch); - evo_data(push, nv_fb->r_format); + /* queue the flip on the crtc's "display sync" channel */ + BEGIN_NV04(evo, 0, 0x0100, 1); + OUT_RING (evo, 0xfffe0000); + if (chan) { + BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x00000100); } else { - evo_mthd(push, 0x0400, 5); - evo_data(push, nv_fb->nvbo->bo.offset >> 8); - evo_data(push, 0); - evo_data(push, (fb->height << 16) | fb->width); - evo_data(push, nv_fb->r_pitch); - evo_data(push, nv_fb->r_format); - } - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, sync); - - sync->sem.offset ^= 0x10; - sync->sem.value++; + 
BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x00000010); + /* allows gamma somehow, PDISP will bitch at you if + * you don't wait for vblank before changing this.. + */ + BEGIN_NV04(evo, 0, 0x00e0, 1); + OUT_RING (evo, 0x40000000); + } + BEGIN_NV04(evo, 0, 0x0088, 4); + OUT_RING (evo, dispc->sem.offset); + OUT_RING (evo, 0xf00d0000 | dispc->sem.value); + OUT_RING (evo, 0x74b1e000); + OUT_RING (evo, NvEvoSync); + BEGIN_NV04(evo, 0, 0x00a0, 2); + OUT_RING (evo, 0x00000000); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x00c0, 1); + OUT_RING (evo, nv_fb->r_dma); + BEGIN_NV04(evo, 0, 0x0110, 2); + OUT_RING (evo, 0x00000000); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x0800, 5); + OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8); + OUT_RING (evo, 0); + OUT_RING (evo, (fb->height << 16) | fb->width); + OUT_RING (evo, nv_fb->r_pitch); + OUT_RING (evo, nv_fb->r_format); + BEGIN_NV04(evo, 0, 0x0080, 1); + OUT_RING (evo, 0x00000000); + FIRE_RING (evo); + + dispc->sem.offset ^= 0x10; + dispc->sem.value++; return 0; } -/****************************************************************************** - * CRTC - *****************************************************************************/ -static int -nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) +static u16 +nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb, + u32 mc, int pxclk) { - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - struct nouveau_connector *nv_connector; - struct drm_connector *connector; - u32 *push, mode = 0x00; - - nv_connector = nouveau_crtc_connector_get(nv_crtc); - connector = &nv_connector->base; - if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { - if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) - mode = DITHERING_MODE_DYNAMIC2X2; - } else { - mode = nv_connector->dithering_mode; - } + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_connector *nv_connector = NULL; + struct drm_encoder *encoder; + struct nvbios *bios = &drm->vbios; + u32 script = 0, or; - if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { - if (connector->display_info.bpc >= 8) - mode |= DITHERING_DEPTH_8BPC; - } else { - mode |= nv_connector->dithering_depth; - } + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - push = evo_wait(mast, 4); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1); - evo_data(push, mode); - } else - if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1); - evo_data(push, mode); - } else { - evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1); - evo_data(push, mode); - } + if (nv_encoder->dcb != dcb) + continue; - if (update) { - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - } - evo_kick(push, mast); + nv_connector = nouveau_encoder_connector_get(nv_encoder); + break; } - return 0; -} - -static int -nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) -{ - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - struct drm_display_mode *omode, *umode = &nv_crtc->base.mode; - struct drm_crtc *crtc = &nv_crtc->base; - struct nouveau_connector *nv_connector; - int mode = DRM_MODE_SCALE_NONE; - u32 oX, oY, *push; - - /* start off at the resolution we programmed the crtc for, this - * effectively handles NONE/FULL scaling - */ - nv_connector = nouveau_crtc_connector_get(nv_crtc); - if (nv_connector && 
nv_connector->native_mode) - mode = nv_connector->scaling_mode; - - if (mode != DRM_MODE_SCALE_NONE) - omode = nv_connector->native_mode; - else - omode = umode; - - oX = omode->hdisplay; - oY = omode->vdisplay; - if (omode->flags & DRM_MODE_FLAG_DBLSCAN) - oY *= 2; - - /* add overscan compensation if necessary, will keep the aspect - * ratio the same as the backend mode unless overridden by the - * user setting both hborder and vborder properties. - */ - if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || - (nv_connector->underscan == UNDERSCAN_AUTO && - nv_connector->edid && - drm_detect_hdmi_monitor(nv_connector->edid)))) { - u32 bX = nv_connector->underscan_hborder; - u32 bY = nv_connector->underscan_vborder; - u32 aspect = (oY << 19) / oX; - - if (bX) { - oX -= (bX * 2); - if (bY) oY -= (bY * 2); - else oY = ((oX * aspect) + (aspect / 2)) >> 19; + or = ffs(dcb->or) - 1; + switch (dcb->type) { + case DCB_OUTPUT_LVDS: + script = (mc >> 8) & 0xf; + if (bios->fp_no_ddc) { + if (bios->fp.dual_link) + script |= 0x0100; + if (bios->fp.if_is_24bit) + script |= 0x0200; } else { - oX -= (oX >> 4) + 32; - if (bY) oY -= (bY * 2); - else oY = ((oX * aspect) + (aspect / 2)) >> 19; - } - } + /* determine number of lvds links */ + if (nv_connector && nv_connector->edid && + nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { + /* http://www.spwg.org */ + if (((u8 *)nv_connector->edid)[121] == 2) + script |= 0x0100; + } else + if (pxclk >= bios->fp.duallink_transition_clk) { + script |= 0x0100; + } - /* handle CENTER/ASPECT scaling, taking into account the areas - * removed already for overscan compensation - */ - switch (mode) { - case DRM_MODE_SCALE_CENTER: - oX = min((u32)umode->hdisplay, oX); - oY = min((u32)umode->vdisplay, oY); - /* fall-through */ - case DRM_MODE_SCALE_ASPECT: - if (oY < oX) { - u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; - oX = ((oY * aspect) + (aspect / 2)) >> 19; - } else { - u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; - oY = ((oX * aspect) + (aspect / 2)) >> 19; + /* determine panel depth */ + if (script & 0x0100) { + if (bios->fp.strapless_is_24bit & 2) + script |= 0x0200; + } else { + if (bios->fp.strapless_is_24bit & 1) + script |= 0x0200; + } + + if (nv_connector && nv_connector->edid && + (nv_connector->edid->revision >= 4) && + (nv_connector->edid->input & 0x70) >= 0x20) + script |= 0x0200; } break; + case DCB_OUTPUT_TMDS: + script = (mc >> 8) & 0xf; + if (pxclk >= 165000) + script |= 0x0100; + break; + case DCB_OUTPUT_DP: + script = (mc >> 8) & 0xf; + break; + case DCB_OUTPUT_ANALOG: + script = 0xff; + break; default: + NV_ERROR(drm, "modeset on unsupported output type!\n"); break; } - push = evo_wait(mast, 8); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - /*XXX: SCALE_CTRL_ACTIVE??? 
*/ - evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2); - evo_data(push, (oY << 16) | oX); - evo_data(push, (oY << 16) | oX); - evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1); - evo_data(push, umode->vdisplay << 16 | umode->hdisplay); - } else { - evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); - evo_data(push, (oY << 16) | oX); - evo_data(push, (oY << 16) | oX); - evo_data(push, (oY << 16) | oX); - evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); - evo_data(push, umode->vdisplay << 16 | umode->hdisplay); - } + return script; +} - evo_kick(push, mast); +static void +nv50_display_unk10_handler(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_display *disp = nv50_display(dev); + u32 unk30 = nv_rd32(device, 0x610030), mc; + int i, crtc, or = 0, type = DCB_OUTPUT_ANY; - if (update) { - nv50_display_flip_stop(crtc); - nv50_display_flip_next(crtc, crtc->fb, NULL, 1); - } - } + NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30); + disp->irq.dcb = NULL; - return 0; -} + nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8); -static int -nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) -{ - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - u32 *push, hue, vib; - int adj; - - adj = (nv_crtc->color_vibrance > 0) ? 50 : 0; - vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff; - hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff; - - push = evo_wait(mast, 16); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1); - evo_data(push, (hue << 20) | (vib << 8)); - } else { - evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1); - evo_data(push, (hue << 20) | (vib << 8)); - } + /* Determine which CRTC we're dealing with, only 1 ever will be + * signalled at the same time with the current nouveau code. 
+ */ + crtc = ffs((unk30 & 0x00000060) >> 5) - 1; + if (crtc < 0) + goto ack; + + /* Nothing needs to be done for the encoder */ + crtc = ffs((unk30 & 0x00000180) >> 7) - 1; + if (crtc < 0) + goto ack; + + /* Find which encoder was connected to the CRTC */ + for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) { + mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); + NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; - if (update) { - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); + switch ((mc & 0x00000f00) >> 8) { + case 0: type = DCB_OUTPUT_ANALOG; break; + case 1: type = DCB_OUTPUT_TV; break; + default: + NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc); + goto ack; } - evo_kick(push, mast); + + or = i; } - return 0; -} + for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { + if (nv_device(drm->device)->chipset < 0x90 || + nv_device(drm->device)->chipset == 0x92 || + nv_device(drm->device)->chipset == 0xa0) + mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); + else + mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); -static int -nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, - int x, int y, bool update) -{ - struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb); - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - u32 *push; - - push = evo_wait(mast, 16); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1); - evo_data(push, nvfb->nvbo->bo.offset >> 8); - evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3); - evo_data(push, (fb->height << 16) | fb->width); - evo_data(push, nvfb->r_pitch); - evo_data(push, nvfb->r_format); - evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1); - evo_data(push, (y << 16) | x); - if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, nvfb->r_dma); - } - } else { - evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); - evo_data(push, nvfb->nvbo->bo.offset >> 8); - evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4); - evo_data(push, (fb->height << 16) | fb->width); - evo_data(push, nvfb->r_pitch); - evo_data(push, nvfb->r_format); - evo_data(push, nvfb->r_dma); - evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); - evo_data(push, (y << 16) | x); - } + NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; - if (update) { - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); + switch ((mc & 0x00000f00) >> 8) { + case 0: type = DCB_OUTPUT_LVDS; break; + case 1: type = DCB_OUTPUT_TMDS; break; + case 2: type = DCB_OUTPUT_TMDS; break; + case 5: type = DCB_OUTPUT_TMDS; break; + case 8: type = DCB_OUTPUT_DP; break; + case 9: type = DCB_OUTPUT_DP; break; + default: + NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc); + goto ack; } - evo_kick(push, mast); + + or = i; } - nv_crtc->fb.tile_flags = nvfb->r_dma; - return 0; -} + /* There was no encoder to disable */ + if (type == DCB_OUTPUT_ANY) + goto ack; -static void -nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc) -{ - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - u32 *push = evo_wait(mast, 16); - if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { - evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0x85000000); - evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); - } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); - 
evo_data(push, 0x85000000); - evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); - evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM); - } else { - evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); - evo_data(push, 0x85000000); - evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); - evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); - evo_data(push, NvEvoVRAM); - } - evo_kick(push, mast); - } -} + /* Disable the encoder */ + for (i = 0; i < drm->vbios.dcb.entries; i++) { + struct dcb_output *dcb = &drm->vbios.dcb.entry[i]; -static void -nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc) -{ - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - u32 *push = evo_wait(mast, 16); - if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { - evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x05000000); - } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x05000000); - evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x05000000); - evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); + if (dcb->type == type && (dcb->or & (1 << or))) { + nouveau_bios_run_display_table(dev, 0, -1, dcb, -1); + disp->irq.dcb = dcb; + goto ack; } - evo_kick(push, mast); } + + NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc); +ack: + nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); + nv_wr32(device, 0x610030, 0x80000000); } static void -nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update) +nv50_display_unk20_handler(struct drm_device *dev) { - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - - if (show) - nv50_crtc_cursor_show(nv_crtc); - else - nv50_crtc_cursor_hide(nv_crtc); - - if (update) { - u32 *push = evo_wait(mast, 2); - if (push) { - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, mast); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_display *disp = nv50_display(dev); + u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0; + struct dcb_output *dcb; + int i, crtc, or = 0, type = DCB_OUTPUT_ANY; + + NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30); + dcb = disp->irq.dcb; + if (dcb) { + nouveau_bios_run_display_table(dev, 0, -2, dcb, -1); + disp->irq.dcb = NULL; + } + + /* CRTC clock change requested? 
*/ + crtc = ffs((unk30 & 0x00000600) >> 9) - 1; + if (crtc >= 0) { + pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)); + pclk &= 0x003fffff; + if (pclk) + nv50_crtc_set_clock(dev, crtc, pclk); + + tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc)); + tmp &= ~0x000000f; + nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp); + } + + /* Nothing needs to be done for the encoder */ + crtc = ffs((unk30 & 0x00000180) >> 7) - 1; + if (crtc < 0) + goto ack; + pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff; + + /* Find which encoder is connected to the CRTC */ + for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) { + mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i)); + NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; + + switch ((mc & 0x00000f00) >> 8) { + case 0: type = DCB_OUTPUT_ANALOG; break; + case 1: type = DCB_OUTPUT_TV; break; + default: + NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc); + goto ack; } + + or = i; } -} -static void -nv50_crtc_dpms(struct drm_crtc *crtc, int mode) -{ -} + for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { + if (nv_device(drm->device)->chipset < 0x90 || + nv_device(drm->device)->chipset == 0x92 || + nv_device(drm->device)->chipset == 0xa0) + mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i)); + else + mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i)); -static void -nv50_crtc_prepare(struct drm_crtc *crtc) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nv50_mast *mast = nv50_mast(crtc->dev); - u32 *push; - - nv50_display_flip_stop(crtc); - - push = evo_wait(mast, 2); - if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x40000000); - } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x40000000); - evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x03000000); - evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); + NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; + + switch ((mc & 0x00000f00) >> 8) { + case 0: type = DCB_OUTPUT_LVDS; break; + case 1: type = DCB_OUTPUT_TMDS; break; + case 2: type = DCB_OUTPUT_TMDS; break; + case 5: type = DCB_OUTPUT_TMDS; break; + case 8: type = DCB_OUTPUT_DP; break; + case 9: type = DCB_OUTPUT_DP; break; + default: + NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc); + goto ack; } - evo_kick(push, mast); + or = i; } - nv50_crtc_cursor_show_hide(nv_crtc, false, false); -} - -static void -nv50_crtc_commit(struct drm_crtc *crtc) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nv50_mast *mast = nv50_mast(crtc->dev); - u32 *push; - - push = evo_wait(mast, 32); - if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM_LP); - evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0xc0000000); - evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); - } else - if (nv50_vers(mast) < 
NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, nv_crtc->fb.tile_flags); - evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0xc0000000); - evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); - evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM); - } else { - evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); - evo_data(push, nv_crtc->fb.tile_flags); - evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); - evo_data(push, 0x83000000); - evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); - evo_data(push, NvEvoVRAM); - evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0xffffff00); - } - - evo_kick(push, mast); - } - - nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true); - nv50_display_flip_next(crtc, crtc->fb, NULL, 1); -} - -static bool -nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - -static int -nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) -{ - struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); - int ret; - - ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; - - if (old_fb) { - nvfb = nouveau_framebuffer(old_fb); - nouveau_bo_unpin(nvfb->nvbo); - } - - return 0; -} - -static int -nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, - struct drm_display_mode *mode, int x, int y, - struct drm_framebuffer *old_fb) -{ - struct nv50_mast *mast = nv50_mast(crtc->dev); - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nouveau_connector *nv_connector; - u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; - u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1; - u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; - u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; - u32 vblan2e = 0, vblan2s = 1; - u32 *push; - int ret; - - hactive = mode->htotal; - hsynce = mode->hsync_end - mode->hsync_start - 1; - hbackp = mode->htotal - mode->hsync_end; - hblanke = hsynce + hbackp; - hfrontp = mode->hsync_start - mode->hdisplay; - hblanks = mode->htotal - hfrontp - 1; - - vactive = mode->vtotal * vscan / ilace; - vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; - vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; - vblanke = vsynce + vbackp; - vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; - vblanks = vactive - vfrontp - 1; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) { - vblan2e = vactive + vsynce + vbackp; - vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); - vactive = (vactive * 2) + 1; - } - - ret = nv50_crtc_swap_fbs(crtc, old_fb); - if (ret) - return ret; - - push = evo_wait(mast, 64); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0x00800000 | mode->clock); - evo_data(push, (ilace == 2) ? 
2 : 0); - evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6); - evo_data(push, 0x00000000); - evo_data(push, (vactive << 16) | hactive); - evo_data(push, ( vsynce << 16) | hsynce); - evo_data(push, (vblanke << 16) | hblanke); - evo_data(push, (vblanks << 16) | hblanks); - evo_data(push, (vblan2e << 16) | vblan2s); - evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0x00000311); - evo_data(push, 0x00000100); - } else { - evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6); - evo_data(push, 0x00000000); - evo_data(push, (vactive << 16) | hactive); - evo_data(push, ( vsynce << 16) | hsynce); - evo_data(push, (vblanke << 16) | hblanke); - evo_data(push, (vblanks << 16) | hblanks); - evo_data(push, (vblan2e << 16) | vblan2s); - evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); /* ??? */ - evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3); - evo_data(push, mode->clock * 1000); - evo_data(push, 0x00200000); /* ??? */ - evo_data(push, mode->clock * 1000); - evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2); - evo_data(push, 0x00000311); - evo_data(push, 0x00000100); - } - - evo_kick(push, mast); - } - - nv_connector = nouveau_crtc_connector_get(nv_crtc); - nv50_crtc_set_dither(nv_crtc, false); - nv50_crtc_set_scale(nv_crtc, false); - nv50_crtc_set_color_vibrance(nv_crtc, false); - nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false); - return 0; -} - -static int -nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - struct nouveau_drm *drm = nouveau_drm(crtc->dev); - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - int ret; - - if (!crtc->fb) { - NV_DEBUG(drm, "No FB bound\n"); - return 0; - } - - ret = nv50_crtc_swap_fbs(crtc, old_fb); - if (ret) - return ret; - - nv50_display_flip_stop(crtc); - nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true); - nv50_display_flip_next(crtc, crtc->fb, NULL, 1); - return 0; -} - -static int -nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, - struct drm_framebuffer *fb, int x, int y, - enum mode_set_atomic state) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - nv50_display_flip_stop(crtc); - nv50_crtc_set_image(nv_crtc, fb, x, y, true); - return 0; -} - -static void -nv50_crtc_lut_load(struct drm_crtc *crtc) -{ - struct nv50_disp *disp = nv50_disp(crtc->dev); - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); - int i; - - for (i = 0; i < 256; i++) { - u16 r = nv_crtc->lut.r[i] >> 2; - u16 g = nv_crtc->lut.g[i] >> 2; - u16 b = nv_crtc->lut.b[i] >> 2; - - if (nv_mclass(disp->core) < NVD0_DISP_CLASS) { - writew(r + 0x0000, lut + (i * 0x08) + 0); - writew(g + 0x0000, lut + (i * 0x08) + 2); - writew(b + 0x0000, lut + (i * 0x08) + 4); - } else { - writew(r + 0x6000, lut + (i * 0x20) + 0); - writew(g + 0x6000, lut + (i * 0x20) + 2); - writew(b + 0x6000, lut + (i * 0x20) + 4); - } - } -} - -static int -nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, - uint32_t handle, uint32_t width, uint32_t height) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct drm_gem_object *gem; - struct nouveau_bo *nvbo; - bool visible = (handle != 0); - int i, ret = 0; - - if (visible) { - if (width != 64 || height != 64) - return -EINVAL; - - gem = drm_gem_object_lookup(dev, file_priv, handle); - if (unlikely(!gem)) - return -ENOENT; - nvbo 
= nouveau_gem_object(gem); - - ret = nouveau_bo_map(nvbo); - if (ret == 0) { - for (i = 0; i < 64 * 64; i++) { - u32 v = nouveau_bo_rd32(nvbo, i); - nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v); - } - nouveau_bo_unmap(nvbo); - } - - drm_gem_object_unreference_unlocked(gem); - } - - if (visible != nv_crtc->cursor.visible) { - nv50_crtc_cursor_show_hide(nv_crtc, visible, true); - nv_crtc->cursor.visible = visible; - } - - return ret; -} - -static int -nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) -{ - struct nv50_curs *curs = nv50_curs(crtc); - struct nv50_chan *chan = nv50_chan(curs); - nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff)); - nv_wo32(chan->user, 0x0080, 0x00000000); - return 0; -} - -static void -nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - u32 end = max(start + size, (u32)256); - u32 i; - - for (i = start; i < end; i++) { - nv_crtc->lut.r[i] = r[i]; - nv_crtc->lut.g[i] = g[i]; - nv_crtc->lut.b[i] = b[i]; - } - - nv50_crtc_lut_load(crtc); -} - -static void -nv50_crtc_destroy(struct drm_crtc *crtc) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nv50_disp *disp = nv50_disp(crtc->dev); - struct nv50_head *head = nv50_head(crtc); - nv50_dmac_destroy(disp->core, &head->ovly.base); - nv50_pioc_destroy(disp->core, &head->oimm.base); - nv50_dmac_destroy(disp->core, &head->sync.base); - nv50_pioc_destroy(disp->core, &head->curs.base); - nouveau_bo_unmap(nv_crtc->cursor.nvbo); - if (nv_crtc->cursor.nvbo) - nouveau_bo_unpin(nv_crtc->cursor.nvbo); - nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); - nouveau_bo_unmap(nv_crtc->lut.nvbo); - if (nv_crtc->lut.nvbo) - nouveau_bo_unpin(nv_crtc->lut.nvbo); - nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); - drm_crtc_cleanup(crtc); - kfree(crtc); -} + if (type == DCB_OUTPUT_ANY) + goto ack; -static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { - .dpms = nv50_crtc_dpms, - .prepare = nv50_crtc_prepare, - .commit = nv50_crtc_commit, - .mode_fixup = nv50_crtc_mode_fixup, - .mode_set = nv50_crtc_mode_set, - .mode_set_base = nv50_crtc_mode_set_base, - .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, - .load_lut = nv50_crtc_lut_load, -}; - -static const struct drm_crtc_funcs nv50_crtc_func = { - .cursor_set = nv50_crtc_cursor_set, - .cursor_move = nv50_crtc_cursor_move, - .gamma_set = nv50_crtc_gamma_set, - .set_config = drm_crtc_helper_set_config, - .destroy = nv50_crtc_destroy, - .page_flip = nouveau_crtc_page_flip, -}; - -static void -nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) -{ -} - -static void -nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) -{ -} - -static int -nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) -{ - struct nv50_disp *disp = nv50_disp(dev); - struct nv50_head *head; - struct drm_crtc *crtc; - int ret, i; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (!head) - return -ENOMEM; - - head->base.index = index; - head->base.set_dither = nv50_crtc_set_dither; - head->base.set_scale = nv50_crtc_set_scale; - head->base.set_color_vibrance = nv50_crtc_set_color_vibrance; - head->base.color_vibrance = 50; - head->base.vibrant_hue = 0; - head->base.cursor.set_offset = nv50_cursor_set_offset; - head->base.cursor.set_pos = nv50_cursor_set_pos; - for (i = 0; i < 256; i++) { - head->base.lut.r[i] = i << 8; - head->base.lut.g[i] = i << 8; - head->base.lut.b[i] = i << 8; + /* Enable the encoder */ + for (i = 0; i < 
drm->vbios.dcb.entries; i++) { + dcb = &drm->vbios.dcb.entry[i]; + if (dcb->type == type && (dcb->or & (1 << or))) + break; } - crtc = &head->base.base; - drm_crtc_init(dev, crtc, &nv50_crtc_func); - drm_crtc_helper_add(crtc, &nv50_crtc_hfunc); - drm_mode_crtc_set_gamma_size(crtc, 256); - - ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, &head->base.lut.nvbo); - if (!ret) { - ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM); - if (!ret) { - ret = nouveau_bo_map(head->base.lut.nvbo); - if (ret) - nouveau_bo_unpin(head->base.lut.nvbo); - } - if (ret) - nouveau_bo_ref(NULL, &head->base.lut.nvbo); + if (i == drm->vbios.dcb.entries) { + NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc); + goto ack; } - if (ret) - goto out; - - nv50_crtc_lut_load(crtc); + script = nv50_display_script_select(dev, dcb, mc, pclk); + nouveau_bios_run_display_table(dev, script, pclk, dcb, -1); - /* allocate cursor resources */ - ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index, - &(struct nv50_display_curs_class) { - .head = index, - }, sizeof(struct nv50_display_curs_class), - &head->curs.base); - if (ret) - goto out; - - ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, &head->base.cursor.nvbo); - if (!ret) { - ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM); - if (!ret) { - ret = nouveau_bo_map(head->base.cursor.nvbo); - if (ret) - nouveau_bo_unpin(head->base.lut.nvbo); - } - if (ret) - nouveau_bo_ref(NULL, &head->base.cursor.nvbo); - } - - if (ret) - goto out; - - /* allocate page flip / sync resources */ - ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index, - &(struct nv50_display_sync_class) { - .pushbuf = EVO_PUSH_HANDLE(SYNC, index), - .head = index, - }, sizeof(struct nv50_display_sync_class), - disp->sync->bo.offset, &head->sync.base); - if (ret) - goto out; - - head->sync.sem.offset = EVO_SYNC(1 + index, 0x00); - - /* allocate overlay resources */ - ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, - &(struct nv50_display_oimm_class) { - .head = index, - }, sizeof(struct nv50_display_oimm_class), - &head->oimm.base); - if (ret) - goto out; - - ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index, - &(struct nv50_display_ovly_class) { - .pushbuf = EVO_PUSH_HANDLE(OVLY, index), - .head = index, - }, sizeof(struct nv50_display_ovly_class), - disp->sync->bo.offset, &head->ovly.base); - if (ret) - goto out; - -out: - if (ret) - nv50_crtc_destroy(crtc); - return ret; -} - -/****************************************************************************** - * DAC - *****************************************************************************/ -static void -nv50_dac_dpms(struct drm_encoder *encoder, int mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nv50_disp *disp = nv50_disp(encoder->dev); - int or = nv_encoder->or; - u32 dpms_ctrl; - - dpms_ctrl = 0x00000000; - if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) - dpms_ctrl |= 0x00000001; - if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) - dpms_ctrl |= 0x00000004; - - nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl); -} - -static bool -nv50_dac_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_connector *nv_connector; - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - if (nv_connector && 
nv_connector->native_mode) { - if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { - int id = adjusted_mode->base.id; - *adjusted_mode = *nv_connector->native_mode; - adjusted_mode->base.id = id; - } - } - - return true; -} - -static void -nv50_dac_commit(struct drm_encoder *encoder) -{ -} - -static void -nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct nv50_mast *mast = nv50_mast(encoder->dev); - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); - u32 *push; - - nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); - - push = evo_wait(mast, 8); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - u32 syncs = 0x00000000; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - syncs |= 0x00000001; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - syncs |= 0x00000002; - - evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2); - evo_data(push, 1 << nv_crtc->index); - evo_data(push, syncs); - } else { - u32 magic = 0x31ec6000 | (nv_crtc->index << 25); - u32 syncs = 0x00000001; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - syncs |= 0x00000008; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - syncs |= 0x00000010; - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - magic |= 0x00000001; - - evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); - evo_data(push, syncs); - evo_data(push, magic); - evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1); - evo_data(push, 1 << nv_crtc->index); - } - - evo_kick(push, mast); + if (type == DCB_OUTPUT_DP) { + int link = !(dcb->dpconf.sor.link & 1); + if ((mc & 0x000f0000) == 0x00020000) + nv50_sor_dp_calc_tu(dev, or, link, pclk, 18); + else + nv50_sor_dp_calc_tu(dev, or, link, pclk, 24); } - nv_encoder->crtc = encoder->crtc; -} - -static void -nv50_dac_disconnect(struct drm_encoder *encoder) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nv50_mast *mast = nv50_mast(encoder->dev); - const int or = nv_encoder->or; - u32 *push; - - if (nv_encoder->crtc) { - nv50_crtc_prepare(nv_encoder->crtc); - - push = evo_wait(mast, 4); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0400 + (or * 0x080), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0180 + (or * 0x020), 1); - evo_data(push, 0x00000000); - } - - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, mast); - } + if (dcb->type != DCB_OUTPUT_ANALOG) { + tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); + tmp &= ~0x00000f0f; + if (script & 0x0100) + tmp |= 0x00000101; + nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp); + } else { + nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); } - nv_encoder->crtc = NULL; -} - -static enum drm_connector_status -nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) -{ - struct nv50_disp *disp = nv50_disp(encoder->dev); - int ret, or = nouveau_encoder(encoder)->or; - u32 load = 0; - - ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); - if (ret || load != 7) - return connector_status_disconnected; + disp->irq.dcb = dcb; + disp->irq.pclk = pclk; + disp->irq.script = script; - return connector_status_connected; +ack: + nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20); + nv_wr32(device, 0x610030, 0x80000000); } +/* If programming a TMDS output on a SOR that can also be configured for + * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off. 
+ * + * It looks like the VBIOS TMDS scripts make an attempt at this, however, + * the VBIOS scripts on at least one board I have only switch it off on + * link 0, causing a blank display if the output has previously been + * programmed for DisplayPort. + */ static void -nv50_dac_destroy(struct drm_encoder *encoder) +nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb) { - drm_encoder_cleanup(encoder); - kfree(encoder); -} - -static const struct drm_encoder_helper_funcs nv50_dac_hfunc = { - .dpms = nv50_dac_dpms, - .mode_fixup = nv50_dac_mode_fixup, - .prepare = nv50_dac_disconnect, - .commit = nv50_dac_commit, - .mode_set = nv50_dac_mode_set, - .disable = nv50_dac_disconnect, - .get_crtc = nv50_display_crtc_get, - .detect = nv50_dac_detect -}; - -static const struct drm_encoder_funcs nv50_dac_func = { - .destroy = nv50_dac_destroy, -}; - -static int -nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) -{ - struct drm_device *dev = connector->dev; - struct nouveau_encoder *nv_encoder; + struct nouveau_device *device = nouveau_dev(dev); + int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); struct drm_encoder *encoder; + u32 tmp; - nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); - if (!nv_encoder) - return -ENOMEM; - nv_encoder->dcb = dcbe; - nv_encoder->or = ffs(dcbe->or) - 1; - - encoder = to_drm_encoder(nv_encoder); - encoder->possible_crtcs = dcbe->heads; - encoder->possible_clones = 0; - drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC); - drm_encoder_helper_add(encoder, &nv50_dac_hfunc); - - drm_mode_connector_attach_encoder(connector, encoder); - return 0; -} - -/****************************************************************************** - * Audio - *****************************************************************************/ -static void -nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_connector *nv_connector; - struct nv50_disp *disp = nv50_disp(encoder->dev); - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - if (!drm_detect_monitor_audio(nv_connector->edid)) + if (dcb->type != DCB_OUTPUT_TMDS) return; - drm_edid_to_eld(&nv_connector->base, nv_connector->edid); - - nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, - nv_connector->base.eld, - nv_connector->base.eld[2] * 4); -} - -static void -nv50_audio_disconnect(struct drm_encoder *encoder) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nv50_disp *disp = nv50_disp(encoder->dev); - - nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0); -} - -/****************************************************************************** - * HDMI - *****************************************************************************/ -static void -nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); - struct nouveau_connector *nv_connector; - struct nv50_disp *disp = nv50_disp(encoder->dev); - const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; - u32 rekey = 56; /* binary driver, and tegra constant */ - u32 max_ac_packet; - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - if (!drm_detect_hdmi_monitor(nv_connector->edid)) - return; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct 
nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - max_ac_packet = mode->htotal - mode->hdisplay; - max_ac_packet -= rekey; - max_ac_packet -= 18; /* constant from tegra */ - max_ac_packet /= 32; - - nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, - NV84_DISP_SOR_HDMI_PWR_STATE_ON | - (max_ac_packet << 16) | rekey); - - nv50_audio_mode_set(encoder, mode); -} - -static void -nv50_hdmi_disconnect(struct drm_encoder *encoder) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); - struct nv50_disp *disp = nv50_disp(encoder->dev); - const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; - - nv50_audio_disconnect(encoder); - - nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000); -} - -/****************************************************************************** - * SOR - *****************************************************************************/ -static void -nv50_sor_dpms(struct drm_encoder *encoder, int mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct drm_device *dev = encoder->dev; - struct nv50_disp *disp = nv50_disp(dev); - struct drm_encoder *partner; - int or = nv_encoder->or; - - nv_encoder->last_dpms = mode; - - list_for_each_entry(partner, &dev->mode_config.encoder_list, head) { - struct nouveau_encoder *nv_partner = nouveau_encoder(partner); - - if (partner->encoder_type != DRM_MODE_ENCODER_TMDS) - continue; - - if (nv_partner != nv_encoder && - nv_partner->dcb->or == nv_encoder->dcb->or) { - if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) - return; + if (nv_encoder->dcb->type == DCB_OUTPUT_DP && + nv_encoder->dcb->or & (1 << or)) { + tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)); + tmp &= ~NV50_SOR_DP_CTRL_ENABLED; + nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp); break; } } - - nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON)); - - if (nv_encoder->dcb->type == DCB_OUTPUT_DP) - nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core); -} - -static bool -nv50_sor_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_connector *nv_connector; - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - if (nv_connector && nv_connector->native_mode) { - if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { - int id = adjusted_mode->base.id; - *adjusted_mode = *nv_connector->native_mode; - adjusted_mode->base.id = id; - } - } - - return true; } static void -nv50_sor_disconnect(struct drm_encoder *encoder) +nv50_display_unk40_handler(struct drm_device *dev) { - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nv50_mast *mast = nv50_mast(encoder->dev); - const int or = nv_encoder->or; - u32 *push; - - if (nv_encoder->crtc) { - nv50_crtc_prepare(nv_encoder->crtc); - - push = evo_wait(mast, 4); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0600 + (or * 0x40), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0200 + (or * 0x20), 1); - evo_data(push, 0x00000000); - } - - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, mast); - } - - nv50_hdmi_disconnect(encoder); - } + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_display *disp = nv50_display(dev); + struct dcb_output *dcb = disp->irq.dcb; + u16 script = 
disp->irq.script; + u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk; - nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; - nv_encoder->crtc = NULL; -} + NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30); + disp->irq.dcb = NULL; + if (!dcb) + goto ack; -static void -nv50_sor_prepare(struct drm_encoder *encoder) -{ - nv50_sor_disconnect(encoder); - if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP) - evo_sync(encoder->dev); -} + nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1); + nv50_display_unk40_dp_set_tmds(dev, dcb); -static void -nv50_sor_commit(struct drm_encoder *encoder) -{ +ack: + nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); + nv_wr32(device, 0x610030, 0x80000000); + nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8); } static void -nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, - struct drm_display_mode *mode) +nv50_display_bh(unsigned long data) { - struct nv50_disp *disp = nv50_disp(encoder->dev); - struct nv50_mast *mast = nv50_mast(encoder->dev); - struct drm_device *dev = encoder->dev; + struct drm_device *dev = (struct drm_device *)data; + struct nouveau_device *device = nouveau_dev(dev); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); - struct nouveau_connector *nv_connector; - struct nvbios *bios = &drm->vbios; - u32 *push, lvds = 0; - u8 owner = 1 << nv_crtc->index; - u8 proto = 0xf; - u8 depth = 0x0; - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - switch (nv_encoder->dcb->type) { - case DCB_OUTPUT_TMDS: - if (nv_encoder->dcb->sorconf.link & 1) { - if (mode->clock < 165000) - proto = 0x1; - else - proto = 0x5; - } else { - proto = 0x2; - } - nv50_hdmi_mode_set(encoder, mode); - break; - case DCB_OUTPUT_LVDS: - proto = 0x0; + for (;;) { + uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0); + uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1); - if (bios->fp_no_ddc) { - if (bios->fp.dual_link) - lvds |= 0x0100; - if (bios->fp.if_is_24bit) - lvds |= 0x0200; - } else { - if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { - if (((u8 *)nv_connector->edid)[121] == 2) - lvds |= 0x0100; - } else - if (mode->clock >= bios->fp.duallink_transition_clk) { - lvds |= 0x0100; - } - - if (lvds & 0x0100) { - if (bios->fp.strapless_is_24bit & 2) - lvds |= 0x0200; - } else { - if (bios->fp.strapless_is_24bit & 1) - lvds |= 0x0200; - } + NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); - if (nv_connector->base.display_info.bpc == 8) - lvds |= 0x0200; - } - - nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds); - break; - case DCB_OUTPUT_DP: - if (nv_connector->base.display_info.bpc == 6) { - nv_encoder->dp.datarate = mode->clock * 18 / 8; - depth = 0x2; - } else - if (nv_connector->base.display_info.bpc == 8) { - nv_encoder->dp.datarate = mode->clock * 24 / 8; - depth = 0x5; - } else { - nv_encoder->dp.datarate = mode->clock * 30 / 8; - depth = 0x6; - } - - if (nv_encoder->dcb->sorconf.link & 1) - proto = 0x8; + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) + nv50_display_unk10_handler(dev); else - proto = 0x9; - break; - default: - BUG_ON(1); - break; - } - - nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); - - push = evo_wait(nv50_mast(dev), 8); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_CLASS) { - evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1); - evo_data(push, (depth << 16) | (proto << 8) | owner); - } else { - u32 magic = 
0x31ec6000 | (nv_crtc->index << 25); - u32 syncs = 0x00000001; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - syncs |= 0x00000008; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - syncs |= 0x00000010; - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - magic |= 0x00000001; - - evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); - evo_data(push, syncs | (depth << 6)); - evo_data(push, magic); - evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1); - evo_data(push, owner | (proto << 8)); - } - - evo_kick(push, mast); + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20) + nv50_display_unk20_handler(dev); + else + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40) + nv50_display_unk40_handler(dev); + else + break; } - nv_encoder->crtc = encoder->crtc; + nv_wr32(device, NV03_PMC_INTR_EN_0, 1); } static void -nv50_sor_destroy(struct drm_encoder *encoder) +nv50_display_error_handler(struct drm_device *dev) { - drm_encoder_cleanup(encoder); - kfree(encoder); -} - -static const struct drm_encoder_helper_funcs nv50_sor_hfunc = { - .dpms = nv50_sor_dpms, - .mode_fixup = nv50_sor_mode_fixup, - .prepare = nv50_sor_prepare, - .commit = nv50_sor_commit, - .mode_set = nv50_sor_mode_set, - .disable = nv50_sor_disconnect, - .get_crtc = nv50_display_crtc_get, -}; - -static const struct drm_encoder_funcs nv50_sor_func = { - .destroy = nv50_sor_destroy, -}; - -static int -nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) -{ - struct drm_device *dev = connector->dev; - struct nouveau_encoder *nv_encoder; - struct drm_encoder *encoder; - - nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); - if (!nv_encoder) - return -ENOMEM; - nv_encoder->dcb = dcbe; - nv_encoder->or = ffs(dcbe->or) - 1; - nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; - - encoder = to_drm_encoder(nv_encoder); - encoder->possible_crtcs = dcbe->heads; - encoder->possible_clones = 0; - drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &nv50_sor_hfunc); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; + u32 addr, data; + int chid; - drm_mode_connector_attach_encoder(connector, encoder); - return 0; -} + for (chid = 0; chid < 5; chid++) { + if (!(channels & (1 << chid))) + continue; -/****************************************************************************** - * Init - *****************************************************************************/ -void -nv50_display_fini(struct drm_device *dev) -{ -} + nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); + addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid)); + data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid)); + NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x " + "(0x%04x 0x%02x)\n", chid, + addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); -int -nv50_display_init(struct drm_device *dev) -{ - u32 *push = evo_wait(nv50_mast(dev), 32); - if (push) { - evo_mthd(push, 0x0088, 1); - evo_data(push, NvEvoSync); - evo_kick(push, nv50_mast(dev)); - return evo_sync(dev); + nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); } - - return -EBUSY; } void -nv50_display_destroy(struct drm_device *dev) -{ - struct nv50_disp *disp = nv50_disp(dev); - - nv50_dmac_destroy(disp->core, &disp->mast.base); - - nouveau_bo_unmap(disp->sync); - if (disp->sync) - nouveau_bo_unpin(disp->sync); - nouveau_bo_ref(NULL, &disp->sync); - - nouveau_display(dev)->priv = NULL; - kfree(disp); -} - -int 
-nv50_display_create(struct drm_device *dev) +nv50_display_intr(struct drm_device *dev) { - static const u16 oclass[] = { - NVE0_DISP_CLASS, - NVD0_DISP_CLASS, - NVA3_DISP_CLASS, - NV94_DISP_CLASS, - NVA0_DISP_CLASS, - NV84_DISP_CLASS, - NV50_DISP_CLASS, - }; struct nouveau_device *device = nouveau_dev(dev); struct nouveau_drm *drm = nouveau_drm(dev); - struct dcb_table *dcb = &drm->vbios.dcb; - struct drm_connector *connector, *tmp; - struct nv50_disp *disp; - struct dcb_output *dcbe; - int crtcs, ret, i; + struct nv50_display *disp = nv50_display(dev); + uint32_t delayed = 0; - disp = kzalloc(sizeof(*disp), GFP_KERNEL); - if (!disp) - return -ENOMEM; + while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { + uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0); + uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1); + uint32_t clock; - nouveau_display(dev)->priv = disp; - nouveau_display(dev)->dtor = nv50_display_destroy; - nouveau_display(dev)->init = nv50_display_init; - nouveau_display(dev)->fini = nv50_display_fini; + NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); - /* small shared memory area we use for notifiers and semaphores */ - ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, &disp->sync); - if (!ret) { - ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM); - if (!ret) { - ret = nouveau_bo_map(disp->sync); - if (ret) - nouveau_bo_unpin(disp->sync); - } - if (ret) - nouveau_bo_ref(NULL, &disp->sync); - } - - if (ret) - goto out; - - /* attempt to allocate a supported evo display class */ - ret = -ENODEV; - for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) { - ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, - 0xd1500000, oclass[i], NULL, 0, - &disp->core); - } - - if (ret) - goto out; - - /* allocate master evo channel */ - ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0, - &(struct nv50_display_mast_class) { - .pushbuf = EVO_PUSH_HANDLE(MAST, 0), - }, sizeof(struct nv50_display_mast_class), - disp->sync->bo.offset, &disp->mast.base); - if (ret) - goto out; - - /* create crtc objects to represent the hw heads */ - if (nv_mclass(disp->core) >= NVD0_DISP_CLASS) - crtcs = nv_rd32(device, 0x022448); - else - crtcs = 2; - - for (i = 0; i < crtcs; i++) { - ret = nv50_crtc_create(dev, disp->core, i); - if (ret) - goto out; - } + if (!intr0 && !(intr1 & ~delayed)) + break; - /* create encoder/connector objects based on VBIOS DCB table */ - for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) { - connector = nouveau_connector_create(dev, dcbe->connector); - if (IS_ERR(connector)) - continue; + if (intr0 & 0x001f0000) { + nv50_display_error_handler(dev); + intr0 &= ~0x001f0000; + } - if (dcbe->location != DCB_LOC_ON_CHIP) { - NV_WARN(drm, "skipping off-chip encoder %d/%d\n", - dcbe->type, ffs(dcbe->or) - 1); - continue; + if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { + intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC; + delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC; } - switch (dcbe->type) { - case DCB_OUTPUT_TMDS: - case DCB_OUTPUT_LVDS: - case DCB_OUTPUT_DP: - nv50_sor_create(connector, dcbe); - break; - case DCB_OUTPUT_ANALOG: - nv50_dac_create(connector, dcbe); - break; - default: - NV_WARN(drm, "skipping unsupported encoder %d/%d\n", - dcbe->type, ffs(dcbe->or) - 1); - continue; + clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 | + NV50_PDISPLAY_INTR_1_CLK_UNK20 | + NV50_PDISPLAY_INTR_1_CLK_UNK40)); + if (clock) { + nv_wr32(device, NV03_PMC_INTR_EN_0, 0); + tasklet_schedule(&disp->tasklet); + delayed 
|= clock; + intr1 &= ~clock; } - } - /* cull any connectors we created that don't have an encoder */ - list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { - if (connector->encoder_ids[0]) - continue; + if (intr0) { + NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0); + nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0); + } - NV_WARN(drm, "%s has no encoders, removing\n", - drm_get_connector_name(connector)); - connector->funcs->destroy(connector); + if (intr1) { + NV_ERROR(drm, + "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1); + nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1); + } } - -out: - if (ret) - nv50_display_destroy(dev); - return ret; } diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.h b/trunk/drivers/gpu/drm/nouveau/nv50_display.h index 70da347aa8c5..973554d8a7a6 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_display.h +++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.h @@ -30,16 +30,77 @@ #include "nouveau_display.h" #include "nouveau_crtc.h" #include "nouveau_reg.h" +#include "nv50_evo.h" -int nv50_display_create(struct drm_device *); -void nv50_display_destroy(struct drm_device *); -int nv50_display_init(struct drm_device *); -void nv50_display_fini(struct drm_device *); +struct nv50_display_crtc { + struct nouveau_channel *sync; + struct { + struct nouveau_bo *bo; + u32 offset; + u16 value; + } sem; +}; -void nv50_display_flip_stop(struct drm_crtc *); +struct nv50_display { + struct nouveau_channel *master; + + struct nouveau_gpuobj *ramin; + u32 dmao; + u32 hash; + + struct nv50_display_crtc crtc[2]; + + struct tasklet_struct tasklet; + struct { + struct dcb_output *dcb; + u16 script; + u32 pclk; + } irq; +}; + +static inline struct nv50_display * +nv50_display(struct drm_device *dev) +{ + return nouveau_display(dev)->priv; +} + +int nv50_display_early_init(struct drm_device *dev); +void nv50_display_late_takedown(struct drm_device *dev); +int nv50_display_create(struct drm_device *dev); +int nv50_display_init(struct drm_device *dev); +void nv50_display_fini(struct drm_device *dev); +void nv50_display_destroy(struct drm_device *dev); +void nv50_display_intr(struct drm_device *); +int nv50_crtc_blank(struct nouveau_crtc *, bool blank); +int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); + +u32 nv50_display_active_crtcs(struct drm_device *); + +int nv50_display_sync(struct drm_device *); int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, + struct nouveau_channel *chan); +void nv50_display_flip_stop(struct drm_crtc *); + +int nv50_evo_create(struct drm_device *dev); +void nv50_evo_destroy(struct drm_device *dev); +int nv50_evo_init(struct drm_device *dev); +void nv50_evo_fini(struct drm_device *dev); +void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base, + u64 size); +int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype, + u64 base, u64 size, struct nouveau_gpuobj **); + +int nvd0_display_create(struct drm_device *); +void nvd0_display_destroy(struct drm_device *); +int nvd0_display_init(struct drm_device *); +void nvd0_display_fini(struct drm_device *); +void nvd0_display_intr(struct drm_device *); + +void nvd0_display_flip_stop(struct drm_crtc *); +int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, struct nouveau_channel *, u32 swap_interval); struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head); +struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head); #endif /* __NV50_DISPLAY_H__ */ diff --git 
a/trunk/drivers/gpu/drm/nouveau/nv50_evo.c b/trunk/drivers/gpu/drm/nouveau/nv50_evo.c new file mode 100644 index 000000000000..9f6f55cdfa77 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_evo.c @@ -0,0 +1,403 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nv50_display.h" + +#include + +#include +#include + +static u32 +nv50_evo_rd32(struct nouveau_object *object, u32 addr) +{ + void __iomem *iomem = object->oclass->ofuncs->rd08; + return ioread32_native(iomem + addr); +} + +static void +nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data) +{ + void __iomem *iomem = object->oclass->ofuncs->rd08; + iowrite32_native(data, iomem + addr); +} + +static void +nv50_evo_channel_del(struct nouveau_channel **pevo) +{ + struct nouveau_channel *evo = *pevo; + + if (!evo) + return; + *pevo = NULL; + + nouveau_bo_unmap(evo->push.buffer); + nouveau_bo_ref(NULL, &evo->push.buffer); + + if (evo->object) + iounmap(evo->object->oclass->ofuncs); + + kfree(evo); +} + +int +nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype, + u64 base, u64 size, struct nouveau_gpuobj **pobj) +{ + struct drm_device *dev = evo->fence; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_display *disp = nv50_display(dev); + u32 dmao = disp->dmao; + u32 hash = disp->hash; + u32 flags5; + + if (nv_device(drm->device)->chipset < 0xc0) { + /* not supported on 0x50, specified in format mthd */ + if (nv_device(drm->device)->chipset == 0x50) + memtype = 0; + flags5 = 0x00010000; + } else { + if (memtype & 0x80000000) + flags5 = 0x00000000; /* large pages */ + else + flags5 = 0x00020000; + } + + nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22)); + nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1)); + nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base)); + nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 | + upper_32_bits(base)); + nv_wo32(disp->ramin, dmao + 0x10, 0x00000000); + nv_wo32(disp->ramin, dmao + 0x14, flags5); + + nv_wo32(disp->ramin, hash + 0x00, handle); + nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) | + evo->handle); + + disp->dmao += 0x20; + disp->hash += 0x08; + return 0; +} + +static int +nv50_evo_channel_new(struct drm_device *dev, int chid, + struct nouveau_channel **pevo) +{ + struct nouveau_drm *drm = nouveau_drm(dev); 
+ struct nv50_display *disp = nv50_display(dev); + struct nouveau_channel *evo; + int ret; + + evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); + if (!evo) + return -ENOMEM; + *pevo = evo; + + evo->drm = drm; + evo->handle = chid; + evo->fence = dev; + evo->user_get = 4; + evo->user_put = 0; + + ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL, + &evo->push.buffer); + if (ret == 0) + ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pevo); + return ret; + } + + ret = nouveau_bo_map(evo->push.buffer); + if (ret) { + NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pevo); + return ret; + } + + evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL); +#ifdef NOUVEAU_OBJECT_MAGIC + evo->object->_magic = NOUVEAU_OBJECT_MAGIC; +#endif + evo->object->parent = nv_object(disp->ramin)->parent; + evo->object->engine = nv_object(disp->ramin)->engine; + evo->object->oclass = + kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL); + evo->object->oclass->ofuncs = + kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL); + evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32; + evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32; + evo->object->oclass->ofuncs->rd08 = + ioremap(pci_resource_start(dev->pdev, 0) + + NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE); + return 0; +} + +static int +nv50_evo_channel_init(struct nouveau_channel *evo) +{ + struct nouveau_drm *drm = evo->drm; + struct nouveau_device *device = nv_device(drm->device); + int id = evo->handle, ret, i; + u64 pushbuf = evo->push.buffer->bo.offset; + u32 tmp; + + tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)); + if ((tmp & 0x009f0000) == 0x00020000) + nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); + + tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)); + if ((tmp & 0x003f0000) == 0x00030000) + nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); + + /* initialise fifo */ + nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | + NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | + NV50_PDISPLAY_EVO_DMA_CB_VALID); + nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); + nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id); + nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + + nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000); + nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { + NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id, + nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id))); + return -EBUSY; + } + + /* enable error reporting on the channel */ + nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id); + + evo->dma.max = (4096/4) - 2; + evo->dma.max &= ~7; + evo->dma.put = 0; + evo->dma.cur = evo->dma.put; + evo->dma.free = evo->dma.max - evo->dma.cur; + + ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); + if (ret) + return ret; + + for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + OUT_RING(evo, 0); + + return 0; +} + +static void +nv50_evo_channel_fini(struct nouveau_channel *evo) +{ + struct nouveau_drm *drm = evo->drm; + struct nouveau_device *device = nv_device(drm->device); + int id = evo->handle; + + nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000); + nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); + nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << 
id)); + nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); + if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { + NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id, + nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id))); + } +} + +void +nv50_evo_destroy(struct drm_device *dev) +{ + struct nv50_display *disp = nv50_display(dev); + int i; + + for (i = 0; i < 2; i++) { + if (disp->crtc[i].sem.bo) { + nouveau_bo_unmap(disp->crtc[i].sem.bo); + nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo); + } + nv50_evo_channel_del(&disp->crtc[i].sync); + } + nv50_evo_channel_del(&disp->master); + nouveau_gpuobj_ref(NULL, &disp->ramin); +} + +int +nv50_evo_create(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nv50_display *disp = nv50_display(dev); + struct nouveau_channel *evo; + int ret, i, j; + + /* setup object management on it, any other evo channel will + * use this also as there's no per-channel support on the + * hardware + */ + ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536, + NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin); + if (ret) { + NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret); + goto err; + } + + disp->hash = 0x0000; + disp->dmao = 0x1000; + + /* create primary evo channel, the one we use for modesetting + * purposes + */ + ret = nv50_evo_channel_new(dev, 0, &disp->master); + if (ret) + return ret; + evo = disp->master; + + ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, + disp->ramin->addr + 0x2000, 0x1000, NULL); + if (ret) + goto err; + + /* create some default objects for the scanout memtypes we support */ + ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000, + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000, + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 | + (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe), + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 | + (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe), + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + /* create "display sync" channels and other structures we need + * to implement page flipping + */ + for (i = 0; i < 2; i++) { + struct nv50_display_crtc *dispc = &disp->crtc[i]; + u64 offset; + + ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync); + if (ret) + goto err; + + ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &dispc->sem.bo); + if (!ret) { + ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(dispc->sem.bo); + if (ret) + nouveau_bo_ref(NULL, &dispc->sem.bo); + offset = dispc->sem.bo->bo.offset; + } + + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000, + offset, 4096, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000, + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 | + (nv_device(drm->device)->chipset < 0xc0 ? + 0x7a : 0xfe), + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 | + (nv_device(drm->device)->chipset < 0xc0 ?
+ 0x70 : 0xfe), + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + for (j = 0; j < 4096; j += 4) + nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000); + dispc->sem.offset = 0; + } + + return 0; + +err: + nv50_evo_destroy(dev); + return ret; +} + +int +nv50_evo_init(struct drm_device *dev) +{ + struct nv50_display *disp = nv50_display(dev); + int ret, i; + + ret = nv50_evo_channel_init(disp->master); + if (ret) + return ret; + + for (i = 0; i < 2; i++) { + ret = nv50_evo_channel_init(disp->crtc[i].sync); + if (ret) + return ret; + } + + return 0; +} + +void +nv50_evo_fini(struct drm_device *dev) +{ + struct nv50_display *disp = nv50_display(dev); + int i; + + for (i = 0; i < 2; i++) { + if (disp->crtc[i].sync) + nv50_evo_channel_fini(disp->crtc[i].sync); + } + + if (disp->master) + nv50_evo_channel_fini(disp->master); +} diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_evo.h b/trunk/drivers/gpu/drm/nouveau/nv50_evo.h new file mode 100644 index 000000000000..771d879bc834 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_evo.h @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __NV50_EVO_H__ +#define __NV50_EVO_H__ + +#define NV50_EVO_UPDATE 0x00000080 +#define NV50_EVO_UNK84 0x00000084 +#define NV50_EVO_UNK84_NOTIFY 0x40000000 +#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000 +#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000 +#define NV50_EVO_DMA_NOTIFY 0x00000088 +#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff +#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000 +#define NV50_EVO_UNK8C 0x0000008C + +#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r) +#define NV50_EVO_DAC_MODE_CTRL 0x00000400 +#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001 +#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002 +#define NV50_EVO_DAC_MODE_CTRL2 0x00000404 +#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001 +#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002 + +#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r) +#define NV50_EVO_SOR_MODE_CTRL 0x00000600 +#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001 +#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002 +#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100 +#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400 +#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000 +#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000 + +#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r) +#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r) +#define NV50_EVO_CRTC_UNK0800 0x00000800 +#define NV50_EVO_CRTC_CLOCK 0x00000804 +#define NV50_EVO_CRTC_INTERLACE 0x00000808 +#define NV50_EVO_CRTC_DISPLAY_START 0x00000810 +#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814 +#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818 +#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c +#define NV50_EVO_CRTC_UNK0820 0x00000820 +#define NV50_EVO_CRTC_UNK0824 0x00000824 +#define NV50_EVO_CRTC_UNK082C 0x0000082c +#define NV50_EVO_CRTC_CLUT_MODE 0x00000840 +/* You can't have a palette in 8 bit mode (=OFF) */ +#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000 +#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000 +#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000 +#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844 +#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C +#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff +#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_FB_OFFSET 0x00000860 +#define NV50_EVO_CRTC_FB_SIZE 0x00000868 +#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c +#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000 +#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000 +#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000 +#define NV50_EVO_CRTC_FB_DEPTH 0x00000870 +#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00 +#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900 +#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800 +#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00 +#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100 +#define NV50_EVO_CRTC_FB_DMA 0x00000874 +#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff +#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880 +#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000 +#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000 +#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884 +#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c +#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff +#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0 +#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000 +#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011 +#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4 +#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000 +#define 
NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009 +#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8 +#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00 +#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000 +#define NV50_EVO_CRTC_FB_POS 0x000008c0 +#define NV50_EVO_CRTC_REAL_RES 0x000008c8 +#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4 +#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \ + ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF)) +/* Both of these are needed, otherwise nothing happens. */ +#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 +#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc +#define NV50_EVO_CRTC_UNK900 0x00000900 +#define NV50_EVO_CRTC_UNK904 0x00000904 + +#endif diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_fence.c b/trunk/drivers/gpu/drm/nouveau/nv50_fence.c index c20f2727ea0b..e0763ea88ee2 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_fence.c @@ -110,11 +110,8 @@ nv50_fence_create(struct nouveau_drm *drm) 0, 0x0000, NULL, &priv->bo); if (!ret) { ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); - if (!ret) { + if (!ret) ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } if (ret) nouveau_bo_ref(NULL, &priv->bo); } diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_pm.c b/trunk/drivers/gpu/drm/nouveau/nv50_pm.c index 8bd5d2781baf..c4a65039b1ca 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_pm.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_pm.c @@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl, { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nouveau_dev(dev); - u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */ + u32 crtc_mask = nv50_display_active_crtcs(dev); struct nouveau_mem_exec_func exec = { .dev = dev, .precharge = mclk_precharge, diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_sor.c b/trunk/drivers/gpu/drm/nouveau/nv50_sor.c new file mode 100644 index 000000000000..b562b59e1326 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_sor.c @@ -0,0 +1,530 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include + +#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) +#include "nouveau_reg.h" +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +#include + +static u32 +nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ + static const u8 nv50[] = { 16, 8, 0, 24 }; + if (nv_device(drm->device)->chipset == 0xaf) + return nvaf[lane]; + return nv50[lane]; +} + +static void +nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24); +} + +static void +nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb, + u8 lane, u8 swing, u8 preem) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane); + u32 mask = 0x000000ff << shift; + u8 *table, *entry, *config; + + table = nouveau_dp_bios_data(dev, dcb, &entry); + if (!table || (table[0] != 0x20 && table[0] != 0x21)) { + NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n"); + return; + } + + config = entry + table[4]; + while (config[0] != swing || config[1] != preem) { + config += table[5]; + if (config >= entry + table[4] + entry[4] * table[5]) + return; + } + + nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift); + nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift); + nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8); +} + +static void +nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc, + int link_nr, u32 link_bw, bool enhframe) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000; + u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000; + u8 *table, *entry, mask; + int i; + + table = nouveau_dp_bios_data(dev, dcb, &entry); + if (!table || (table[0] != 0x20 && table[0] != 0x21)) { + NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n"); + return; + } + + entry = ROMPTR(dev, entry[10]); + if (entry) { + while (link_bw < ROM16(entry[0]) * 10) + entry += 4; + + nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc); + } + + dpctrl |= ((1 << link_nr) - 1) << 16; + if (enhframe) + dpctrl |= 0x00004000; + + if (link_bw > 162000) + clksor |= 0x00040000; + + nv_wr32(device, 0x614300 + (or * 0x800), clksor); + nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl); + + mask = 0; + for (i = 0; i < link_nr; i++) + mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3); + nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask); +} + +static void +nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000; + u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)); + if (clksor & 0x000c0000) + *bw = 270000; + else + *bw = 162000; + + if 
(dpctrl > 0x00030000) *nr = 4; + else if (dpctrl > 0x00010000) *nr = 2; + else *nr = 1; +} + +void +nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + const u32 symbol = 100000; + int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; + int TU, VTUi, VTUf, VTUa; + u64 link_data_rate, link_ratio, unk; + u32 best_diff = 64 * symbol; + u32 link_nr, link_bw, r; + + /* calculate packed data rate for each lane */ + nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw); + link_data_rate = (clk * bpp / 8) / link_nr; + + /* calculate ratio of packed data rate to link symbol rate */ + link_ratio = link_data_rate * symbol; + r = do_div(link_ratio, link_bw); + + for (TU = 64; TU >= 32; TU--) { + /* calculate average number of valid symbols in each TU */ + u32 tu_valid = link_ratio * TU; + u32 calc, diff; + + /* find a hw representation for the fraction.. */ + VTUi = tu_valid / symbol; + calc = VTUi * symbol; + diff = tu_valid - calc; + if (diff) { + if (diff >= (symbol / 2)) { + VTUf = symbol / (symbol - diff); + if (symbol - (VTUf * diff)) + VTUf++; + + if (VTUf <= 15) { + VTUa = 1; + calc += symbol - (symbol / VTUf); + } else { + VTUa = 0; + VTUf = 1; + calc += symbol; + } + } else { + VTUa = 0; + VTUf = min((int)(symbol / diff), 15); + calc += symbol / VTUf; + } + + diff = calc - tu_valid; + } else { + /* no remainder, but the hw doesn't like the fractional + * part to be zero. decrement the integer part and + * have the fraction add a whole symbol back + */ + VTUa = 0; + VTUf = 1; + VTUi--; + } + + if (diff < best_diff) { + best_diff = diff; + bestTU = TU; + bestVTUa = VTUa; + bestVTUf = VTUf; + bestVTUi = VTUi; + if (diff == 0) + break; + } + } + + if (!bestTU) { + NV_ERROR(drm, "DP: unable to find suitable config\n"); + return; + } + + /* XXX close to vbios numbers, but not right */ + unk = (symbol - link_ratio) * bestTU; + unk *= link_ratio; + r = do_div(unk, symbol); + r = do_div(unk, symbol); + unk += 6; + + nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2); + nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 | + bestVTUf << 16 | + bestVTUi << 8 | + unk); +} +static void +nv50_sor_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct drm_device *dev = encoder->dev; + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + + if (!nv_encoder->crtc) + return; + nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); + + NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or); + + ret = RING_SPACE(evo, 4); + if (ret) { + NV_ERROR(drm, "no space while disconnecting SOR\n"); + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); + OUT_RING (evo, 0); + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + + nouveau_hdmi_mode_set(encoder, NULL); + + nv_encoder->crtc = NULL; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; +} + +static void +nv50_sor_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_encoder *enc; + uint32_t val; + int or = nv_encoder->or; + + NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode); + + 
nv_encoder->last_dpms = mode; + list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nvenc = nouveau_encoder(enc); + + if (nvenc == nv_encoder || + (nvenc->dcb->type != DCB_OUTPUT_TMDS && + nvenc->dcb->type != DCB_OUTPUT_LVDS && + nvenc->dcb->type != DCB_OUTPUT_DP) || + nvenc->dcb->or != nv_encoder->dcb->or) + continue; + + if (nvenc->last_dpms == DRM_MODE_DPMS_ON) + return; + } + + /* wait for it to be done */ + if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), + NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or))); + } + + val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)); + + if (mode == DRM_MODE_DPMS_ON) + val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON; + else + val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON; + + nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val | + NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING); + if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or), + NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { + NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or); + NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or, + nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or))); + } + + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + struct dp_train_func func = { + .link_set = nv50_sor_dp_link_set, + .train_set = nv50_sor_dp_train_set, + .train_adj = nv50_sor_dp_train_adj + }; + + nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func); + } +} + +static void +nv50_sor_save(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + NV_ERROR(drm, "!!\n"); +} + +static void +nv50_sor_restore(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + NV_ERROR(drm, "!!\n"); +} + +static bool +nv50_sor_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *connector; + + NV_DEBUG(drm, "or %d\n", nv_encoder->or); + + connector = nouveau_encoder_connector_get(nv_encoder); + if (!connector) { + NV_ERROR(drm, "Encoder has no connector\n"); + return false; + } + + if (connector->scaling_mode != DRM_MODE_SCALE_NONE && + connector->native_mode) + drm_mode_copy(adjusted_mode, connector->native_mode); + + return true; +} + +static void +nv50_sor_prepare(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + nv50_sor_disconnect(encoder); + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + /* avoid race between link training and supervisor intr */ + nv50_display_sync(encoder->dev); + } +} + +static void +nv50_sor_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, + struct drm_display_mode *mode) +{ + struct nouveau_channel *evo = nv50_display(encoder->dev)->master; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + uint32_t mode_ctl = 0; + int ret; + + NV_DEBUG(drm, "or %d type %d -> crtc %d\n", + nv_encoder->or, nv_encoder->dcb->type, crtc->index); + nv_encoder->crtc = encoder->crtc; + + switch (nv_encoder->dcb->type) { + case DCB_OUTPUT_TMDS: + if 
(nv_encoder->dcb->sorconf.link & 1) { + if (mode->clock < 165000) + mode_ctl = 0x0100; + else + mode_ctl = 0x0500; + } else + mode_ctl = 0x0200; + + nouveau_hdmi_mode_set(encoder, mode); + break; + case DCB_OUTPUT_DP: + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->base.display_info.bpc == 6) { + nv_encoder->dp.datarate = mode->clock * 18 / 8; + mode_ctl |= 0x00020000; + } else { + nv_encoder->dp.datarate = mode->clock * 24 / 8; + mode_ctl |= 0x00050000; + } + + if (nv_encoder->dcb->sorconf.link & 1) + mode_ctl |= 0x00000800; + else + mode_ctl |= 0x00000900; + break; + default: + break; + } + + if (crtc->index == 1) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1; + else + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC; + + nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); + + ret = RING_SPACE(evo, 2); + if (ret) { + NV_ERROR(drm, "no space while connecting SOR\n"); + nv_encoder->crtc = NULL; + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); + OUT_RING(evo, mode_ctl); +} + +static struct drm_crtc * +nv50_sor_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; +} + +static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = { + .dpms = nv50_sor_dpms, + .save = nv50_sor_save, + .restore = nv50_sor_restore, + .mode_fixup = nv50_sor_mode_fixup, + .prepare = nv50_sor_prepare, + .commit = nv50_sor_commit, + .mode_set = nv50_sor_mode_set, + .get_crtc = nv50_sor_crtc_get, + .detect = NULL, + .disable = nv50_sor_disconnect +}; + +static void +nv50_sor_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + + NV_DEBUG(drm, "\n"); + + drm_encoder_cleanup(encoder); + + kfree(nv_encoder); +} + +static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { + .destroy = nv50_sor_destroy, +}; + +int +nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry) +{ + struct nouveau_encoder *nv_encoder = NULL; + struct drm_device *dev = connector->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_encoder *encoder; + int type; + + NV_DEBUG(drm, "\n"); + + switch (entry->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_DP: + type = DRM_MODE_ENCODER_TMDS; + break; + case DCB_OUTPUT_LVDS: + type = DRM_MODE_ENCODER_LVDS; + break; + default: + return -EINVAL; + } + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type); + drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c b/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c index 2a56b1b551cb..53299eac9676 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c @@ -114,9 +114,17 @@ nvc0_fence_context_del(struct nouveau_channel *chan) struct nvc0_fence_chan *fctx = chan->fence; int i; - for (i = 0; i < dev->mode_config.num_crtc; i++) { - struct nouveau_bo 
*bo = nv50_display_crtc_sema(dev, i); - nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); + if (nv_device(chan->drm->device)->card_type >= NV_D0) { + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i); + nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); + } + } else + if (nv_device(chan->drm->device)->card_type >= NV_50) { + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); + nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); + } } nouveau_bo_vma_del(priv->bo, &fctx->vma); @@ -146,7 +154,12 @@ nvc0_fence_context_new(struct nouveau_channel *chan) /* map display semaphore buffers into channel's vm */ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { - struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i); + struct nouveau_bo *bo; + if (nv_device(chan->drm->device)->card_type >= NV_D0) + bo = nvd0_display_crtc_sema(chan->drm->dev, i); + else + bo = nv50_display_crtc_sema(chan->drm->dev, i); + ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]); } @@ -190,8 +203,6 @@ nvc0_fence_destroy(struct nouveau_drm *drm) { struct nvc0_fence_priv *priv = drm->fence; nouveau_bo_unmap(priv->bo); - if (priv->bo) - nouveau_bo_unpin(priv->bo); nouveau_bo_ref(NULL, &priv->bo); drm->fence = NULL; kfree(priv); @@ -221,11 +232,8 @@ nvc0_fence_create(struct nouveau_drm *drm) TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo); if (ret == 0) { ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); - if (ret == 0) { + if (ret == 0) ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } if (ret) nouveau_bo_ref(NULL, &priv->bo); } diff --git a/trunk/drivers/gpu/drm/nouveau/nvd0_display.c b/trunk/drivers/gpu/drm/nouveau/nvd0_display.c new file mode 100644 index 000000000000..c402fca2b2b8 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nvd0_display.c @@ -0,0 +1,2141 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include + +#include +#include + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_gem.h" +#include "nouveau_connector.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "nouveau_fence.h" +#include "nv50_display.h" + +#include + +#include +#include +#include + +#define EVO_DMA_NR 9 + +#define EVO_MASTER (0x00) +#define EVO_FLIP(c) (0x01 + (c)) +#define EVO_OVLY(c) (0x05 + (c)) +#define EVO_OIMM(c) (0x09 + (c)) +#define EVO_CURS(c) (0x0d + (c)) + +/* offsets in shared sync bo of various structures */ +#define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) +#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) +#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00) +#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10) + +struct evo { + int idx; + dma_addr_t handle; + u32 *ptr; + struct { + u32 offset; + u16 value; + } sem; +}; + +struct nvd0_display { + struct nouveau_gpuobj *mem; + struct nouveau_bo *sync; + struct evo evo[9]; + + struct tasklet_struct tasklet; + u32 modeset; +}; + +static struct nvd0_display * +nvd0_display(struct drm_device *dev) +{ + return nouveau_display(dev)->priv; +} + +static struct drm_crtc * +nvd0_display_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; +} + +/****************************************************************************** + * EVO channel helpers + *****************************************************************************/ +static inline int +evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data) +{ + struct nouveau_device *device = nouveau_dev(dev); + int ret = 0; + nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001); + nv_wr32(device, 0x610704 + (id * 0x10), data); + nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd); + if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000)) + ret = -EBUSY; + nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000); + return ret; +} + +static u32 * +evo_wait(struct drm_device *dev, int id, int nr) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvd0_display *disp = nvd0_display(dev); + u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4; + + if (put + nr >= (PAGE_SIZE / 4)) { + disp->evo[id].ptr[put] = 0x20000000; + + nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000); + if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) { + NV_ERROR(drm, "evo %d dma stalled\n", id); + return NULL; + } + + put = 0; + } + + return disp->evo[id].ptr + put; +} + +static void +evo_kick(u32 *push, struct drm_device *dev, int id) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nvd0_display *disp = nvd0_display(dev); + + nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2); +} + +#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) +#define evo_data(p,d) *((p)++) = (d) + +static int +evo_init_dma(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvd0_display *disp = nvd0_display(dev); + u32 flags; + + flags = 0x00000000; + if (ch == EVO_MASTER) + flags |= 0x01000000; + + nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3); + nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000); + nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001); + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010); + nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000); + nv_wr32(device, 
0x610490 + (ch * 0x0010), 0x00000013 | flags); + if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) { + NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch, + nv_rd32(device, 0x610490 + (ch * 0x0010))); + return -EBUSY; + } + + nv_mask(device, 0x610090, (1 << ch), (1 << ch)); + nv_mask(device, 0x6100a0, (1 << ch), (1 << ch)); + return 0; +} + +static void +evo_fini_dma(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + + if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010)) + return; + + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000); + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000); + nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000); + nv_mask(device, 0x610090, (1 << ch), 0x00000000); + nv_mask(device, 0x6100a0, (1 << ch), 0x00000000); +} + +static inline void +evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data) +{ + struct nouveau_device *device = nouveau_dev(dev); + nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data); +} + +static int +evo_init_pio(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + + nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001); + if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) { + NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch, + nv_rd32(device, 0x610490 + (ch * 0x0010))); + return -EBUSY; + } + + nv_mask(device, 0x610090, (1 << ch), (1 << ch)); + nv_mask(device, 0x6100a0, (1 << ch), (1 << ch)); + return 0; +} + +static void +evo_fini_pio(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + + if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001)) + return; + + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010); + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000); + nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000); + nv_mask(device, 0x610090, (1 << ch), 0x00000000); + nv_mask(device, 0x6100a0, (1 << ch), 0x00000000); +} + +static bool +evo_sync_wait(void *data) +{ + return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000; +} + +static int +evo_sync(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nvd0_display *disp = nvd0_display(dev); + u32 *push = evo_wait(dev, ch, 8); + if (push) { + nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000); + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x80000000 | EVO_MAST_NTFY); + evo_mthd(push, 0x0080, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_kick(push, dev, ch); + if (nv_wait_cb(device, evo_sync_wait, disp->sync)) + return 0; + } + + return -EBUSY; +} + +/****************************************************************************** + * Page flipping channel + *****************************************************************************/ +struct nouveau_bo * +nvd0_display_crtc_sema(struct drm_device *dev, int crtc) +{ + return nvd0_display(dev)->sync; +} + +void +nvd0_display_flip_stop(struct drm_crtc *crtc) +{ + struct nvd0_display *disp = nvd0_display(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)]; + u32 *push; + + push = evo_wait(crtc->dev, evo->idx, 8); + if (push) { + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0094, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x00c0, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 
0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, crtc->dev, evo->idx); + } +} + +int +nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct nouveau_channel *chan, u32 swap_interval) +{ + struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); + struct nvd0_display *disp = nvd0_display(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)]; + u64 offset; + u32 *push; + int ret; + + swap_interval <<= 4; + if (swap_interval == 0) + swap_interval |= 0x100; + + push = evo_wait(crtc->dev, evo->idx, 128); + if (unlikely(push == NULL)) + return -EBUSY; + + /* synchronise with the rendering channel, if necessary */ + if (likely(chan)) { + ret = RING_SPACE(chan, 10); + if (ret) + return ret; + + + offset = nvc0_fence_crtc(chan, nv_crtc->index); + offset += evo->sem.offset; + + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(offset)); + OUT_RING (chan, lower_32_bits(offset)); + OUT_RING (chan, 0xf00d0000 | evo->sem.value); + OUT_RING (chan, 0x1002); + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(offset)); + OUT_RING (chan, lower_32_bits(offset ^ 0x10)); + OUT_RING (chan, 0x74b1e000); + OUT_RING (chan, 0x1001); + FIRE_RING (chan); + } else { + nouveau_bo_wr32(disp->sync, evo->sem.offset / 4, + 0xf00d0000 | evo->sem.value); + evo_sync(crtc->dev, EVO_MASTER); + } + + /* queue the flip */ + evo_mthd(push, 0x0100, 1); + evo_data(push, 0xfffe0000); + evo_mthd(push, 0x0084, 1); + evo_data(push, swap_interval); + if (!(swap_interval & 0x00000100)) { + evo_mthd(push, 0x00e0, 1); + evo_data(push, 0x40000000); + } + evo_mthd(push, 0x0088, 4); + evo_data(push, evo->sem.offset); + evo_data(push, 0xf00d0000 | evo->sem.value); + evo_data(push, 0x74b1e000); + evo_data(push, NvEvoSync); + evo_mthd(push, 0x00a0, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x00c0, 1); + evo_data(push, nv_fb->r_dma); + evo_mthd(push, 0x0110, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0400, 5); + evo_data(push, nv_fb->nvbo->bo.offset >> 8); + evo_data(push, 0); + evo_data(push, (fb->height << 16) | fb->width); + evo_data(push, nv_fb->r_pitch); + evo_data(push, nv_fb->r_format); + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, crtc->dev, evo->idx); + + evo->sem.offset ^= 0x10; + evo->sem.value++; + return 0; +} + +/****************************************************************************** + * CRTC + *****************************************************************************/ +static int +nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev); + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_connector *nv_connector; + struct drm_connector *connector; + u32 *push, mode = 0x00; + u32 mthd; + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + connector = &nv_connector->base; + if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { + if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) + mode = DITHERING_MODE_DYNAMIC2X2; + } else { + mode = nv_connector->dithering_mode; + } + + if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { + if (connector->display_info.bpc >= 8) + mode |= DITHERING_DEPTH_8BPC; + } else { + mode |= nv_connector->dithering_depth; + } + + if (nv_device(drm->device)->card_type < NV_E0) + mthd = 0x0490 + 
(nv_crtc->index * 0x0300); + else + mthd = 0x04a0 + (nv_crtc->index * 0x0300); + + push = evo_wait(dev, EVO_MASTER, 4); + if (push) { + evo_mthd(push, mthd, 1); + evo_data(push, mode); + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + evo_kick(push, dev, EVO_MASTER); + } + + return 0; +} + +static int +nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_display_mode *omode, *umode = &nv_crtc->base.mode; + struct drm_device *dev = nv_crtc->base.dev; + struct drm_crtc *crtc = &nv_crtc->base; + struct nouveau_connector *nv_connector; + int mode = DRM_MODE_SCALE_NONE; + u32 oX, oY, *push; + + /* start off at the resolution we programmed the crtc for, this + * effectively handles NONE/FULL scaling + */ + nv_connector = nouveau_crtc_connector_get(nv_crtc); + if (nv_connector && nv_connector->native_mode) + mode = nv_connector->scaling_mode; + + if (mode != DRM_MODE_SCALE_NONE) + omode = nv_connector->native_mode; + else + omode = umode; + + oX = omode->hdisplay; + oY = omode->vdisplay; + if (omode->flags & DRM_MODE_FLAG_DBLSCAN) + oY *= 2; + + /* add overscan compensation if necessary, will keep the aspect + * ratio the same as the backend mode unless overridden by the + * user setting both hborder and vborder properties. + */ + if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || + (nv_connector->underscan == UNDERSCAN_AUTO && + nv_connector->edid && + drm_detect_hdmi_monitor(nv_connector->edid)))) { + u32 bX = nv_connector->underscan_hborder; + u32 bY = nv_connector->underscan_vborder; + u32 aspect = (oY << 19) / oX; + + if (bX) { + oX -= (bX * 2); + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } else { + oX -= (oX >> 4) + 32; + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + } + + /* handle CENTER/ASPECT scaling, taking into account the areas + * removed already for overscan compensation + */ + switch (mode) { + case DRM_MODE_SCALE_CENTER: + oX = min((u32)umode->hdisplay, oX); + oY = min((u32)umode->vdisplay, oY); + /* fall-through */ + case DRM_MODE_SCALE_ASPECT: + if (oY < oX) { + u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; + oX = ((oY * aspect) + (aspect / 2)) >> 19; + } else { + u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; + oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + break; + default: + break; + } + + push = evo_wait(dev, EVO_MASTER, 8); + if (push) { + evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); + evo_data(push, (oY << 16) | oX); + evo_data(push, (oY << 16) | oX); + evo_data(push, (oY << 16) | oX); + evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); + evo_data(push, (umode->vdisplay << 16) | umode->hdisplay); + evo_kick(push, dev, EVO_MASTER); + if (update) { + nvd0_display_flip_stop(crtc); + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); + } + } + + return 0; +} + +static int +nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, + int x, int y, bool update) +{ + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb); + u32 *push; + + push = evo_wait(fb->dev, EVO_MASTER, 16); + if (push) { + evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); + evo_data(push, nvfb->nvbo->bo.offset >> 8); + evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4); + evo_data(push, (fb->height << 16) | fb->width); + evo_data(push, nvfb->r_pitch); + evo_data(push, nvfb->r_format); + evo_data(push, nvfb->r_dma); + 
evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); + evo_data(push, (y << 16) | x); + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + evo_kick(push, fb->dev, EVO_MASTER); + } + + nv_crtc->fb.tile_flags = nvfb->r_dma; + return 0; +} + +static void +nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + u32 *push = evo_wait(dev, EVO_MASTER, 16); + if (push) { + if (show) { + evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); + evo_data(push, 0x85000000); + evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); + evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); + evo_data(push, NvEvoVRAM); + } else { + evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x05000000); + evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + } + + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + + evo_kick(push, dev, EVO_MASTER); + } +} + +static void +nvd0_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static void +nvd0_crtc_prepare(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 *push; + + nvd0_display_flip_stop(crtc); + + push = evo_wait(crtc->dev, EVO_MASTER, 2); + if (push) { + evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x03000000); + evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + evo_kick(push, crtc->dev, EVO_MASTER); + } + + nvd0_crtc_cursor_show(nv_crtc, false, false); +} + +static void +nvd0_crtc_commit(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 *push; + + push = evo_wait(crtc->dev, EVO_MASTER, 32); + if (push) { + evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); + evo_data(push, nv_crtc->fb.tile_flags); + evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); + evo_data(push, 0x83000000); + evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); + evo_data(push, NvEvoVRAM); + evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0xffffff00); + evo_kick(push, crtc->dev, EVO_MASTER); + } + + nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true); + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); +} + +static bool +nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int +nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) +{ + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); + int ret; + + ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (old_fb) { + nvfb = nouveau_framebuffer(old_fb); + nouveau_bo_unpin(nvfb->nvbo); + } + + return 0; +} + +static int +nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, + struct drm_display_mode *mode, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_connector *nv_connector; + u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; + u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 
2 : 1; + u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; + u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; + u32 vblan2e = 0, vblan2s = 1; + u32 *push; + int ret; + + hactive = mode->htotal; + hsynce = mode->hsync_end - mode->hsync_start - 1; + hbackp = mode->htotal - mode->hsync_end; + hblanke = hsynce + hbackp; + hfrontp = mode->hsync_start - mode->hdisplay; + hblanks = mode->htotal - hfrontp - 1; + + vactive = mode->vtotal * vscan / ilace; + vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; + vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; + vblanke = vsynce + vbackp; + vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; + vblanks = vactive - vfrontp - 1; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + vblan2e = vactive + vsynce + vbackp; + vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); + vactive = (vactive * 2) + 1; + } + + ret = nvd0_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; + + push = evo_wait(crtc->dev, EVO_MASTER, 64); + if (push) { + evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6); + evo_data(push, 0x00000000); + evo_data(push, (vactive << 16) | hactive); + evo_data(push, ( vsynce << 16) | hsynce); + evo_data(push, (vblanke << 16) | hblanke); + evo_data(push, (vblanks << 16) | hblanks); + evo_data(push, (vblan2e << 16) | vblan2s); + evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); /* ??? */ + evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3); + evo_data(push, mode->clock * 1000); + evo_data(push, 0x00200000); /* ??? */ + evo_data(push, mode->clock * 1000); + evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2); + evo_data(push, 0x00000311); + evo_data(push, 0x00000100); + evo_kick(push, crtc->dev, EVO_MASTER); + } + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + nvd0_crtc_set_dither(nv_crtc, false); + nvd0_crtc_set_scale(nv_crtc, false); + nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false); + return 0; +} + +static int +nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ret; + + if (!crtc->fb) { + NV_DEBUG(drm, "No FB bound\n"); + return 0; + } + + ret = nvd0_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; + + nvd0_display_flip_stop(crtc); + nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true); + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); + return 0; +} + +static int +nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y, + enum mode_set_atomic state) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + nvd0_display_flip_stop(crtc); + nvd0_crtc_set_image(nv_crtc, fb, x, y, true); + return 0; +} + +static void +nvd0_crtc_lut_load(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); + int i; + + for (i = 0; i < 256; i++) { + writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0); + writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2); + writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4); + } +} + +static int +nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + bool visible = (handle != 0); + int i, 
ret = 0; + + if (visible) { + if (width != 64 || height != 64) + return -EINVAL; + + gem = drm_gem_object_lookup(dev, file_priv, handle); + if (unlikely(!gem)) + return -ENOENT; + nvbo = nouveau_gem_object(gem); + + ret = nouveau_bo_map(nvbo); + if (ret == 0) { + for (i = 0; i < 64 * 64; i++) { + u32 v = nouveau_bo_rd32(nvbo, i); + nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v); + } + nouveau_bo_unmap(nvbo); + } + + drm_gem_object_unreference_unlocked(gem); + } + + if (visible != nv_crtc->cursor.visible) { + nvd0_crtc_cursor_show(nv_crtc, visible, true); + nv_crtc->cursor.visible = visible; + } + + return ret; +} + +static int +nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ch = EVO_CURS(nv_crtc->index); + + evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff)); + evo_piow(crtc->dev, ch, 0x0080, 0x00000000); + return 0; +} + +static void +nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t start, uint32_t size) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 end = max(start + size, (u32)256); + u32 i; + + for (i = start; i < end; i++) { + nv_crtc->lut.r[i] = r[i]; + nv_crtc->lut.g[i] = g[i]; + nv_crtc->lut.b[i] = b[i]; + } + + nvd0_crtc_lut_load(crtc); +} + +static void +nvd0_crtc_destroy(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + nouveau_bo_unmap(nv_crtc->lut.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + drm_crtc_cleanup(crtc); + kfree(crtc); +} + +static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = { + .dpms = nvd0_crtc_dpms, + .prepare = nvd0_crtc_prepare, + .commit = nvd0_crtc_commit, + .mode_fixup = nvd0_crtc_mode_fixup, + .mode_set = nvd0_crtc_mode_set, + .mode_set_base = nvd0_crtc_mode_set_base, + .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic, + .load_lut = nvd0_crtc_lut_load, +}; + +static const struct drm_crtc_funcs nvd0_crtc_func = { + .cursor_set = nvd0_crtc_cursor_set, + .cursor_move = nvd0_crtc_cursor_move, + .gamma_set = nvd0_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = nvd0_crtc_destroy, + .page_flip = nouveau_crtc_page_flip, +}; + +static void +nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) +{ +} + +static void +nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) +{ +} + +static int +nvd0_crtc_create(struct drm_device *dev, int index) +{ + struct nouveau_crtc *nv_crtc; + struct drm_crtc *crtc; + int ret, i; + + nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); + if (!nv_crtc) + return -ENOMEM; + + nv_crtc->index = index; + nv_crtc->set_dither = nvd0_crtc_set_dither; + nv_crtc->set_scale = nvd0_crtc_set_scale; + nv_crtc->cursor.set_offset = nvd0_cursor_set_offset; + nv_crtc->cursor.set_pos = nvd0_cursor_set_pos; + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = i << 8; + nv_crtc->lut.g[i] = i << 8; + nv_crtc->lut.b[i] = i << 8; + } + + crtc = &nv_crtc->base; + drm_crtc_init(dev, crtc, &nvd0_crtc_func); + drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc); + drm_mode_crtc_set_gamma_size(crtc, 256); + + ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + } + + if (ret) + goto out; + + ret = nouveau_bo_new(dev, 
8192, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->lut.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->lut.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + } + + if (ret) + goto out; + + nvd0_crtc_lut_load(crtc); + +out: + if (ret) + nvd0_crtc_destroy(crtc); + return ret; +} + +/****************************************************************************** + * DAC + *****************************************************************************/ +static void +nvd0_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int or = nv_encoder->or; + u32 dpms_ctrl; + + dpms_ctrl = 0x80000000; + if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) + dpms_ctrl |= 0x00000001; + if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) + dpms_ctrl |= 0x00000004; + + nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); + nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl); + nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); +} + +static bool +nvd0_dac_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->native_mode) { + if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { + int id = adjusted_mode->base.id; + *adjusted_mode = *nv_connector->native_mode; + adjusted_mode->base.id = id; + } + } + + return true; +} + +static void +nvd0_dac_commit(struct drm_encoder *encoder) +{ +} + +static void +nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + u32 syncs, magic, *push; + + syncs = 0x00000001; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + syncs |= 0x00000008; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + syncs |= 0x00000010; + + magic = 0x31ec6000 | (nv_crtc->index << 25); + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + magic |= 0x00000001; + + nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON); + + push = evo_wait(encoder->dev, EVO_MASTER, 8); + if (push) { + evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); + evo_data(push, syncs); + evo_data(push, magic); + evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2); + evo_data(push, 1 << nv_crtc->index); + evo_data(push, 0x00ff); + evo_kick(push, encoder->dev, EVO_MASTER); + } + + nv_encoder->crtc = encoder->crtc; +} + +static void +nvd0_dac_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + u32 *push; + + if (nv_encoder->crtc) { + nvd0_crtc_prepare(nv_encoder->crtc); + + push = evo_wait(dev, EVO_MASTER, 4); + if (push) { + evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, dev, EVO_MASTER); + } + + nv_encoder->crtc = NULL; + } +} + +static enum drm_connector_status +nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + enum drm_connector_status status 
= connector_status_disconnected; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int or = nv_encoder->or; + u32 load; + + nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000); + udelay(9500); + nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000); + + load = nv_rd32(device, 0x61a00c + (or * 0x800)); + if ((load & 0x38000000) == 0x38000000) + status = connector_status_connected; + + nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000); + return status; +} + +static void +nvd0_dac_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = { + .dpms = nvd0_dac_dpms, + .mode_fixup = nvd0_dac_mode_fixup, + .prepare = nvd0_dac_disconnect, + .commit = nvd0_dac_commit, + .mode_set = nvd0_dac_mode_set, + .disable = nvd0_dac_disconnect, + .get_crtc = nvd0_display_crtc_get, + .detect = nvd0_dac_detect +}; + +static const struct drm_encoder_funcs nvd0_dac_func = { + .destroy = nvd0_dac_destroy, +}; + +static int +nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) +{ + struct drm_device *dev = connector->dev; + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + nv_encoder->dcb = dcbe; + nv_encoder->or = ffs(dcbe->or) - 1; + + encoder = to_drm_encoder(nv_encoder); + encoder->possible_crtcs = dcbe->heads; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &nvd0_dac_hfunc); + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + +/****************************************************************************** + * Audio + *****************************************************************************/ +static void +nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int i, or = nv_encoder->or * 0x30; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!drm_detect_monitor_audio(nv_connector->edid)) + return; + + nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001); + + drm_edid_to_eld(&nv_connector->base, nv_connector->edid); + if (nv_connector->base.eld[0]) { + u8 *eld = nv_connector->base.eld; + + for (i = 0; i < eld[2] * 4; i++) + nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]); + for (i = eld[2] * 4; i < 0x60; i++) + nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00); + + nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002); + } +} + +static void +nvd0_audio_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int or = nv_encoder->or * 0x30; + + nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000); +} + +/****************************************************************************** + * HDMI + *****************************************************************************/ +static void +nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = 
nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int head = nv_crtc->index * 0x800; + u32 rekey = 56; /* binary driver, and tegra constant */ + u32 max_ac_packet; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!drm_detect_hdmi_monitor(nv_connector->edid)) + return; + + max_ac_packet = mode->htotal - mode->hdisplay; + max_ac_packet -= rekey; + max_ac_packet -= 18; /* constant from tegra */ + max_ac_packet /= 32; + + /* AVI InfoFrame */ + nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000); + nv_wr32(device, 0x61671c + head, 0x000d0282); + nv_wr32(device, 0x616720 + head, 0x0000006f); + nv_wr32(device, 0x616724 + head, 0x00000000); + nv_wr32(device, 0x616728 + head, 0x00000000); + nv_wr32(device, 0x61672c + head, 0x00000000); + nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001); + + /* ??? InfoFrame? */ + nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000); + nv_wr32(device, 0x6167ac + head, 0x00000010); + nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001); + + /* HDMI_CTRL */ + nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey | + max_ac_packet << 16); + + /* NFI, audio doesn't work without it though.. */ + nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000); + + nvd0_audio_mode_set(encoder, mode); +} + +static void +nvd0_hdmi_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int head = nv_crtc->index * 0x800; + + nvd0_audio_disconnect(encoder); + + nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000); + nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000); + nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000); +} + +/****************************************************************************** + * SOR + *****************************************************************************/ +static inline u32 +nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane) +{ + static const u8 nvd0[] = { 16, 8, 0, 24 }; + return nvd0[lane]; +} + +static void +nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern) +{ + struct nouveau_device *device = nouveau_dev(dev); + const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + const u32 loff = (or * 0x800) + (link * 0x80); + nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); +} + +static void +nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb, + u8 lane, u8 swing, u8 preem) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + const u32 loff = (or * 0x800) + (link * 0x80); + u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane); + u32 mask = 0x000000ff << shift; + u8 *table, *entry, *config = NULL; + + switch (swing) { + case 0: preem += 0; break; + case 1: preem += 4; break; + case 2: preem += 7; break; + case 3: preem += 9; break; + } + + table = nouveau_dp_bios_data(dev, dcb, &entry); + if (table) { + if (table[0] == 0x30) { + config = entry + table[4]; + config += table[5] * preem; + } else + if (table[0] == 0x40) { + config = table + table[1]; + config += table[2] * table[3]; + config += table[6] * preem; + } + } + + if (!config) { + NV_ERROR(drm, 
"PDISP: unsupported DP table for chipset\n"); + return; + } + + nv_mask(device, 0x61c118 + loff, mask, config[1] << shift); + nv_mask(device, 0x61c120 + loff, mask, config[2] << shift); + nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8); + nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000); +} + +static void +nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc, + int link_nr, u32 link_bw, bool enhframe) +{ + struct nouveau_device *device = nouveau_dev(dev); + const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + const u32 loff = (or * 0x800) + (link * 0x80); + const u32 soff = (or * 0x800); + u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000; + u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000; + u32 script = 0x0000, lane_mask = 0; + u8 *table, *entry; + int i; + + link_bw /= 27000; + + table = nouveau_dp_bios_data(dev, dcb, &entry); + if (table) { + if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]); + else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]); + else entry = NULL; + + while (entry) { + if (entry[0] >= link_bw) + break; + entry += 3; + } + + nouveau_bios_run_init_table(dev, script, dcb, crtc); + } + + clksor |= link_bw << 18; + dpctrl |= ((1 << link_nr) - 1) << 16; + if (enhframe) + dpctrl |= 0x00004000; + + for (i = 0; i < link_nr; i++) + lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3); + + nv_wr32(device, 0x612300 + soff, clksor); + nv_wr32(device, 0x61c10c + loff, dpctrl); + nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask); +} + +static void +nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb, + u32 *link_nr, u32 *link_bw) +{ + struct nouveau_device *device = nouveau_dev(dev); + const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + const u32 loff = (or * 0x800) + (link * 0x80); + const u32 soff = (or * 0x800); + u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000; + u32 clksor = nv_rd32(device, 0x612300 + soff); + + if (dpctrl > 0x00030000) *link_nr = 4; + else if (dpctrl > 0x00010000) *link_nr = 2; + else *link_nr = 1; + + *link_bw = (clksor & 0x007c0000) >> 18; + *link_bw *= 27000; +} + +static void +nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb, + u32 crtc, u32 datarate) +{ + struct nouveau_device *device = nouveau_dev(dev); + const u32 symbol = 100000; + const u32 TU = 64; + u32 link_nr, link_bw; + u64 ratio, value; + + nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw); + + ratio = datarate; + ratio *= symbol; + do_div(ratio, link_nr * link_bw); + + value = (symbol - ratio) * TU; + value *= ratio; + do_div(value, symbol); + do_div(value, symbol); + + value += 5; + value |= 0x08000000; + + nv_wr32(device, 0x616610 + (crtc * 0x800), value); +} + +static void +nvd0_sor_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + struct drm_encoder *partner; + int or = nv_encoder->or; + u32 dpms_ctrl; + + nv_encoder->last_dpms = mode; + + list_for_each_entry(partner, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_partner = nouveau_encoder(partner); + + if (partner->encoder_type != DRM_MODE_ENCODER_TMDS) + continue; + + if (nv_partner != nv_encoder && + nv_partner->dcb->or == nv_encoder->dcb->or) { + if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) + return; + break; + } + } + + dpms_ctrl = (mode == DRM_MODE_DPMS_ON); + dpms_ctrl |= 0x80000000; + + 
nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); + nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl); + nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); + nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000); + + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + struct dp_train_func func = { + .link_set = nvd0_sor_dp_link_set, + .train_set = nvd0_sor_dp_train_set, + .train_adj = nvd0_sor_dp_train_adj + }; + + nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func); + } +} + +static bool +nvd0_sor_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->native_mode) { + if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { + int id = adjusted_mode->base.id; + *adjusted_mode = *nv_connector->native_mode; + adjusted_mode->base.id = id; + } + } + + return true; +} + +static void +nvd0_sor_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + u32 *push; + + if (nv_encoder->crtc) { + nvd0_crtc_prepare(nv_encoder->crtc); + + push = evo_wait(dev, EVO_MASTER, 4); + if (push) { + evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, dev, EVO_MASTER); + } + + nvd0_hdmi_disconnect(encoder); + + nv_encoder->crtc = NULL; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + } +} + +static void +nvd0_sor_prepare(struct drm_encoder *encoder) +{ + nvd0_sor_disconnect(encoder); + if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP) + evo_sync(encoder->dev, EVO_MASTER); +} + +static void +nvd0_sor_commit(struct drm_encoder *encoder) +{ +} + +static void +nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, + struct drm_display_mode *mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + struct nvbios *bios = &drm->vbios; + u32 mode_ctrl = (1 << nv_crtc->index); + u32 syncs, magic, *push; + u32 or_config; + + syncs = 0x00000001; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + syncs |= 0x00000008; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + syncs |= 0x00000010; + + magic = 0x31ec6000 | (nv_crtc->index << 25); + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + magic |= 0x00000001; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + switch (nv_encoder->dcb->type) { + case DCB_OUTPUT_TMDS: + if (nv_encoder->dcb->sorconf.link & 1) { + if (mode->clock < 165000) + mode_ctrl |= 0x00000100; + else + mode_ctrl |= 0x00000500; + } else { + mode_ctrl |= 0x00000200; + } + + or_config = (mode_ctrl & 0x00000f00) >> 8; + if (mode->clock >= 165000) + or_config |= 0x0100; + + nvd0_hdmi_mode_set(encoder, mode); + break; + case DCB_OUTPUT_LVDS: + or_config = (mode_ctrl & 0x00000f00) >> 8; + if (bios->fp_no_ddc) { + if (bios->fp.dual_link) + or_config |= 0x0100; + if (bios->fp.if_is_24bit) + or_config |= 0x0200; + } else { + if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { + if (((u8 *)nv_connector->edid)[121] == 2) + or_config |= 0x0100; + } else + if 
(mode->clock >= bios->fp.duallink_transition_clk) { + or_config |= 0x0100; + } + + if (or_config & 0x0100) { + if (bios->fp.strapless_is_24bit & 2) + or_config |= 0x0200; + } else { + if (bios->fp.strapless_is_24bit & 1) + or_config |= 0x0200; + } + + if (nv_connector->base.display_info.bpc == 8) + or_config |= 0x0200; + + } + break; + case DCB_OUTPUT_DP: + if (nv_connector->base.display_info.bpc == 6) { + nv_encoder->dp.datarate = mode->clock * 18 / 8; + syncs |= 0x00000002 << 6; + } else { + nv_encoder->dp.datarate = mode->clock * 24 / 8; + syncs |= 0x00000005 << 6; + } + + if (nv_encoder->dcb->sorconf.link & 1) + mode_ctrl |= 0x00000800; + else + mode_ctrl |= 0x00000900; + + or_config = (mode_ctrl & 0x00000f00) >> 8; + break; + default: + BUG_ON(1); + break; + } + + nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON); + + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index, + nv_encoder->dp.datarate); + } + + push = evo_wait(dev, EVO_MASTER, 8); + if (push) { + evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); + evo_data(push, syncs); + evo_data(push, magic); + evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2); + evo_data(push, mode_ctrl); + evo_data(push, or_config); + evo_kick(push, dev, EVO_MASTER); + } + + nv_encoder->crtc = encoder->crtc; +} + +static void +nvd0_sor_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = { + .dpms = nvd0_sor_dpms, + .mode_fixup = nvd0_sor_mode_fixup, + .prepare = nvd0_sor_prepare, + .commit = nvd0_sor_commit, + .mode_set = nvd0_sor_mode_set, + .disable = nvd0_sor_disconnect, + .get_crtc = nvd0_display_crtc_get, +}; + +static const struct drm_encoder_funcs nvd0_sor_func = { + .destroy = nvd0_sor_destroy, +}; + +static int +nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) +{ + struct drm_device *dev = connector->dev; + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + nv_encoder->dcb = dcbe; + nv_encoder->or = ffs(dcbe->or) - 1; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + + encoder = to_drm_encoder(nv_encoder); + encoder->possible_crtcs = dcbe->heads; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &nvd0_sor_hfunc); + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + +/****************************************************************************** + * IRQ + *****************************************************************************/ +static struct dcb_output * +lookup_dcb(struct drm_device *dev, int id, u32 mc) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + int type, or, i, link = -1; + + if (id < 4) { + type = DCB_OUTPUT_ANALOG; + or = id; + } else { + switch (mc & 0x00000f00) { + case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break; + case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break; + case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break; + case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break; + case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break; + case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break; + default: + NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc); + return NULL; + } + + or = id - 4; + } + + for (i = 0; i < drm->vbios.dcb.entries; i++) { + struct dcb_output *dcb = &drm->vbios.dcb.entry[i]; + if 
(dcb->type == type && (dcb->or & (1 << or)) && + (link < 0 || link == !(dcb->sorconf.link & 1))) + return dcb; + } + + NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc); + return NULL; +} + +static void +nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct dcb_output *dcb; + int i; + + for (i = 0; mask && i < 8; i++) { + u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20)); + if (!(mcc & (1 << crtc))) + continue; + + dcb = lookup_dcb(dev, i, mcc); + if (!dcb) + continue; + + nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc); + } + + nv_wr32(device, 0x6101d4, 0x00000000); + nv_wr32(device, 0x6109d4, 0x00000000); + nv_wr32(device, 0x6101d0, 0x80000000); +} + +static void +nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_output *dcb; + u32 or, tmp, pclk; + int i; + + for (i = 0; mask && i < 8; i++) { + u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20)); + if (!(mcc & (1 << crtc))) + continue; + + dcb = lookup_dcb(dev, i, mcc); + if (!dcb) + continue; + + nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc); + } + + pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000; + NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n", + crtc, pclk, mask); + if (pclk && (mask & 0x00010000)) { + nv50_crtc_set_clock(dev, crtc, pclk); + } + + for (i = 0; mask && i < 8; i++) { + u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20)); + u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20)); + if (!(mcp & (1 << crtc))) + continue; + + dcb = lookup_dcb(dev, i, mcp); + if (!dcb) + continue; + or = ffs(dcb->or) - 1; + + nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc); + + nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000); + switch (dcb->type) { + case DCB_OUTPUT_ANALOG: + nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000); + break; + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + if (cfg & 0x00000100) + tmp = 0x00000101; + else + tmp = 0x00000000; + + nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp); + break; + default: + break; + } + + break; + } + + nv_wr32(device, 0x6101d4, 0x00000000); + nv_wr32(device, 0x6109d4, 0x00000000); + nv_wr32(device, 0x6101d0, 0x80000000); +} + +static void +nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct dcb_output *dcb; + int pclk, i; + + pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000; + + for (i = 0; mask && i < 8; i++) { + u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20)); + u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20)); + if (!(mcp & (1 << crtc))) + continue; + + dcb = lookup_dcb(dev, i, mcp); + if (!dcb) + continue; + + nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc); + } + + nv_wr32(device, 0x6101d4, 0x00000000); + nv_wr32(device, 0x6109d4, 0x00000000); + nv_wr32(device, 0x6101d0, 0x80000000); +} + +static void +nvd0_display_bh(unsigned long data) +{ + struct drm_device *dev = (struct drm_device *)data; + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvd0_display *disp = nvd0_display(dev); + u32 mask = 0, crtc = ~0; + int i; + + if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) { + NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset); + NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n", + nv_rd32(device, 0x6101d0), + nv_rd32(device, 0x6101d4), 
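The three unknown-stage handlers above each finish with the same acknowledge sequence; purely as a reading aid (the helper name is invented and not part of the patch), the common tail amounts to:

/* Clear the 0x6101d4/0x6109d4 status copies, then re-arm the
 * supervisor via 0x6101d0, exactly as each nvd0_display_unk*_handler
 * does above.  Sketch only. */
static void nvd0_display_unk_ack(struct nouveau_device *device)
{
        nv_wr32(device, 0x6101d4, 0x00000000);
        nv_wr32(device, 0x6109d4, 0x00000000);
        nv_wr32(device, 0x6101d0, 0x80000000);
}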
nv_rd32(device, 0x6109d4)); + for (i = 0; i < 8; i++) { + NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n", + i < 4 ? "DAC" : "SOR", i, + nv_rd32(device, 0x640180 + (i * 0x20)), + nv_rd32(device, 0x660180 + (i * 0x20))); + } + } + + while (!mask && ++crtc < dev->mode_config.num_crtc) + mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800)); + + if (disp->modeset & 0x00000001) + nvd0_display_unk1_handler(dev, crtc, mask); + if (disp->modeset & 0x00000002) + nvd0_display_unk2_handler(dev, crtc, mask); + if (disp->modeset & 0x00000004) + nvd0_display_unk4_handler(dev, crtc, mask); +} + +void +nvd0_display_intr(struct drm_device *dev) +{ + struct nvd0_display *disp = nvd0_display(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 intr = nv_rd32(device, 0x610088); + + if (intr & 0x00000001) { + u32 stat = nv_rd32(device, 0x61008c); + nv_wr32(device, 0x61008c, stat); + intr &= ~0x00000001; + } + + if (intr & 0x00000002) { + u32 stat = nv_rd32(device, 0x61009c); + int chid = ffs(stat) - 1; + if (chid >= 0) { + u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12)); + u32 data = nv_rd32(device, 0x6101f4 + (chid * 12)); + u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12)); + + NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x " + "0x%08x 0x%08x\n", + chid, (mthd & 0x0000ffc), data, mthd, unkn); + nv_wr32(device, 0x61009c, (1 << chid)); + nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000); + } + + intr &= ~0x00000002; + } + + if (intr & 0x00100000) { + u32 stat = nv_rd32(device, 0x6100ac); + + if (stat & 0x00000007) { + disp->modeset = stat; + tasklet_schedule(&disp->tasklet); + + nv_wr32(device, 0x6100ac, (stat & 0x00000007)); + stat &= ~0x00000007; + } + + if (stat) { + NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat); + nv_wr32(device, 0x6100ac, stat); + } + + intr &= ~0x00100000; + } + + intr &= ~0x0f000000; /* vblank, handled in core */ + if (intr) + NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr); +} + +/****************************************************************************** + * Init + *****************************************************************************/ +void +nvd0_display_fini(struct drm_device *dev) +{ + int i; + + /* fini cursors + overlays + flips */ + for (i = 1; i >= 0; i--) { + evo_fini_pio(dev, EVO_CURS(i)); + evo_fini_pio(dev, EVO_OIMM(i)); + evo_fini_dma(dev, EVO_OVLY(i)); + evo_fini_dma(dev, EVO_FLIP(i)); + } + + /* fini master */ + evo_fini_dma(dev, EVO_MASTER); +} + +int +nvd0_display_init(struct drm_device *dev) +{ + struct nvd0_display *disp = nvd0_display(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + int ret, i; + u32 *push; + + if (nv_rd32(device, 0x6100ac) & 0x00000100) { + nv_wr32(device, 0x6100ac, 0x00000100); + nv_mask(device, 0x6194e8, 0x00000001, 0x00000000); + if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) { + NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n", + nv_rd32(device, 0x6194e8)); + return -EBUSY; + } + } + + /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't + * work at all unless you do the SOR part below. 
+ */ + for (i = 0; i < 3; i++) { + u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800)); + nv_wr32(device, 0x6101c0 + (i * 0x800), dac); + } + + for (i = 0; i < 4; i++) { + u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800)); + nv_wr32(device, 0x6301c4 + (i * 0x800), sor); + } + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800)); + u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800)); + u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800)); + nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0); + nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1); + nv_wr32(device, 0x6101bc + (i * 0x800), crtc2); + } + + /* point at our hash table / objects, enable interrupts */ + nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9); + nv_mask(device, 0x6100b0, 0x00000307, 0x00000307); + + /* init master */ + ret = evo_init_dma(dev, EVO_MASTER); + if (ret) + goto error; + + /* init flips + overlays + cursors */ + for (i = 0; i < dev->mode_config.num_crtc; i++) { + if ((ret = evo_init_dma(dev, EVO_FLIP(i))) || + (ret = evo_init_dma(dev, EVO_OVLY(i))) || + (ret = evo_init_pio(dev, EVO_OIMM(i))) || + (ret = evo_init_pio(dev, EVO_CURS(i)))) + goto error; + } + + push = evo_wait(dev, EVO_MASTER, 32); + if (!push) { + ret = -EBUSY; + goto error; + } + evo_mthd(push, 0x0088, 1); + evo_data(push, NvEvoSync); + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x80000000); + evo_mthd(push, 0x008c, 1); + evo_data(push, 0x00000000); + evo_kick(push, dev, EVO_MASTER); + +error: + if (ret) + nvd0_display_fini(dev); + return ret; +} + +void +nvd0_display_destroy(struct drm_device *dev) +{ + struct nvd0_display *disp = nvd0_display(dev); + struct pci_dev *pdev = dev->pdev; + int i; + + for (i = 0; i < EVO_DMA_NR; i++) { + struct evo *evo = &disp->evo[i]; + pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle); + } + + nouveau_gpuobj_ref(NULL, &disp->mem); + nouveau_bo_unmap(disp->sync); + nouveau_bo_ref(NULL, &disp->sync); + + nouveau_display(dev)->priv = NULL; + kfree(disp); +} + +int +nvd0_display_create(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_bar *bar = nouveau_bar(device); + struct nouveau_fb *pfb = nouveau_fb(device); + struct dcb_table *dcb = &drm->vbios.dcb; + struct drm_connector *connector, *tmp; + struct pci_dev *pdev = dev->pdev; + struct nvd0_display *disp; + struct dcb_output *dcbe; + int crtcs, ret, i; + + disp = kzalloc(sizeof(*disp), GFP_KERNEL); + if (!disp) + return -ENOMEM; + + nouveau_display(dev)->priv = disp; + nouveau_display(dev)->dtor = nvd0_display_destroy; + nouveau_display(dev)->init = nvd0_display_init; + nouveau_display(dev)->fini = nvd0_display_fini; + + /* create crtc objects to represent the hw heads */ + crtcs = nv_rd32(device, 0x022448); + for (i = 0; i < crtcs; i++) { + ret = nvd0_crtc_create(dev, i); + if (ret) + goto out; + } + + /* create encoder/connector objects based on VBIOS DCB table */ + for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) { + connector = nouveau_connector_create(dev, dcbe->connector); + if (IS_ERR(connector)) + continue; + + if (dcbe->location != DCB_LOC_ON_CHIP) { + NV_WARN(drm, "skipping off-chip encoder %d/%d\n", + dcbe->type, ffs(dcbe->or) - 1); + continue; + } + + switch (dcbe->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + nvd0_sor_create(connector, dcbe); + break; + case DCB_OUTPUT_ANALOG: + 
nvd0_dac_create(connector, dcbe); + break; + default: + NV_WARN(drm, "skipping unsupported encoder %d/%d\n", + dcbe->type, ffs(dcbe->or) - 1); + continue; + } + } + + /* cull any connectors we created that don't have an encoder */ + list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { + if (connector->encoder_ids[0]) + continue; + + NV_WARN(drm, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + connector->funcs->destroy(connector); + } + + /* setup interrupt handling */ + tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev); + + /* small shared memory area we use for notifiers and semaphores */ + ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &disp->sync); + if (!ret) { + ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(disp->sync); + if (ret) + nouveau_bo_ref(NULL, &disp->sync); + } + + if (ret) + goto out; + + /* hash table and dma objects for the memory areas we care about */ + ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000, + NVOBJ_FLAG_ZERO_ALLOC, &disp->mem); + if (ret) + goto out; + + /* create evo dma channels */ + for (i = 0; i < EVO_DMA_NR; i++) { + struct evo *evo = &disp->evo[i]; + u64 offset = disp->sync->bo.offset; + u32 dmao = 0x1000 + (i * 0x100); + u32 hash = 0x0000 + (i * 0x040); + + evo->idx = i; + evo->sem.offset = EVO_SYNC(evo->idx, 0x00); + evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle); + if (!evo->ptr) { + ret = -ENOMEM; + goto out; + } + + nv_wo32(disp->mem, dmao + 0x00, 0x00000049); + nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8); + nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8); + nv_wo32(disp->mem, dmao + 0x0c, 0x00000000); + nv_wo32(disp->mem, dmao + 0x10, 0x00000000); + nv_wo32(disp->mem, dmao + 0x14, 0x00000000); + nv_wo32(disp->mem, hash + 0x00, NvEvoSync); + nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) | + ((dmao + 0x00) << 9)); + + nv_wo32(disp->mem, dmao + 0x20, 0x00000049); + nv_wo32(disp->mem, dmao + 0x24, 0x00000000); + nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8); + nv_wo32(disp->mem, dmao + 0x2c, 0x00000000); + nv_wo32(disp->mem, dmao + 0x30, 0x00000000); + nv_wo32(disp->mem, dmao + 0x34, 0x00000000); + nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM); + nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) | + ((dmao + 0x20) << 9)); + + nv_wo32(disp->mem, dmao + 0x40, 0x00000009); + nv_wo32(disp->mem, dmao + 0x44, 0x00000000); + nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8); + nv_wo32(disp->mem, dmao + 0x4c, 0x00000000); + nv_wo32(disp->mem, dmao + 0x50, 0x00000000); + nv_wo32(disp->mem, dmao + 0x54, 0x00000000); + nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP); + nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) | + ((dmao + 0x40) << 9)); + + nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009); + nv_wo32(disp->mem, dmao + 0x64, 0x00000000); + nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8); + nv_wo32(disp->mem, dmao + 0x6c, 0x00000000); + nv_wo32(disp->mem, dmao + 0x70, 0x00000000); + nv_wo32(disp->mem, dmao + 0x74, 0x00000000); + nv_wo32(disp->mem, hash + 0x18, NvEvoFB32); + nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) | + ((dmao + 0x60) << 9)); + } + + bar->flush(bar); + +out: + if (ret) + nvd0_display_destroy(dev); + return ret; +} diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index 2e566e123e9e..24d932f53203 100644 --- 
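The hash-table writes in nvd0_display_create() above repeat the same packing for each DMA object. My reading of that code (helper name invented): the second word of each hash pair has bit 0 as the valid bit, the object's byte offset inside disp->mem at bits 9 and up, and the owning EVO channel at bits 27 and up:

#include <stdint.h>

/* Illustrative only: packs an EVO hash-table entry the way the
 * nv_wo32(disp->mem, hash + ..., ...) writes above do.  'chan' is the
 * EVO channel index, 'dmao' the byte offset of the DMA object inside
 * disp->mem. */
static uint32_t evo_hash_entry(uint32_t chan, uint32_t dmao)
{
        return 0x00000001 | (chan << 27) | (dmao << 9);
}

/* e.g. channel 0, DMA object at offset 0x1000 -> 0x00200001 */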
a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1697,34 +1697,22 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { - if (ASIC_IS_AVIVO(rdev)) { - /* in DP mode, the DP ref clock can come from either PPLL - * depending on the asic: - * DCE3: PPLL1 or PPLL2 - */ - if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { - /* use the same PPLL for all DP monitors */ - pll = radeon_get_shared_dp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } else { - /* use the same PPLL for all monitors with the same clock */ - pll = radeon_get_shared_nondp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } - /* all other cases */ - pll_in_use = radeon_get_pll_use_mask(crtc); - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; - } else { - /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */ - return radeon_crtc->crtc_id; - } + /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */ + /* some atombios (observed in some DCE2/DCE3) code have a bug, + * the matching btw pll and crtc is done through + * PCLK_CRTC[1|2]_CNTL (0x480/0x484) but atombios code use the + * pll (1 or 2) to select which register to write. ie if using + * pll1 it will use PCLK_CRTC1_CNTL (0x480) and if using pll2 + * it will use PCLK_CRTC2_CNTL (0x484), it then use crtc id to + * choose which value to write. Which is reverse order from + * register logic. So only case that works is when pllid is + * same as crtcid or when both pll and crtc are enabled and + * both use same clock. 
+ * + * So just return crtc id as if crtc and pll were hard linked + * together even if they aren't + */ + return radeon_crtc->crtc_id; } } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_dp.c b/trunk/drivers/gpu/drm/radeon/atombios_dp.c index 064023bed480..d5699fe4f1e8 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_dp.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_dp.c @@ -34,7 +34,8 @@ /* move these to drm_dp_helper.c/h */ #define DP_LINK_CONFIGURATION_SIZE 9 -#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE +#define DP_LINK_STATUS_SIZE 6 +#define DP_DPCD_SIZE 8 static char *voltage_names[] = { "0.4V", "0.6V", "0.8V", "1.2V" @@ -289,6 +290,78 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, /***** general DP utility functions *****/ +static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_LANE0_1_STATUS + (lane >> 1); + int s = (lane & 1) * 4; + u8 l = dp_link_status(link_status, i); + return (l >> s) & 0xf; +} + +static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + int lane; + u8 lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + u8 lane_align; + u8 lane_status; + int lane; + + lane_align = dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) + return false; + } + return true; +} + +static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) + +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 @@ -301,8 +374,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], int lane; for (lane = 0; lane < lane_count; lane++) { - u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); - u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + u8 this_v = dp_get_adjust_request_voltage(link_status, lane); + u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", lane, @@ -347,6 +420,37 @@ static int dp_get_max_dp_pix_clock(int link_rate, return (link_rate * lane_num * 8) / bpp; } +static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE]) +{ + switch (dpcd[DP_MAX_LINK_RATE]) { + case DP_LINK_BW_1_62: + default: + return 162000; + case DP_LINK_BW_2_7: + return 270000; + case DP_LINK_BW_5_4: + return 540000; + } +} + +static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE]) +{ + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; +} + +static u8 dp_get_dp_link_rate_coded(int link_rate) +{ + switch (link_rate) { + case 162000: + default: + return DP_LINK_BW_1_62; + case 270000: + return DP_LINK_BW_2_7; + case 540000: + return DP_LINK_BW_5_4; + } +} + /***** radeon specific DP functions *****/ /* First get the min lane# when low rate is used according to pixel clock @@ -358,8 +462,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, int pix_clock) { int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); - int max_link_rate = drm_dp_max_link_rate(dpcd); - int max_lane_num = drm_dp_max_lane_count(dpcd); + int max_link_rate = dp_get_max_link_rate(dpcd); + int max_lane_num = dp_get_max_lane_number(dpcd); int lane_num; int max_dp_pix_clock; @@ -396,7 +500,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, return 540000; } - return drm_dp_max_link_rate(dpcd); + return dp_get_max_link_rate(dpcd); } static u8 radeon_dp_encoder_service(struct radeon_device *rdev, @@ -447,15 +551,14 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; - u8 msg[DP_DPCD_SIZE]; + u8 msg[25]; int ret, i; - ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, - DP_DPCD_SIZE, 0); + ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0); if (ret > 0) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + memcpy(dig_connector->dpcd, msg, 8); DRM_DEBUG_KMS("DPCD: "); - for (i = 0; i < DP_DPCD_SIZE; i++) + for (i = 0; i < 8; i++) DRM_DEBUG_KMS("%02x ", msg[i]); DRM_DEBUG_KMS("\n"); @@ -561,7 +664,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) if (!radeon_dp_get_link_status(radeon_connector, link_status)) return false; - if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) + if (dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; return true; } @@ -574,8 +677,9 @@ struct radeon_dp_link_train_info { int enc_id; int dp_clock; int dp_lane_count; + int rd_interval; bool tp3_supported; - u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 dpcd[8]; u8 train_set[4]; u8 link_status[DP_LINK_STATUS_SIZE]; u8 tries; @@ -661,7 +765,7 
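To make the nibble addressing in dp_get_lane_status() concrete — a worked example only, using a plain array instead of the DP_LANE0_1_STATUS-relative indexing the patch uses:

#include <stdint.h>
#include <stdio.h>

/* Each DPCD lane-status byte holds two lanes, 4 bits per lane:
 * byte (lane >> 1), nibble selected by (lane & 1) * 4. */
static uint8_t lane_status_nibble(const uint8_t *status, int lane)
{
        uint8_t byte = status[lane >> 1];
        return (byte >> ((lane & 1) * 4)) & 0xf;
}

int main(void)
{
        /* Example data: lanes 0-2 report 0x7 (CR done, EQ done, symbol
         * locked), lane 3 only 0x1 (CR done). */
        uint8_t status[2] = { 0x77, 0x17 };
        int lane;

        for (lane = 0; lane < 4; lane++)
                printf("lane %d: 0x%x\n", lane, lane_status_nibble(status, lane));
        return 0;
}

With data like this, dp_clock_recovery_ok() would pass for all four lanes while dp_channel_eq_ok() would fail on lane 3.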
@@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); /* set the link rate on the sink */ - tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock); + tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock); radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); /* start training on the source */ @@ -717,14 +821,17 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) dp_info->tries = 0; voltage = 0xff; while (1) { - drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); + if (dp_info->rd_interval == 0) + udelay(100); + else + mdelay(dp_info->rd_interval * 4); if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { DRM_ERROR("displayport link status failed\n"); break; } - if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { + if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { clock_recovery = true; break; } @@ -779,14 +886,17 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) dp_info->tries = 0; channel_eq = false; while (1) { - drm_dp_link_train_channel_eq_delay(dp_info->dpcd); + if (dp_info->rd_interval == 0) + udelay(400); + else + mdelay(dp_info->rd_interval * 4); if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { DRM_ERROR("displayport link status failed\n"); break; } - if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { + if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { channel_eq = true; break; } @@ -864,13 +974,14 @@ void radeon_dp_link_train(struct drm_encoder *encoder, else dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; + dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL); tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) dp_info.tp3_supported = true; else dp_info.tp3_supported = false; - memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); + memcpy(dp_info.dpcd, dig_connector->dpcd, 8); dp_info.rdev = rdev; dp_info.encoder = encoder; dp_info.connector = connector; diff --git a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c index ba498f8e47a2..010bae19554a 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1625,7 +1625,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) + if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 14313ad43b76..219942c660d7 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav break; udelay(1); } + } else { + save->crtc_enabled[i] = false; } } @@ -1372,7 +1374,7 @@ void 
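The clock-recovery and channel-equalisation loops above replace the drm_dp_link_train_*_delay() calls with an explicit DP_TRAINING_AUX_RD_INTERVAL rule. A hedged sketch of that rule as one helper (name invented; udelay/mdelay are the kernel primitives the patch itself uses):

#include <stdbool.h>

/* rd_interval == 0 -> spec minimum: 100 us for clock recovery,
 * 400 us for channel equalisation; otherwise rd_interval * 4 ms,
 * matching the open-coded delays in radeon_dp_link_train_cr/_ce. */
static void dp_training_delay(int rd_interval, bool channel_eq)
{
        if (rd_interval == 0)
                udelay(channel_eq ? 400 : 100);
        else
                mdelay(rd_interval * 4);
}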
evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); for (i = 0; i < rdev->num_crtc; i++) { - if (save->crtc_enabled) { + if (save->crtc_enabled[i]) { if (ASIC_IS_DCE6(rdev)) { tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_cs.c b/trunk/drivers/gpu/drm/radeon/evergreen_cs.c index 95e6318b6268..c042e497e450 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen_cs.c @@ -2725,6 +2725,9 @@ static bool evergreen_vm_reg_valid(u32 reg) /* check config regs */ switch (reg) { case GRBM_GFX_INDEX: + case CP_STRMOUT_CNTL: + case CP_COHER_CNTL: + case CP_COHER_SIZE: case VGT_VTX_VECT_EJECT_REG: case VGT_CACHE_INVALIDATION: case VGT_GS_VERTEX_REUSE: diff --git a/trunk/drivers/gpu/drm/radeon/evergreend.h b/trunk/drivers/gpu/drm/radeon/evergreend.h index df542f1a5dfb..2bc0f6a1b428 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreend.h +++ b/trunk/drivers/gpu/drm/radeon/evergreend.h @@ -91,6 +91,10 @@ #define FB_READ_EN (1 << 0) #define FB_WRITE_EN (1 << 1) +#define CP_STRMOUT_CNTL 0x84FC + +#define CP_COHER_CNTL 0x85F0 +#define CP_COHER_SIZE 0x85F4 #define CP_COHER_BASE 0x85F8 #define CP_STALLED_STAT1 0x8674 #define CP_STALLED_STAT2 0x8678 diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index 169ecc9628ea..cda280d157da 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -1424,7 +1424,13 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev, int r600_count_pipe_bits(uint32_t val) { - return hweight32(val); + int i, ret = 0; + + for (i = 0; i < 32; i++) { + ret += val & 1; + val >>= 1; + } + return ret; } static void r600_gpu_init(struct radeon_device *rdev) diff --git a/trunk/drivers/gpu/drm/radeon/r600_cp.c b/trunk/drivers/gpu/drm/radeon/r600_cp.c index be85f75aedda..2514123d2d00 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_cp.c +++ b/trunk/drivers/gpu/drm/radeon/r600_cp.c @@ -721,7 +721,12 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, static int r600_count_pipe_bits(uint32_t val) { - return hweight32(val); + int i, ret = 0; + for (i = 0; i < 32; i++) { + ret += val & 1; + val >>= 1; + } + return ret; } static void r600_gfx_init(struct drm_device *dev, diff --git a/trunk/drivers/gpu/drm/radeon/radeon_agp.c b/trunk/drivers/gpu/drm/radeon/radeon_agp.c index 10ea17a6b2a6..42433344cb1b 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_agp.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_agp.c @@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_DELL, 0x00e3, 2}, - /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ + /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_DELL, 0x0149, 1}, + /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_IBM, 0x0531, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0061, 1}, diff 
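The r600_count_pipe_bits() hunks above swap hweight32() back for an explicit loop; both count set bits, as this standalone comparison sketches (userspace test only, __builtin_popcount standing in for hweight32):

#include <assert.h>
#include <stdint.h>

/* Open-coded population count, as in the r600_count_pipe_bits hunks. */
static int count_bits_loop(uint32_t val)
{
        int i, ret = 0;

        for (i = 0; i < 32; i++) {
                ret += val & 1;
                val >>= 1;
        }
        return ret;
}

int main(void)
{
        uint32_t samples[] = { 0x0, 0x1, 0xff, 0xdeadbeef, 0xffffffff };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                assert(count_bits_loop(samples[i]) ==
                       __builtin_popcount(samples[i]));
        return 0;
}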
--git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c index 47bf162ab9c6..b884c362a8c2 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1599,7 +1599,7 @@ radeon_add_atom_connector(struct drm_device *dev, connector->interlace_allowed = true; connector->doublescan_allowed = true; radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); break; @@ -1608,13 +1608,13 @@ radeon_add_atom_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: case DRM_MODE_CONNECTOR_DisplayPort: - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); subpixel_order = SubPixelHorizontalRGB; @@ -1625,14 +1625,14 @@ radeon_add_atom_connector(struct drm_device *dev, connector->doublescan_allowed = false; if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } break; case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; @@ -1651,7 +1651,7 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ @@ -1669,7 +1669,7 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ @@ -1692,23 +1692,23 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_ERROR("DVI: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } @@ -1732,17 +1732,17 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } @@ -1771,17 +1771,17 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } @@ -1806,7 +1806,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; @@ -1819,10 +1819,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_atombios_get_tv_info(rdev)); /* no HPD on analog connectors */ @@ -1843,7 +1843,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; @@ -1922,7 +1922,7 @@ radeon_add_legacy_connector(struct drm_device *dev, DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ @@ -1940,7 +1940,7 @@ radeon_add_legacy_connector(struct drm_device *dev, DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ @@ -1959,7 +1959,7 @@ radeon_add_legacy_connector(struct drm_device *dev, } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } @@ -1983,10 +1983,10 @@ radeon_add_legacy_connector(struct drm_device *dev, */ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) radeon_connector->dac_load_detect = false; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, radeon_connector->dac_load_detect); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); /* no HPD on analog connectors */ @@ -2002,7 +2002,7 @@ radeon_add_legacy_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) DRM_ERROR("LVDS: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index 8c1a83c6eb07..07eb84e8a8a4 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -281,15 +281,12 @@ static struct drm_driver driver_old = { static struct drm_driver kms_driver; -static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) +static void radeon_kick_out_firmware_fb(struct pci_dev *pdev) { struct apertures_struct *ap; bool primary = false; ap = alloc_apertures(1); - if (!ap) - return -ENOMEM; - ap->ranges[0].base = pci_resource_start(pdev, 0); ap->ranges[0].size = pci_resource_len(pdev, 0); @@ -298,19 +295,13 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) #endif remove_conflicting_framebuffers(ap, "radeondrmfb", primary); kfree(ap); - - return 0; } static int __devinit radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - int ret; - /* Get rid of things like offb */ - ret = radeon_kick_out_firmware_fb(pdev); - if (ret) - return ret; + radeon_kick_out_firmware_fb(pdev); return drm_get_pci_dev(pdev, ent, &kms_driver); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index 8690be757d80..4debd60e5aa6 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -1237,7 +1237,7 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev, { struct radeon_bo_va *bo_va; - BUG_ON(!radeon_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->tbo.reserved)); list_for_each_entry(bo_va, &bo->va, bo_list) { bo_va->valid = false; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index d818b503b42f..92c5f473cf08 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -427,7 +427,7 @@ struct radeon_connector_atom_dig { uint32_t igp_lane_info; /* displayport */ struct radeon_i2c_chan *dp_i2c_bus; - u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 dpcd[8]; u8 dp_sink_type; int dp_clock; int dp_lane_count; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.c b/trunk/drivers/gpu/drm/radeon/radeon_object.c index 7c4b4bb05a36..b91118ccef86 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.c @@ -140,7 +140,7 @@ int radeon_bo_create(struct radeon_device *rdev, /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, - &bo->placement, page_align, !kernel, NULL, + &bo->placement, page_align, 0, !kernel, NULL, acc_size, sg, &radeon_ttm_bo_destroy); up_read(&rdev->pm.mclk_lock); if (unlikely(r != 0)) { @@ -384,7 +384,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) int steal; int i; - BUG_ON(!radeon_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->tbo.reserved)); if (!bo->tiling_flags) return 0; @@ -510,7 +510,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch) { - BUG_ON(!radeon_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->tbo.reserved)); if (tiling_flags) *tiling_flags = bo->tiling_flags; if (pitch) @@ -520,7 +520,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo, int 
radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop) { - BUG_ON(!radeon_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->tbo.reserved)); if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) return 0; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.h b/trunk/drivers/gpu/drm/radeon/radeon_object.h index 5fc86b03043b..93cd491fff2e 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.h @@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo) static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) { - return ttm_bo_is_reserved(&bo->tbo); + return !!atomic_read(&bo->tbo.reserved); } static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c index 563c8edcb03b..5ebe1b3e5db2 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c @@ -265,7 +265,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ &fence); /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, (void *)fence, + r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); radeon_fence_unref(&fence); return r; @@ -471,12 +471,13 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re { } -static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) +static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, + bool lazy, bool interruptible) { return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); } -static int radeon_sync_obj_flush(void *sync_obj) +static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg) { return 0; } @@ -491,7 +492,7 @@ static void *radeon_sync_obj_ref(void *sync_obj) return radeon_fence_ref((struct radeon_fence *)sync_obj); } -static bool radeon_sync_obj_signaled(void *sync_obj) +static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg) { return radeon_fence_signaled((struct radeon_fence *)sync_obj); } diff --git a/trunk/drivers/gpu/drm/radeon/rv770d.h b/trunk/drivers/gpu/drm/radeon/rv770d.h index b0adfc595d75..e2d9dc8e751e 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770d.h +++ b/trunk/drivers/gpu/drm/radeon/rv770d.h @@ -551,6 +551,54 @@ #define HDMI_OFFSET0 (0x7400 - 0x7400) #define HDMI_OFFSET1 (0x7800 - 0x7400) +/* DCE3.2 ELD audio interface */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */ +#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */ +#define 
AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */ +# define MAX_CHANNELS(x) (((x) & 0x7) << 0) +/* max channels minus one. 7 = 8 channels */ +# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8) +# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16) +# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */ +/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO + * bit0 = 32 kHz + * bit1 = 44.1 kHz + * bit2 = 48 kHz + * bit3 = 88.2 kHz + * bit4 = 96 kHz + * bit5 = 176.4 kHz + * bit6 = 192 kHz + */ + +#define AZ_HOT_PLUG_CONTROL 0x7300 +# define AZ_FORCE_CODEC_WAKE (1 << 0) +# define PIN0_JACK_DETECTION_ENABLE (1 << 4) +# define PIN1_JACK_DETECTION_ENABLE (1 << 5) +# define PIN2_JACK_DETECTION_ENABLE (1 << 6) +# define PIN3_JACK_DETECTION_ENABLE (1 << 7) +# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8) +# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9) +# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10) +# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11) +# define CODEC_HOT_PLUG_ENABLE (1 << 12) +# define PIN0_AUDIO_ENABLED (1 << 24) +# define PIN1_AUDIO_ENABLED (1 << 25) +# define PIN2_AUDIO_ENABLED (1 << 26) +# define PIN3_AUDIO_ENABLED (1 << 27) +# define AUDIO_ENABLED (1 << 31) + + #define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 diff --git a/trunk/drivers/gpu/drm/radeon/si.c b/trunk/drivers/gpu/drm/radeon/si.c index b0db712060fb..4422d630b33b 100644 --- a/trunk/drivers/gpu/drm/radeon/si.c +++ b/trunk/drivers/gpu/drm/radeon/si.c @@ -2474,6 +2474,7 @@ static bool si_vm_reg_valid(u32 reg) /* check config regs */ switch (reg) { case GRBM_GFX_INDEX: + case CP_STRMOUT_CNTL: case VGT_VTX_VECT_EJECT_REG: case VGT_CACHE_INVALIDATION: case VGT_ESGS_RING_SIZE: diff --git a/trunk/drivers/gpu/drm/radeon/sid.h b/trunk/drivers/gpu/drm/radeon/sid.h index 7d2a20e56577..a8871afc5b4e 100644 --- a/trunk/drivers/gpu/drm/radeon/sid.h +++ b/trunk/drivers/gpu/drm/radeon/sid.h @@ -424,6 +424,7 @@ # define RDERR_INT_ENABLE (1 << 0) # define GUI_IDLE_INT_ENABLE (1 << 19) +#define CP_STRMOUT_CNTL 0x84FC #define SCRATCH_REG0 0x8500 #define SCRATCH_REG1 0x8504 #define SCRATCH_REG2 0x8508 diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index d917a411ca85..0e7a9306bd0c 100644 --- a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev, connector->encoder = encoder; drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); return 0; diff --git a/trunk/drivers/gpu/drm/tegra/Kconfig b/trunk/drivers/gpu/drm/tegra/Kconfig deleted file mode 100644 index be1daf7344d3..000000000000 --- a/trunk/drivers/gpu/drm/tegra/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ -config DRM_TEGRA - tristate "NVIDIA Tegra DRM" - depends on DRM && OF && ARCH_TEGRA - select DRM_KMS_HELPER - select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - help - Choose this option if you have an NVIDIA Tegra SoC. - - To compile this driver as a module, choose M here: the module - will be called tegra-drm. 
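As an example of how the DCE3.2 audio-descriptor fields added in the rv770d.h hunk combine — the descriptor value here is illustrative, with the field macros copied locally so the snippet stands alone:

#include <stdint.h>
#include <stdio.h>

/* Local copies of the field macros from the rv770d.h hunk above. */
#define MAX_CHANNELS(x)                  (((x) & 0x7) << 0)
#define SUPPORTED_FREQUENCIES(x)         (((x) & 0xff) << 8)
#define DESCRIPTOR_BYTE_2(x)             (((x) & 0xff) << 16)
#define SUPPORTED_FREQUENCIES_STEREO(x)  (((x) & 0xff) << 24)

int main(void)
{
        /* 8-channel LPCM: MAX_CHANNELS takes channels minus one (7),
         * frequency bits 0-2 select 32/44.1/48 kHz. */
        uint32_t desc = MAX_CHANNELS(7) |
                        SUPPORTED_FREQUENCIES(0x07) |
                        SUPPORTED_FREQUENCIES_STEREO(0x07);

        printf("AUDIO_DESCRIPTOR0 = 0x%08x\n", desc); /* 0x07000707 */
        return 0;
}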
- -if DRM_TEGRA - -config DRM_TEGRA_DEBUG - bool "NVIDIA Tegra DRM debug support" - help - Say yes here to enable debugging support. - -endif diff --git a/trunk/drivers/gpu/drm/tegra/Makefile b/trunk/drivers/gpu/drm/tegra/Makefile deleted file mode 100644 index 80f73d1315d0..000000000000 --- a/trunk/drivers/gpu/drm/tegra/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -ccflags-y := -Iinclude/drm -ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG - -tegra-drm-y := drm.o fb.o dc.o host1x.o -tegra-drm-y += output.o rgb.o hdmi.o - -obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o diff --git a/trunk/drivers/gpu/drm/tegra/dc.c b/trunk/drivers/gpu/drm/tegra/dc.c deleted file mode 100644 index 074410371e2a..000000000000 --- a/trunk/drivers/gpu/drm/tegra/dc.c +++ /dev/null @@ -1,834 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include - -#include - -#include "drm.h" -#include "dc.h" - -struct tegra_dc_window { - fixed20_12 x; - fixed20_12 y; - fixed20_12 w; - fixed20_12 h; - unsigned int outx; - unsigned int outy; - unsigned int outw; - unsigned int outh; - unsigned int stride; - unsigned int fmt; -}; - -static const struct drm_crtc_funcs tegra_crtc_funcs = { - .set_config = drm_crtc_helper_set_config, - .destroy = drm_crtc_cleanup, -}; - -static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode) -{ -} - -static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - return true; -} - -static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v, - unsigned int bpp) -{ - fixed20_12 outf = dfixed_init(out); - u32 dda_inc; - int max; - - if (v) - max = 15; - else { - switch (bpp) { - case 2: - max = 8; - break; - - default: - WARN_ON_ONCE(1); - /* fallthrough */ - case 4: - max = 4; - break; - } - } - - outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1)); - inf.full -= dfixed_const(1); - - dda_inc = dfixed_div(inf, outf); - dda_inc = min_t(u32, dda_inc, dfixed_const(max)); - - return dda_inc; -} - -static inline u32 compute_initial_dda(fixed20_12 in) -{ - return dfixed_frac(in); -} - -static int tegra_dc_set_timings(struct tegra_dc *dc, - struct drm_display_mode *mode) -{ - /* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */ - unsigned int h_ref_to_sync = 0; - unsigned int v_ref_to_sync = 0; - unsigned long value; - - tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS); - - value = (v_ref_to_sync << 16) | h_ref_to_sync; - tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC); - - value = ((mode->vsync_end - mode->vsync_start) << 16) | - ((mode->hsync_end - mode->hsync_start) << 0); - tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH); - - value = ((mode->vsync_start - mode->vdisplay) << 16) | - ((mode->hsync_start - mode->hdisplay) << 0); - tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH); - - value = ((mode->vtotal - mode->vsync_end) << 16) | - ((mode->htotal - mode->hsync_end) << 0); - tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH); - - value = (mode->vdisplay << 16) | mode->hdisplay; - tegra_dc_writel(dc, value, DC_DISP_ACTIVE); - - return 0; -} - -static int tegra_crtc_setup_clk(struct drm_crtc *crtc, - struct drm_display_mode *mode, - unsigned long *div) -{ - unsigned long pclk = mode->clock 
* 1000, rate; - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_output *output = NULL; - struct drm_encoder *encoder; - long err; - - list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) - if (encoder->crtc == crtc) { - output = encoder_to_output(encoder); - break; - } - - if (!output) - return -ENODEV; - - /* - * This assumes that the display controller will divide its parent - * clock by 2 to generate the pixel clock. - */ - err = tegra_output_setup_clock(output, dc->clk, pclk * 2); - if (err < 0) { - dev_err(dc->dev, "failed to setup clock: %ld\n", err); - return err; - } - - rate = clk_get_rate(dc->clk); - *div = (rate * 2 / pclk) - 2; - - DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div); - - return 0; -} - -static int tegra_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted, - int x, int y, struct drm_framebuffer *old_fb) -{ - struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb); - struct tegra_dc *dc = to_tegra_dc(crtc); - unsigned int h_dda, v_dda, bpp; - struct tegra_dc_window win; - unsigned long div, value; - int err; - - err = tegra_crtc_setup_clk(crtc, mode, &div); - if (err) { - dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err); - return err; - } - - /* program display mode */ - tegra_dc_set_timings(dc, mode); - - value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; - tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS); - - value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1)); - value &= ~LVS_OUTPUT_POLARITY_LOW; - value &= ~LHS_OUTPUT_POLARITY_LOW; - tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1)); - - value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB | - DISP_ORDER_RED_BLUE; - tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL); - - tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS); - - value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1; - tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); - - /* setup window parameters */ - memset(&win, 0, sizeof(win)); - win.x.full = dfixed_const(0); - win.y.full = dfixed_const(0); - win.w.full = dfixed_const(mode->hdisplay); - win.h.full = dfixed_const(mode->vdisplay); - win.outx = 0; - win.outy = 0; - win.outw = mode->hdisplay; - win.outh = mode->vdisplay; - - switch (crtc->fb->pixel_format) { - case DRM_FORMAT_XRGB8888: - win.fmt = WIN_COLOR_DEPTH_B8G8R8A8; - break; - - case DRM_FORMAT_RGB565: - win.fmt = WIN_COLOR_DEPTH_B5G6R5; - break; - - default: - win.fmt = WIN_COLOR_DEPTH_B8G8R8A8; - WARN_ON(1); - break; - } - - bpp = crtc->fb->bits_per_pixel / 8; - win.stride = crtc->fb->pitches[0]; - - /* program window registers */ - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER); - value |= WINDOW_A_SELECT; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); - - tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH); - tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP); - - value = V_POSITION(win.outy) | H_POSITION(win.outx); - tegra_dc_writel(dc, value, DC_WIN_POSITION); - - value = V_SIZE(win.outh) | H_SIZE(win.outw); - tegra_dc_writel(dc, value, DC_WIN_SIZE); - - value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) | - H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp); - tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE); - - h_dda = compute_dda_inc(win.w, win.outw, false, bpp); - v_dda = compute_dda_inc(win.h, win.outh, true, bpp); - - value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda); - tegra_dc_writel(dc, value, DC_WIN_DDA_INC); - - h_dda = compute_initial_dda(win.x); - v_dda = compute_initial_dda(win.y); - - 
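/*
 * Illustrative sketch, separate from the patch hunk above: the h_dda/v_dda
 * values computed here are fixed-point (12 fractional bits) source-step
 * ratios produced by the removed compute_dda_inc()/compute_initial_dda()
 * helpers. The standalone program below mirrors that arithmetic with plain
 * integers (rounding details of dfixed_div() omitted); the 1024x768 ->
 * 1920x1080 window sizes are hypothetical example values, not taken from
 * the driver.
 */
#include <stdio.h>

#define FIXED_ONE (1u << 12)	/* one, in 20.12 fixed point */

static unsigned int dda_inc(unsigned int in, unsigned int out, unsigned int max)
{
	unsigned int inf  = in * FIXED_ONE - FIXED_ONE;		/* (in  - 1) in fixed point */
	unsigned int outf = out * FIXED_ONE - FIXED_ONE;	/* (out - 1) in fixed point */
	unsigned long long ratio;

	if (outf < FIXED_ONE)					/* same clamp as the max_t() above */
		outf = FIXED_ONE;

	ratio = ((unsigned long long)inf << 12) / outf;		/* fixed-point division */
	if (ratio > (unsigned long long)max * FIXED_ONE)	/* clamp to the hardware limit */
		ratio = (unsigned long long)max * FIXED_ONE;

	return (unsigned int)ratio;
}

int main(void)
{
	/* horizontal limit is 4 for 4 bytes per pixel, vertical limit is 15 */
	printf("h_dda=%#x v_dda=%#x\n", dda_inc(1024, 1920, 4), dda_inc(768, 1080, 15));
	return 0;
}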
tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA); - tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA); - - tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE); - tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE); - - tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR); - tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE); - tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp, - DC_WINBUF_ADDR_H_OFFSET); - tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET); - - value = WIN_ENABLE; - - if (bpp < 24) - value |= COLOR_EXPAND; - - tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); - - tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY); - tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN); - - return 0; -} - -static void tegra_crtc_prepare(struct drm_crtc *crtc) -{ - struct tegra_dc *dc = to_tegra_dc(crtc); - unsigned int syncpt; - unsigned long value; - - /* hardware initialization */ - tegra_periph_reset_deassert(dc->clk); - usleep_range(10000, 20000); - - if (dc->pipe) - syncpt = SYNCPT_VBLANK1; - else - syncpt = SYNCPT_VBLANK0; - - /* initialize display controller */ - tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); - tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | - WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); - - value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value |= DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - /* initialize timer */ - value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | - WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); - tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); - - value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | - WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); - tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); - - value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_MASK); - - value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); -} - -static void tegra_crtc_commit(struct drm_crtc *crtc) -{ - struct tegra_dc *dc = to_tegra_dc(crtc); - unsigned long update_mask; - unsigned long value; - - update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ; - - tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL); - - value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE); - value |= FRAME_END_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); - - value = tegra_dc_readl(dc, DC_CMD_INT_MASK); - value |= FRAME_END_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_MASK); - - tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL); -} - -static void tegra_crtc_load_lut(struct drm_crtc *crtc) -{ -} - -static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { - .dpms = tegra_crtc_dpms, - .mode_fixup = tegra_crtc_mode_fixup, - .mode_set = tegra_crtc_mode_set, - .prepare = tegra_crtc_prepare, - .commit = tegra_crtc_commit, - .load_lut = tegra_crtc_load_lut, -}; - -static irqreturn_t tegra_drm_irq(int irq, void *data) -{ - struct tegra_dc *dc = data; - unsigned long status; - - status = tegra_dc_readl(dc, DC_CMD_INT_STATUS); - tegra_dc_writel(dc, status, DC_CMD_INT_STATUS); - - if 
(status & FRAME_END_INT) { - /* - dev_dbg(dc->dev, "%s(): frame end\n", __func__); - */ - } - - if (status & VBLANK_INT) { - /* - dev_dbg(dc->dev, "%s(): vertical blank\n", __func__); - */ - drm_handle_vblank(dc->base.dev, dc->pipe); - } - - if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) { - /* - dev_dbg(dc->dev, "%s(): underflow\n", __func__); - */ - } - - return IRQ_HANDLED; -} - -static int tegra_dc_show_regs(struct seq_file *s, void *data) -{ - struct drm_info_node *node = s->private; - struct tegra_dc *dc = node->info_ent->data; - -#define DUMP_REG(name) \ - seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \ - tegra_dc_readl(dc, name)) - - DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT); - DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); - DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR); - DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT); - DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL); - DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR); - DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT); - DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL); - DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR); - DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT); - DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL); - DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR); - DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC); - DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0); - DUMP_REG(DC_CMD_DISPLAY_COMMAND); - DUMP_REG(DC_CMD_SIGNAL_RAISE); - DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL); - DUMP_REG(DC_CMD_INT_STATUS); - DUMP_REG(DC_CMD_INT_MASK); - DUMP_REG(DC_CMD_INT_ENABLE); - DUMP_REG(DC_CMD_INT_TYPE); - DUMP_REG(DC_CMD_INT_POLARITY); - DUMP_REG(DC_CMD_SIGNAL_RAISE1); - DUMP_REG(DC_CMD_SIGNAL_RAISE2); - DUMP_REG(DC_CMD_SIGNAL_RAISE3); - DUMP_REG(DC_CMD_STATE_ACCESS); - DUMP_REG(DC_CMD_STATE_CONTROL); - DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER); - DUMP_REG(DC_CMD_REG_ACT_CONTROL); - DUMP_REG(DC_COM_CRC_CONTROL); - DUMP_REG(DC_COM_CRC_CHECKSUM); - DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0)); - DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2)); - DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3)); - DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0)); - DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2)); - DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3)); - DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0)); - DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2)); - DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3)); - DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0)); - DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1)); - DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2)); - DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3)); - DUMP_REG(DC_COM_PIN_INPUT_DATA(0)); - DUMP_REG(DC_COM_PIN_INPUT_DATA(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6)); - DUMP_REG(DC_COM_PIN_MISC_CONTROL); - DUMP_REG(DC_COM_PIN_PM0_CONTROL); - DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE); - DUMP_REG(DC_COM_PIN_PM1_CONTROL); - DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE); - DUMP_REG(DC_COM_SPI_CONTROL); - DUMP_REG(DC_COM_SPI_START_BYTE); - DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB); - DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD); - DUMP_REG(DC_COM_HSPI_CS_DC); - DUMP_REG(DC_COM_SCRATCH_REGISTER_A); - DUMP_REG(DC_COM_SCRATCH_REGISTER_B); - DUMP_REG(DC_COM_GPIO_CTRL); - DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER); - DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED); - DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0); - DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1); - DUMP_REG(DC_DISP_DISP_WIN_OPTIONS); - 
DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY); - DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); - DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS); - DUMP_REG(DC_DISP_REF_TO_SYNC); - DUMP_REG(DC_DISP_SYNC_WIDTH); - DUMP_REG(DC_DISP_BACK_PORCH); - DUMP_REG(DC_DISP_ACTIVE); - DUMP_REG(DC_DISP_FRONT_PORCH); - DUMP_REG(DC_DISP_H_PULSE0_CONTROL); - DUMP_REG(DC_DISP_H_PULSE0_POSITION_A); - DUMP_REG(DC_DISP_H_PULSE0_POSITION_B); - DUMP_REG(DC_DISP_H_PULSE0_POSITION_C); - DUMP_REG(DC_DISP_H_PULSE0_POSITION_D); - DUMP_REG(DC_DISP_H_PULSE1_CONTROL); - DUMP_REG(DC_DISP_H_PULSE1_POSITION_A); - DUMP_REG(DC_DISP_H_PULSE1_POSITION_B); - DUMP_REG(DC_DISP_H_PULSE1_POSITION_C); - DUMP_REG(DC_DISP_H_PULSE1_POSITION_D); - DUMP_REG(DC_DISP_H_PULSE2_CONTROL); - DUMP_REG(DC_DISP_H_PULSE2_POSITION_A); - DUMP_REG(DC_DISP_H_PULSE2_POSITION_B); - DUMP_REG(DC_DISP_H_PULSE2_POSITION_C); - DUMP_REG(DC_DISP_H_PULSE2_POSITION_D); - DUMP_REG(DC_DISP_V_PULSE0_CONTROL); - DUMP_REG(DC_DISP_V_PULSE0_POSITION_A); - DUMP_REG(DC_DISP_V_PULSE0_POSITION_B); - DUMP_REG(DC_DISP_V_PULSE0_POSITION_C); - DUMP_REG(DC_DISP_V_PULSE1_CONTROL); - DUMP_REG(DC_DISP_V_PULSE1_POSITION_A); - DUMP_REG(DC_DISP_V_PULSE1_POSITION_B); - DUMP_REG(DC_DISP_V_PULSE1_POSITION_C); - DUMP_REG(DC_DISP_V_PULSE2_CONTROL); - DUMP_REG(DC_DISP_V_PULSE2_POSITION_A); - DUMP_REG(DC_DISP_V_PULSE3_CONTROL); - DUMP_REG(DC_DISP_V_PULSE3_POSITION_A); - DUMP_REG(DC_DISP_M0_CONTROL); - DUMP_REG(DC_DISP_M1_CONTROL); - DUMP_REG(DC_DISP_DI_CONTROL); - DUMP_REG(DC_DISP_PP_CONTROL); - DUMP_REG(DC_DISP_PP_SELECT_A); - DUMP_REG(DC_DISP_PP_SELECT_B); - DUMP_REG(DC_DISP_PP_SELECT_C); - DUMP_REG(DC_DISP_PP_SELECT_D); - DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL); - DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL); - DUMP_REG(DC_DISP_DISP_COLOR_CONTROL); - DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS); - DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS); - DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS); - DUMP_REG(DC_DISP_LCD_SPI_OPTIONS); - DUMP_REG(DC_DISP_BORDER_COLOR); - DUMP_REG(DC_DISP_COLOR_KEY0_LOWER); - DUMP_REG(DC_DISP_COLOR_KEY0_UPPER); - DUMP_REG(DC_DISP_COLOR_KEY1_LOWER); - DUMP_REG(DC_DISP_COLOR_KEY1_UPPER); - DUMP_REG(DC_DISP_CURSOR_FOREGROUND); - DUMP_REG(DC_DISP_CURSOR_BACKGROUND); - DUMP_REG(DC_DISP_CURSOR_START_ADDR); - DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS); - DUMP_REG(DC_DISP_CURSOR_POSITION); - DUMP_REG(DC_DISP_CURSOR_POSITION_NS); - DUMP_REG(DC_DISP_INIT_SEQ_CONTROL); - DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A); - DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B); - DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C); - DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D); - DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL); - DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST); - DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST); - DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST); - DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST); - DUMP_REG(DC_DISP_DAC_CRT_CTRL); - DUMP_REG(DC_DISP_DISP_MISC_CONTROL); - DUMP_REG(DC_DISP_SD_CONTROL); - DUMP_REG(DC_DISP_SD_CSC_COEFF); - DUMP_REG(DC_DISP_SD_LUT(0)); - DUMP_REG(DC_DISP_SD_LUT(1)); - DUMP_REG(DC_DISP_SD_LUT(2)); - DUMP_REG(DC_DISP_SD_LUT(3)); - DUMP_REG(DC_DISP_SD_LUT(4)); - DUMP_REG(DC_DISP_SD_LUT(5)); - DUMP_REG(DC_DISP_SD_LUT(6)); - DUMP_REG(DC_DISP_SD_LUT(7)); - DUMP_REG(DC_DISP_SD_LUT(8)); - DUMP_REG(DC_DISP_SD_FLICKER_CONTROL); - DUMP_REG(DC_DISP_DC_PIXEL_COUNT); - DUMP_REG(DC_DISP_SD_HISTOGRAM(0)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(1)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(2)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(3)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(4)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(5)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(6)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(7)); - 
DUMP_REG(DC_DISP_SD_BL_TF(0)); - DUMP_REG(DC_DISP_SD_BL_TF(1)); - DUMP_REG(DC_DISP_SD_BL_TF(2)); - DUMP_REG(DC_DISP_SD_BL_TF(3)); - DUMP_REG(DC_DISP_SD_BL_CONTROL); - DUMP_REG(DC_DISP_SD_HW_K_VALUES); - DUMP_REG(DC_DISP_SD_MAN_K_VALUES); - DUMP_REG(DC_WIN_WIN_OPTIONS); - DUMP_REG(DC_WIN_BYTE_SWAP); - DUMP_REG(DC_WIN_BUFFER_CONTROL); - DUMP_REG(DC_WIN_COLOR_DEPTH); - DUMP_REG(DC_WIN_POSITION); - DUMP_REG(DC_WIN_SIZE); - DUMP_REG(DC_WIN_PRESCALED_SIZE); - DUMP_REG(DC_WIN_H_INITIAL_DDA); - DUMP_REG(DC_WIN_V_INITIAL_DDA); - DUMP_REG(DC_WIN_DDA_INC); - DUMP_REG(DC_WIN_LINE_STRIDE); - DUMP_REG(DC_WIN_BUF_STRIDE); - DUMP_REG(DC_WIN_UV_BUF_STRIDE); - DUMP_REG(DC_WIN_BUFFER_ADDR_MODE); - DUMP_REG(DC_WIN_DV_CONTROL); - DUMP_REG(DC_WIN_BLEND_NOKEY); - DUMP_REG(DC_WIN_BLEND_1WIN); - DUMP_REG(DC_WIN_BLEND_2WIN_X); - DUMP_REG(DC_WIN_BLEND_2WIN_Y); - DUMP_REG(DC_WIN_BLEND32WIN_XY); - DUMP_REG(DC_WIN_HP_FETCH_CONTROL); - DUMP_REG(DC_WINBUF_START_ADDR); - DUMP_REG(DC_WINBUF_START_ADDR_NS); - DUMP_REG(DC_WINBUF_START_ADDR_U); - DUMP_REG(DC_WINBUF_START_ADDR_U_NS); - DUMP_REG(DC_WINBUF_START_ADDR_V); - DUMP_REG(DC_WINBUF_START_ADDR_V_NS); - DUMP_REG(DC_WINBUF_ADDR_H_OFFSET); - DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS); - DUMP_REG(DC_WINBUF_ADDR_V_OFFSET); - DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS); - DUMP_REG(DC_WINBUF_UFLOW_STATUS); - DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS); - DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS); - DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS); - -#undef DUMP_REG - - return 0; -} - -static struct drm_info_list debugfs_files[] = { - { "regs", tegra_dc_show_regs, 0, NULL }, -}; - -static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor) -{ - unsigned int i; - char *name; - int err; - - name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe); - dc->debugfs = debugfs_create_dir(name, minor->debugfs_root); - kfree(name); - - if (!dc->debugfs) - return -ENOMEM; - - dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), - GFP_KERNEL); - if (!dc->debugfs_files) { - err = -ENOMEM; - goto remove; - } - - for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) - dc->debugfs_files[i].data = dc; - - err = drm_debugfs_create_files(dc->debugfs_files, - ARRAY_SIZE(debugfs_files), - dc->debugfs, minor); - if (err < 0) - goto free; - - dc->minor = minor; - - return 0; - -free: - kfree(dc->debugfs_files); - dc->debugfs_files = NULL; -remove: - debugfs_remove(dc->debugfs); - dc->debugfs = NULL; - - return err; -} - -static int tegra_dc_debugfs_exit(struct tegra_dc *dc) -{ - drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files), - dc->minor); - dc->minor = NULL; - - kfree(dc->debugfs_files); - dc->debugfs_files = NULL; - - debugfs_remove(dc->debugfs); - dc->debugfs = NULL; - - return 0; -} - -static int tegra_dc_drm_init(struct host1x_client *client, - struct drm_device *drm) -{ - struct tegra_dc *dc = host1x_client_to_dc(client); - int err; - - dc->pipe = drm->mode_config.num_crtc; - - drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs); - drm_mode_crtc_set_gamma_size(&dc->base, 256); - drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); - - err = tegra_dc_rgb_init(drm, dc); - if (err < 0 && err != -ENODEV) { - dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); - return err; - } - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_dc_debugfs_init(dc, drm->primary); - if (err < 0) - dev_err(dc->dev, "debugfs setup failed: %d\n", err); - } - - err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0, - dev_name(dc->dev), dc); - if (err < 0) { - dev_err(dc->dev, "failed to request 
IRQ#%u: %d\n", dc->irq, - err); - return err; - } - - return 0; -} - -static int tegra_dc_drm_exit(struct host1x_client *client) -{ - struct tegra_dc *dc = host1x_client_to_dc(client); - int err; - - devm_free_irq(dc->dev, dc->irq, dc); - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_dc_debugfs_exit(dc); - if (err < 0) - dev_err(dc->dev, "debugfs cleanup failed: %d\n", err); - } - - err = tegra_dc_rgb_exit(dc); - if (err) { - dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err); - return err; - } - - return 0; -} - -static const struct host1x_client_ops dc_client_ops = { - .drm_init = tegra_dc_drm_init, - .drm_exit = tegra_dc_drm_exit, -}; - -static int tegra_dc_probe(struct platform_device *pdev) -{ - struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); - struct resource *regs; - struct tegra_dc *dc; - int err; - - dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); - if (!dc) - return -ENOMEM; - - INIT_LIST_HEAD(&dc->list); - dc->dev = &pdev->dev; - - dc->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(dc->clk)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return PTR_ERR(dc->clk); - } - - err = clk_prepare_enable(dc->clk); - if (err < 0) - return err; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(&pdev->dev, "failed to get registers\n"); - return -ENXIO; - } - - dc->regs = devm_request_and_ioremap(&pdev->dev, regs); - if (!dc->regs) { - dev_err(&pdev->dev, "failed to remap registers\n"); - return -ENXIO; - } - - dc->irq = platform_get_irq(pdev, 0); - if (dc->irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ\n"); - return -ENXIO; - } - - INIT_LIST_HEAD(&dc->client.list); - dc->client.ops = &dc_client_ops; - dc->client.dev = &pdev->dev; - - err = tegra_dc_rgb_probe(dc); - if (err < 0 && err != -ENODEV) { - dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err); - return err; - } - - err = host1x_register_client(host1x, &dc->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to register host1x client: %d\n", - err); - return err; - } - - platform_set_drvdata(pdev, dc); - - return 0; -} - -static int tegra_dc_remove(struct platform_device *pdev) -{ - struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); - struct tegra_dc *dc = platform_get_drvdata(pdev); - int err; - - err = host1x_unregister_client(host1x, &dc->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", - err); - return err; - } - - clk_disable_unprepare(dc->clk); - - return 0; -} - -static struct of_device_id tegra_dc_of_match[] = { - { .compatible = "nvidia,tegra30-dc", }, - { .compatible = "nvidia,tegra20-dc", }, - { }, -}; - -struct platform_driver tegra_dc_driver = { - .driver = { - .name = "tegra-dc", - .owner = THIS_MODULE, - .of_match_table = tegra_dc_of_match, - }, - .probe = tegra_dc_probe, - .remove = tegra_dc_remove, -}; diff --git a/trunk/drivers/gpu/drm/tegra/dc.h b/trunk/drivers/gpu/drm/tegra/dc.h deleted file mode 100644 index 99977b5d5c36..000000000000 --- a/trunk/drivers/gpu/drm/tegra/dc.h +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef TEGRA_DC_H -#define TEGRA_DC_H 1 - -#define DC_CMD_GENERAL_INCR_SYNCPT 0x000 -#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001 -#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002 -#define DC_CMD_WIN_A_INCR_SYNCPT 0x008 -#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009 -#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a -#define DC_CMD_WIN_B_INCR_SYNCPT 0x010 -#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011 -#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012 -#define DC_CMD_WIN_C_INCR_SYNCPT 0x018 -#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019 -#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a -#define DC_CMD_CONT_SYNCPT_VSYNC 0x028 -#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031 -#define DC_CMD_DISPLAY_COMMAND 0x032 -#define DISP_CTRL_MODE_STOP (0 << 5) -#define DISP_CTRL_MODE_C_DISPLAY (1 << 5) -#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5) -#define DC_CMD_SIGNAL_RAISE 0x033 -#define DC_CMD_DISPLAY_POWER_CONTROL 0x036 -#define PW0_ENABLE (1 << 0) -#define PW1_ENABLE (1 << 2) -#define PW2_ENABLE (1 << 4) -#define PW3_ENABLE (1 << 6) -#define PW4_ENABLE (1 << 8) -#define PM0_ENABLE (1 << 16) -#define PM1_ENABLE (1 << 18) - -#define DC_CMD_INT_STATUS 0x037 -#define DC_CMD_INT_MASK 0x038 -#define DC_CMD_INT_ENABLE 0x039 -#define DC_CMD_INT_TYPE 0x03a -#define DC_CMD_INT_POLARITY 0x03b -#define CTXSW_INT (1 << 0) -#define FRAME_END_INT (1 << 1) -#define VBLANK_INT (1 << 2) -#define WIN_A_UF_INT (1 << 8) -#define WIN_B_UF_INT (1 << 9) -#define WIN_C_UF_INT (1 << 10) -#define WIN_A_OF_INT (1 << 14) -#define WIN_B_OF_INT (1 << 15) -#define WIN_C_OF_INT (1 << 16) - -#define DC_CMD_SIGNAL_RAISE1 0x03c -#define DC_CMD_SIGNAL_RAISE2 0x03d -#define DC_CMD_SIGNAL_RAISE3 0x03e - -#define DC_CMD_STATE_ACCESS 0x040 - -#define DC_CMD_STATE_CONTROL 0x041 -#define GENERAL_ACT_REQ (1 << 0) -#define WIN_A_ACT_REQ (1 << 1) -#define WIN_B_ACT_REQ (1 << 2) -#define WIN_C_ACT_REQ (1 << 3) -#define GENERAL_UPDATE (1 << 8) -#define WIN_A_UPDATE (1 << 9) -#define WIN_B_UPDATE (1 << 10) -#define WIN_C_UPDATE (1 << 11) -#define NC_HOST_TRIG (1 << 24) - -#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042 -#define WINDOW_A_SELECT (1 << 4) -#define WINDOW_B_SELECT (1 << 5) -#define WINDOW_C_SELECT (1 << 6) - -#define DC_CMD_REG_ACT_CONTROL 0x043 - -#define DC_COM_CRC_CONTROL 0x300 -#define DC_COM_CRC_CHECKSUM 0x301 -#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x)) -#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x)) -#define LVS_OUTPUT_POLARITY_LOW (1 << 28) -#define LHS_OUTPUT_POLARITY_LOW (1 << 30) -#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x)) -#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x)) -#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x)) -#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x)) - -#define DC_COM_PIN_MISC_CONTROL 0x31b -#define DC_COM_PIN_PM0_CONTROL 0x31c -#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d -#define DC_COM_PIN_PM1_CONTROL 0x31e -#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f - -#define DC_COM_SPI_CONTROL 0x320 -#define DC_COM_SPI_START_BYTE 0x321 -#define DC_COM_HSPI_WRITE_DATA_AB 0x322 -#define DC_COM_HSPI_WRITE_DATA_CD 0x323 -#define DC_COM_HSPI_CS_DC 0x324 -#define DC_COM_SCRATCH_REGISTER_A 0x325 -#define DC_COM_SCRATCH_REGISTER_B 0x326 -#define DC_COM_GPIO_CTRL 0x327 -#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328 -#define DC_COM_CRC_CHECKSUM_LATCHED 0x329 - -#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400 -#define H_PULSE_0_ENABLE (1 << 8) -#define H_PULSE_1_ENABLE (1 << 10) -#define H_PULSE_2_ENABLE (1 << 12) - -#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401 - -#define DC_DISP_DISP_WIN_OPTIONS 0x402 -#define HDMI_ENABLE (1 << 30) - -#define 
DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403 -#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24) -#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16) -#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8) -#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0) - -#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404 -#define CURSOR_DELAY(x) (((x) & 0x3f) << 24) -#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16) -#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8) -#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0) - -#define DC_DISP_DISP_TIMING_OPTIONS 0x405 -#define VSYNC_H_POSITION(x) ((x) & 0xfff) - -#define DC_DISP_REF_TO_SYNC 0x406 -#define DC_DISP_SYNC_WIDTH 0x407 -#define DC_DISP_BACK_PORCH 0x408 -#define DC_DISP_ACTIVE 0x409 -#define DC_DISP_FRONT_PORCH 0x40a -#define DC_DISP_H_PULSE0_CONTROL 0x40b -#define DC_DISP_H_PULSE0_POSITION_A 0x40c -#define DC_DISP_H_PULSE0_POSITION_B 0x40d -#define DC_DISP_H_PULSE0_POSITION_C 0x40e -#define DC_DISP_H_PULSE0_POSITION_D 0x40f -#define DC_DISP_H_PULSE1_CONTROL 0x410 -#define DC_DISP_H_PULSE1_POSITION_A 0x411 -#define DC_DISP_H_PULSE1_POSITION_B 0x412 -#define DC_DISP_H_PULSE1_POSITION_C 0x413 -#define DC_DISP_H_PULSE1_POSITION_D 0x414 -#define DC_DISP_H_PULSE2_CONTROL 0x415 -#define DC_DISP_H_PULSE2_POSITION_A 0x416 -#define DC_DISP_H_PULSE2_POSITION_B 0x417 -#define DC_DISP_H_PULSE2_POSITION_C 0x418 -#define DC_DISP_H_PULSE2_POSITION_D 0x419 -#define DC_DISP_V_PULSE0_CONTROL 0x41a -#define DC_DISP_V_PULSE0_POSITION_A 0x41b -#define DC_DISP_V_PULSE0_POSITION_B 0x41c -#define DC_DISP_V_PULSE0_POSITION_C 0x41d -#define DC_DISP_V_PULSE1_CONTROL 0x41e -#define DC_DISP_V_PULSE1_POSITION_A 0x41f -#define DC_DISP_V_PULSE1_POSITION_B 0x420 -#define DC_DISP_V_PULSE1_POSITION_C 0x421 -#define DC_DISP_V_PULSE2_CONTROL 0x422 -#define DC_DISP_V_PULSE2_POSITION_A 0x423 -#define DC_DISP_V_PULSE3_CONTROL 0x424 -#define DC_DISP_V_PULSE3_POSITION_A 0x425 -#define DC_DISP_M0_CONTROL 0x426 -#define DC_DISP_M1_CONTROL 0x427 -#define DC_DISP_DI_CONTROL 0x428 -#define DC_DISP_PP_CONTROL 0x429 -#define DC_DISP_PP_SELECT_A 0x42a -#define DC_DISP_PP_SELECT_B 0x42b -#define DC_DISP_PP_SELECT_C 0x42c -#define DC_DISP_PP_SELECT_D 0x42d - -#define PULSE_MODE_NORMAL (0 << 3) -#define PULSE_MODE_ONE_CLOCK (1 << 3) -#define PULSE_POLARITY_HIGH (0 << 4) -#define PULSE_POLARITY_LOW (1 << 4) -#define PULSE_QUAL_ALWAYS (0 << 6) -#define PULSE_QUAL_VACTIVE (2 << 6) -#define PULSE_QUAL_VACTIVE1 (3 << 6) -#define PULSE_LAST_START_A (0 << 8) -#define PULSE_LAST_END_A (1 << 8) -#define PULSE_LAST_START_B (2 << 8) -#define PULSE_LAST_END_B (3 << 8) -#define PULSE_LAST_START_C (4 << 8) -#define PULSE_LAST_END_C (5 << 8) -#define PULSE_LAST_START_D (6 << 8) -#define PULSE_LAST_END_D (7 << 8) - -#define PULSE_START(x) (((x) & 0xfff) << 0) -#define PULSE_END(x) (((x) & 0xfff) << 16) - -#define DC_DISP_DISP_CLOCK_CONTROL 0x42e -#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8) -#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8) -#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8) -#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8) -#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8) -#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8) -#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8) -#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8) -#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8) -#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8) -#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8) -#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8) -#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8) -#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff) - -#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f -#define DISP_DATA_FORMAT_DF1P1C (0 << 0) -#define DISP_DATA_FORMAT_DF1P2C24B 
(1 << 0) -#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0) -#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0) -#define DISP_DATA_FORMAT_DF2S (4 << 0) -#define DISP_DATA_FORMAT_DF3S (5 << 0) -#define DISP_DATA_FORMAT_DFSPI (6 << 0) -#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0) -#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0) -#define DISP_ALIGNMENT_MSB (0 << 8) -#define DISP_ALIGNMENT_LSB (1 << 8) -#define DISP_ORDER_RED_BLUE (0 << 9) -#define DISP_ORDER_BLUE_RED (1 << 9) - -#define DC_DISP_DISP_COLOR_CONTROL 0x430 -#define BASE_COLOR_SIZE666 (0 << 0) -#define BASE_COLOR_SIZE111 (1 << 0) -#define BASE_COLOR_SIZE222 (2 << 0) -#define BASE_COLOR_SIZE333 (3 << 0) -#define BASE_COLOR_SIZE444 (4 << 0) -#define BASE_COLOR_SIZE555 (5 << 0) -#define BASE_COLOR_SIZE565 (6 << 0) -#define BASE_COLOR_SIZE332 (7 << 0) -#define BASE_COLOR_SIZE888 (8 << 0) -#define DITHER_CONTROL_DISABLE (0 << 8) -#define DITHER_CONTROL_ORDERED (2 << 8) -#define DITHER_CONTROL_ERRDIFF (3 << 8) - -#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431 - -#define DC_DISP_DATA_ENABLE_OPTIONS 0x432 -#define DE_SELECT_ACTIVE_BLANK (0 << 0) -#define DE_SELECT_ACTIVE (1 << 0) -#define DE_SELECT_ACTIVE_IS (2 << 0) -#define DE_CONTROL_ONECLK (0 << 2) -#define DE_CONTROL_NORMAL (1 << 2) -#define DE_CONTROL_EARLY_EXT (2 << 2) -#define DE_CONTROL_EARLY (3 << 2) -#define DE_CONTROL_ACTIVE_BLANK (4 << 2) - -#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433 -#define DC_DISP_LCD_SPI_OPTIONS 0x434 -#define DC_DISP_BORDER_COLOR 0x435 -#define DC_DISP_COLOR_KEY0_LOWER 0x436 -#define DC_DISP_COLOR_KEY0_UPPER 0x437 -#define DC_DISP_COLOR_KEY1_LOWER 0x438 -#define DC_DISP_COLOR_KEY1_UPPER 0x439 - -#define DC_DISP_CURSOR_FOREGROUND 0x43c -#define DC_DISP_CURSOR_BACKGROUND 0x43d - -#define DC_DISP_CURSOR_START_ADDR 0x43e -#define DC_DISP_CURSOR_START_ADDR_NS 0x43f - -#define DC_DISP_CURSOR_POSITION 0x440 -#define DC_DISP_CURSOR_POSITION_NS 0x441 - -#define DC_DISP_INIT_SEQ_CONTROL 0x442 -#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443 -#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444 -#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445 -#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446 - -#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480 -#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481 -#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482 -#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483 -#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484 - -#define DC_DISP_DAC_CRT_CTRL 0x4c0 -#define DC_DISP_DISP_MISC_CONTROL 0x4c1 -#define DC_DISP_SD_CONTROL 0x4c2 -#define DC_DISP_SD_CSC_COEFF 0x4c3 -#define DC_DISP_SD_LUT(x) (0x4c4 + (x)) -#define DC_DISP_SD_FLICKER_CONTROL 0x4cd -#define DC_DISP_DC_PIXEL_COUNT 0x4ce -#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x)) -#define DC_DISP_SD_BL_PARAMETERS 0x4d7 -#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x)) -#define DC_DISP_SD_BL_CONTROL 0x4dc -#define DC_DISP_SD_HW_K_VALUES 0x4dd -#define DC_DISP_SD_MAN_K_VALUES 0x4de - -#define DC_WIN_WIN_OPTIONS 0x700 -#define COLOR_EXPAND (1 << 6) -#define WIN_ENABLE (1 << 30) - -#define DC_WIN_BYTE_SWAP 0x701 -#define BYTE_SWAP_NOSWAP (0 << 0) -#define BYTE_SWAP_SWAP2 (1 << 0) -#define BYTE_SWAP_SWAP4 (2 << 0) -#define BYTE_SWAP_SWAP4HW (3 << 0) - -#define DC_WIN_BUFFER_CONTROL 0x702 -#define BUFFER_CONTROL_HOST (0 << 0) -#define BUFFER_CONTROL_VI (1 << 0) -#define BUFFER_CONTROL_EPP (2 << 0) -#define BUFFER_CONTROL_MPEGE (3 << 0) -#define BUFFER_CONTROL_SB2D (4 << 0) - -#define DC_WIN_COLOR_DEPTH 0x703 -#define WIN_COLOR_DEPTH_P1 0 -#define WIN_COLOR_DEPTH_P2 1 -#define WIN_COLOR_DEPTH_P4 2 -#define WIN_COLOR_DEPTH_P8 3 -#define WIN_COLOR_DEPTH_B4G4R4A4 4 -#define 
WIN_COLOR_DEPTH_B5G5R5A 5 -#define WIN_COLOR_DEPTH_B5G6R5 6 -#define WIN_COLOR_DEPTH_AB5G5R5 7 -#define WIN_COLOR_DEPTH_B8G8R8A8 12 -#define WIN_COLOR_DEPTH_R8G8B8A8 13 -#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14 -#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15 -#define WIN_COLOR_DEPTH_YCbCr422 16 -#define WIN_COLOR_DEPTH_YUV422 17 -#define WIN_COLOR_DEPTH_YCbCr420P 18 -#define WIN_COLOR_DEPTH_YUV420P 19 -#define WIN_COLOR_DEPTH_YCbCr422P 20 -#define WIN_COLOR_DEPTH_YUV422P 21 -#define WIN_COLOR_DEPTH_YCbCr422R 22 -#define WIN_COLOR_DEPTH_YUV422R 23 -#define WIN_COLOR_DEPTH_YCbCr422RA 24 -#define WIN_COLOR_DEPTH_YUV422RA 25 - -#define DC_WIN_POSITION 0x704 -#define H_POSITION(x) (((x) & 0x1fff) << 0) -#define V_POSITION(x) (((x) & 0x1fff) << 16) - -#define DC_WIN_SIZE 0x705 -#define H_SIZE(x) (((x) & 0x1fff) << 0) -#define V_SIZE(x) (((x) & 0x1fff) << 16) - -#define DC_WIN_PRESCALED_SIZE 0x706 -#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0) -#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16) - -#define DC_WIN_H_INITIAL_DDA 0x707 -#define DC_WIN_V_INITIAL_DDA 0x708 -#define DC_WIN_DDA_INC 0x709 -#define H_DDA_INC(x) (((x) & 0xffff) << 0) -#define V_DDA_INC(x) (((x) & 0xffff) << 16) - -#define DC_WIN_LINE_STRIDE 0x70a -#define DC_WIN_BUF_STRIDE 0x70b -#define DC_WIN_UV_BUF_STRIDE 0x70c -#define DC_WIN_BUFFER_ADDR_MODE 0x70d -#define DC_WIN_DV_CONTROL 0x70e - -#define DC_WIN_BLEND_NOKEY 0x70f -#define DC_WIN_BLEND_1WIN 0x710 -#define DC_WIN_BLEND_2WIN_X 0x711 -#define DC_WIN_BLEND_2WIN_Y 0x712 -#define DC_WIN_BLEND32WIN_XY 0x713 - -#define DC_WIN_HP_FETCH_CONTROL 0x714 - -#define DC_WINBUF_START_ADDR 0x800 -#define DC_WINBUF_START_ADDR_NS 0x801 -#define DC_WINBUF_START_ADDR_U 0x802 -#define DC_WINBUF_START_ADDR_U_NS 0x803 -#define DC_WINBUF_START_ADDR_V 0x804 -#define DC_WINBUF_START_ADDR_V_NS 0x805 - -#define DC_WINBUF_ADDR_H_OFFSET 0x806 -#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807 -#define DC_WINBUF_ADDR_V_OFFSET 0x808 -#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809 - -#define DC_WINBUF_UFLOW_STATUS 0x80a - -#define DC_WINBUF_AD_UFLOW_STATUS 0xbca -#define DC_WINBUF_BD_UFLOW_STATUS 0xdca -#define DC_WINBUF_CD_UFLOW_STATUS 0xfca - -/* synchronization points */ -#define SYNCPT_VBLANK0 26 -#define SYNCPT_VBLANK1 27 - -#endif /* TEGRA_DC_H */ diff --git a/trunk/drivers/gpu/drm/tegra/drm.c b/trunk/drivers/gpu/drm/tegra/drm.c deleted file mode 100644 index 3a503c9e4686..000000000000 --- a/trunk/drivers/gpu/drm/tegra/drm.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include - -#include -#include -#include - -#include "drm.h" - -#define DRIVER_NAME "tegra" -#define DRIVER_DESC "NVIDIA Tegra graphics" -#define DRIVER_DATE "20120330" -#define DRIVER_MAJOR 0 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -static int tegra_drm_load(struct drm_device *drm, unsigned long flags) -{ - struct device *dev = drm->dev; - struct host1x *host1x; - int err; - - host1x = dev_get_drvdata(dev); - drm->dev_private = host1x; - host1x->drm = drm; - - drm_mode_config_init(drm); - - err = host1x_drm_init(host1x, drm); - if (err < 0) - return err; - - err = tegra_drm_fb_init(drm); - if (err < 0) - return err; - - drm_kms_helper_poll_init(drm); - - return 0; -} - -static int tegra_drm_unload(struct drm_device *drm) -{ - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_exit(drm); - - drm_mode_config_cleanup(drm); - - return 0; -} - -static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) -{ - return 0; -} - -static void tegra_drm_lastclose(struct drm_device *drm) -{ - struct host1x *host1x = drm->dev_private; - - drm_fbdev_cma_restore_mode(host1x->fbdev); -} - -static struct drm_ioctl_desc tegra_drm_ioctls[] = { -}; - -static const struct file_operations tegra_drm_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .mmap = drm_gem_cma_mmap, - .poll = drm_poll, - .fasync = drm_fasync, - .read = drm_read, -#ifdef CONFIG_COMPAT - .compat_ioctl = drm_compat_ioctl, -#endif - .llseek = noop_llseek, -}; - -struct drm_driver tegra_drm_driver = { - .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM, - .load = tegra_drm_load, - .unload = tegra_drm_unload, - .open = tegra_drm_open, - .lastclose = tegra_drm_lastclose, - - .gem_free_object = drm_gem_cma_free_object, - .gem_vm_ops = &drm_gem_cma_vm_ops, - .dumb_create = drm_gem_cma_dumb_create, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_cma_dumb_destroy, - - .ioctls = tegra_drm_ioctls, - .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), - .fops = &tegra_drm_fops, - - .name = DRIVER_NAME, - .desc = DRIVER_DESC, - .date = DRIVER_DATE, - .major = DRIVER_MAJOR, - .minor = DRIVER_MINOR, - .patchlevel = DRIVER_PATCHLEVEL, -}; diff --git a/trunk/drivers/gpu/drm/tegra/drm.h b/trunk/drivers/gpu/drm/tegra/drm.h deleted file mode 100644 index 3a843a77ddc7..000000000000 --- a/trunk/drivers/gpu/drm/tegra/drm.h +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef TEGRA_DRM_H -#define TEGRA_DRM_H 1 - -#include -#include -#include -#include -#include -#include -#include - -struct tegra_framebuffer { - struct drm_framebuffer base; - struct drm_gem_cma_object *obj; -}; - -static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb) -{ - return container_of(fb, struct tegra_framebuffer, base); -} - -struct host1x { - struct drm_device *drm; - struct device *dev; - void __iomem *regs; - struct clk *clk; - int syncpt; - int irq; - - struct mutex drm_clients_lock; - struct list_head drm_clients; - struct list_head drm_active; - - struct mutex clients_lock; - struct list_head clients; - - struct drm_fbdev_cma *fbdev; - struct tegra_framebuffer fb; -}; - -struct host1x_client; - -struct host1x_client_ops { - int (*drm_init)(struct host1x_client *client, struct drm_device *drm); - int (*drm_exit)(struct host1x_client *client); -}; - -struct host1x_client { - struct host1x *host1x; - struct device *dev; - - const struct host1x_client_ops *ops; - - struct list_head list; -}; - -extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm); -extern int host1x_drm_exit(struct host1x *host1x); - -extern int host1x_register_client(struct host1x *host1x, - struct host1x_client *client); -extern int host1x_unregister_client(struct host1x *host1x, - struct host1x_client *client); - -struct tegra_output; - -struct tegra_dc { - struct host1x_client client; - - struct host1x *host1x; - struct device *dev; - - struct drm_crtc base; - int pipe; - - struct clk *clk; - - void __iomem *regs; - int irq; - - struct tegra_output *rgb; - - struct list_head list; - - struct drm_info_list *debugfs_files; - struct drm_minor *minor; - struct dentry *debugfs; -}; - -static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client) -{ - return container_of(client, struct tegra_dc, client); -} - -static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) -{ - return container_of(crtc, struct tegra_dc, base); -} - -static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, - unsigned long reg) -{ - writel(value, dc->regs + (reg << 2)); -} - -static inline unsigned long tegra_dc_readl(struct tegra_dc *dc, - unsigned long reg) -{ - return readl(dc->regs + (reg << 2)); -} - -struct tegra_output_ops { - int (*enable)(struct tegra_output *output); - int (*disable)(struct tegra_output *output); - int (*setup_clock)(struct tegra_output *output, struct clk *clk, - unsigned long pclk); - int (*check_mode)(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status); -}; - -enum tegra_output_type { - TEGRA_OUTPUT_RGB, - TEGRA_OUTPUT_HDMI, -}; - -struct tegra_output { - struct device_node *of_node; - struct device *dev; - - const struct tegra_output_ops *ops; - enum tegra_output_type type; - - struct i2c_adapter *ddc; - const struct edid *edid; - unsigned int hpd_irq; - int hpd_gpio; - - struct drm_encoder encoder; - struct drm_connector connector; -}; - -static inline struct tegra_output *encoder_to_output(struct drm_encoder *e) -{ - return container_of(e, struct tegra_output, encoder); -} - -static inline struct tegra_output *connector_to_output(struct drm_connector *c) -{ - return container_of(c, struct tegra_output, connector); -} - -static inline int tegra_output_enable(struct tegra_output *output) -{ - if (output && output->ops && output->ops->enable) - return output->ops->enable(output); - - return output ? 
-ENOSYS : -EINVAL; -} - -static inline int tegra_output_disable(struct tegra_output *output) -{ - if (output && output->ops && output->ops->disable) - return output->ops->disable(output); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk) -{ - if (output && output->ops && output->ops->setup_clock) - return output->ops->setup_clock(output, clk, pclk); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - if (output && output->ops && output->ops->check_mode) - return output->ops->check_mode(output, mode, status); - - return output ? -ENOSYS : -EINVAL; -} - -/* from rgb.c */ -extern int tegra_dc_rgb_probe(struct tegra_dc *dc); -extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc); -extern int tegra_dc_rgb_exit(struct tegra_dc *dc); - -/* from output.c */ -extern int tegra_output_parse_dt(struct tegra_output *output); -extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output); -extern int tegra_output_exit(struct tegra_output *output); - -/* from gem.c */ -extern struct tegra_gem_object *tegra_gem_alloc(struct drm_device *drm, - size_t size); -extern int tegra_gem_handle_create(struct drm_device *drm, - struct drm_file *file, size_t size, - unsigned long flags, uint32_t *handle); -extern int tegra_gem_dumb_create(struct drm_file *file, struct drm_device *drm, - struct drm_mode_create_dumb *args); -extern int tegra_gem_dumb_map_offset(struct drm_file *file, - struct drm_device *drm, uint32_t handle, - uint64_t *offset); -extern int tegra_gem_dumb_destroy(struct drm_file *file, - struct drm_device *drm, uint32_t handle); -extern int tegra_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); -extern int tegra_gem_init_object(struct drm_gem_object *obj); -extern void tegra_gem_free_object(struct drm_gem_object *obj); -extern struct vm_operations_struct tegra_gem_vm_ops; - -/* from fb.c */ -extern int tegra_drm_fb_init(struct drm_device *drm); -extern void tegra_drm_fb_exit(struct drm_device *drm); - -extern struct platform_driver tegra_host1x_driver; -extern struct platform_driver tegra_hdmi_driver; -extern struct platform_driver tegra_dc_driver; -extern struct drm_driver tegra_drm_driver; - -#endif /* TEGRA_DRM_H */ diff --git a/trunk/drivers/gpu/drm/tegra/fb.c b/trunk/drivers/gpu/drm/tegra/fb.c deleted file mode 100644 index 97993c6835fd..000000000000 --- a/trunk/drivers/gpu/drm/tegra/fb.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include "drm.h" - -static void tegra_drm_fb_output_poll_changed(struct drm_device *drm) -{ - struct host1x *host1x = drm->dev_private; - - drm_fbdev_cma_hotplug_event(host1x->fbdev); -} - -static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { - .fb_create = drm_fb_cma_create, - .output_poll_changed = tegra_drm_fb_output_poll_changed, -}; - -int tegra_drm_fb_init(struct drm_device *drm) -{ - struct host1x *host1x = drm->dev_private; - struct drm_fbdev_cma *fbdev; - - drm->mode_config.min_width = 0; - drm->mode_config.min_height = 0; - - drm->mode_config.max_width = 4096; - drm->mode_config.max_height = 4096; - - drm->mode_config.funcs = &tegra_drm_mode_funcs; - - fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, - drm->mode_config.num_connector); - if (IS_ERR(fbdev)) - return PTR_ERR(fbdev); - -#ifndef CONFIG_FRAMEBUFFER_CONSOLE - drm_fbdev_cma_restore_mode(fbdev); -#endif - - host1x->fbdev = fbdev; - - return 0; -} - -void tegra_drm_fb_exit(struct drm_device *drm) -{ - struct host1x *host1x = drm->dev_private; - - drm_fbdev_cma_fini(host1x->fbdev); -} diff --git a/trunk/drivers/gpu/drm/tegra/hdmi.c b/trunk/drivers/gpu/drm/tegra/hdmi.c deleted file mode 100644 index ab4016412bbf..000000000000 --- a/trunk/drivers/gpu/drm/tegra/hdmi.c +++ /dev/null @@ -1,1334 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "hdmi.h" -#include "drm.h" -#include "dc.h" - -struct tegra_hdmi { - struct host1x_client client; - struct tegra_output output; - struct device *dev; - - struct regulator *vdd; - struct regulator *pll; - - void __iomem *regs; - unsigned int irq; - - struct clk *clk_parent; - struct clk *clk; - - unsigned int audio_source; - unsigned int audio_freq; - bool stereo; - bool dvi; - - struct drm_info_list *debugfs_files; - struct drm_minor *minor; - struct dentry *debugfs; -}; - -static inline struct tegra_hdmi * -host1x_client_to_hdmi(struct host1x_client *client) -{ - return container_of(client, struct tegra_hdmi, client); -} - -static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output) -{ - return container_of(output, struct tegra_hdmi, output); -} - -#define HDMI_AUDIOCLK_FREQ 216000000 -#define HDMI_REKEY_DEFAULT 56 - -enum { - AUTO = 0, - SPDIF, - HDA, -}; - -static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi, - unsigned long reg) -{ - return readl(hdmi->regs + (reg << 2)); -} - -static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val, - unsigned long reg) -{ - writel(val, hdmi->regs + (reg << 2)); -} - -struct tegra_hdmi_audio_config { - unsigned int pclk; - unsigned int n; - unsigned int cts; - unsigned int aval; -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = { - { 25200000, 4096, 25200, 24000 }, - { 27000000, 4096, 27000, 24000 }, - { 74250000, 4096, 74250, 24000 }, - { 148500000, 4096, 148500, 24000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = { - { 25200000, 5880, 26250, 25000 }, - { 27000000, 5880, 28125, 25000 }, - { 74250000, 4704, 61875, 20000 }, - { 148500000, 4704, 123750, 20000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config 
tegra_hdmi_audio_48k[] = { - { 25200000, 6144, 25200, 24000 }, - { 27000000, 6144, 27000, 24000 }, - { 74250000, 6144, 74250, 24000 }, - { 148500000, 6144, 148500, 24000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = { - { 25200000, 11760, 26250, 25000 }, - { 27000000, 11760, 28125, 25000 }, - { 74250000, 9408, 61875, 20000 }, - { 148500000, 9408, 123750, 20000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = { - { 25200000, 12288, 25200, 24000 }, - { 27000000, 12288, 27000, 24000 }, - { 74250000, 12288, 74250, 24000 }, - { 148500000, 12288, 148500, 24000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = { - { 25200000, 23520, 26250, 25000 }, - { 27000000, 23520, 28125, 25000 }, - { 74250000, 18816, 61875, 20000 }, - { 148500000, 18816, 123750, 20000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = { - { 25200000, 24576, 25200, 24000 }, - { 27000000, 24576, 27000, 24000 }, - { 74250000, 24576, 74250, 24000 }, - { 148500000, 24576, 148500, 24000 }, - { 0, 0, 0, 0 }, -}; - -struct tmds_config { - unsigned int pclk; - u32 pll0; - u32 pll1; - u32 pe_current; - u32 drive_current; -}; - -static const struct tmds_config tegra2_tmds_config[] = { - { /* 480p modes */ - .pclk = 27000000, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | - SOR_PLL_TX_REG_LOAD(3), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE, - .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) | - PE_CURRENT1(PE_CURRENT_0_0_mA) | - PE_CURRENT2(PE_CURRENT_0_0_mA) | - PE_CURRENT3(PE_CURRENT_0_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), - }, { /* 720p modes */ - .pclk = 74250000, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | - SOR_PLL_TX_REG_LOAD(3), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, - .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) | - PE_CURRENT1(PE_CURRENT_6_0_mA) | - PE_CURRENT2(PE_CURRENT_6_0_mA) | - PE_CURRENT3(PE_CURRENT_6_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), - }, { /* 1080p modes */ - .pclk = UINT_MAX, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | - SOR_PLL_TX_REG_LOAD(3), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, - .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) | - PE_CURRENT1(PE_CURRENT_6_0_mA) | - PE_CURRENT2(PE_CURRENT_6_0_mA) | - PE_CURRENT3(PE_CURRENT_6_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), - }, -}; - -static const struct tmds_config tegra3_tmds_config[] = { - { /* 480p modes */ - .pclk = 27000000, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | - SOR_PLL_TX_REG_LOAD(0), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE, - .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) | - PE_CURRENT1(PE_CURRENT_0_0_mA) | - PE_CURRENT2(PE_CURRENT_0_0_mA) | - PE_CURRENT3(PE_CURRENT_0_0_mA), - .drive_current = 
DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), - }, { /* 720p modes */ - .pclk = 74250000, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | - SOR_PLL_TX_REG_LOAD(0), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, - .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) | - PE_CURRENT1(PE_CURRENT_5_0_mA) | - PE_CURRENT2(PE_CURRENT_5_0_mA) | - PE_CURRENT3(PE_CURRENT_5_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), - }, { /* 1080p modes */ - .pclk = UINT_MAX, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) | - SOR_PLL_TX_REG_LOAD(0), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, - .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) | - PE_CURRENT1(PE_CURRENT_5_0_mA) | - PE_CURRENT2(PE_CURRENT_5_0_mA) | - PE_CURRENT3(PE_CURRENT_5_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), - }, -}; - -static const struct tegra_hdmi_audio_config * -tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) -{ - const struct tegra_hdmi_audio_config *table; - - switch (audio_freq) { - case 32000: - table = tegra_hdmi_audio_32k; - break; - - case 44100: - table = tegra_hdmi_audio_44_1k; - break; - - case 48000: - table = tegra_hdmi_audio_48k; - break; - - case 88200: - table = tegra_hdmi_audio_88_2k; - break; - - case 96000: - table = tegra_hdmi_audio_96k; - break; - - case 176400: - table = tegra_hdmi_audio_176_4k; - break; - - case 192000: - table = tegra_hdmi_audio_192k; - break; - - default: - return NULL; - } - - while (table->pclk) { - if (table->pclk == pclk) - return table; - - table++; - } - - return NULL; -} - -static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) -{ - const unsigned int freqs[] = { - 32000, 44100, 48000, 88200, 96000, 176400, 192000 - }; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(freqs); i++) { - unsigned int f = freqs[i]; - unsigned int eight_half; - unsigned long value; - unsigned int delta; - - if (f > 96000) - delta = 2; - else if (f > 480000) - delta = 6; - else - delta = 9; - - eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128); - value = AUDIO_FS_LOW(eight_half - delta) | - AUDIO_FS_HIGH(eight_half + delta); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i)); - } -} - -static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) -{ - struct device_node *node = hdmi->dev->of_node; - const struct tegra_hdmi_audio_config *config; - unsigned int offset = 0; - unsigned long value; - - switch (hdmi->audio_source) { - case HDA: - value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL; - break; - - case SPDIF: - value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; - break; - - default: - value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO; - break; - } - - if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { - value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) | - AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); - } else { - value |= AUDIO_CNTRL0_INJECT_NULLSMPL; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); - - value = 
AUDIO_CNTRL0_ERROR_TOLERANCE(6) | - AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); - } - - config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk); - if (!config) { - dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n", - hdmi->audio_freq, pclk); - return -EINVAL; - } - - tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL); - - value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE | - AUDIO_N_VALUE(config->n - 1); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); - - tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE, - HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); - - value = ACR_SUBPACK_CTS(config->cts); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); - - value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE); - - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N); - value &= ~AUDIO_N_RESETF; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); - - if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { - switch (hdmi->audio_freq) { - case 32000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320; - break; - - case 44100: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441; - break; - - case 48000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480; - break; - - case 88200: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882; - break; - - case 96000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960; - break; - - case 176400: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764; - break; - - case 192000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920; - break; - } - - tegra_hdmi_writel(hdmi, config->aval, offset); - } - - tegra_hdmi_setup_audio_fs_tables(hdmi); - - return 0; -} - -static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, - unsigned int offset, u8 type, - u8 version, void *data, size_t size) -{ - unsigned long value; - u8 *ptr = data; - u32 subpack[2]; - size_t i; - u8 csum; - - /* first byte of data is the checksum */ - csum = type + version + size - 1; - - for (i = 1; i < size; i++) - csum += ptr[i]; - - ptr[0] = 0x100 - csum; - - value = INFOFRAME_HEADER_TYPE(type) | - INFOFRAME_HEADER_VERSION(version) | - INFOFRAME_HEADER_LEN(size - 1); - tegra_hdmi_writel(hdmi, value, offset); - - /* The audio inforame only has one set of subpack registers. The hdmi - * block pads the rest of the data as per the spec so we have to fixup - * the length before filling in the subpacks. 
- */ - if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER) - size = 6; - - /* each subpack 7 bytes devided into: - * subpack_low - bytes 0 - 3 - * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00) - */ - for (i = 0; i < size; i++) { - size_t index = i % 7; - - if (index == 0) - memset(subpack, 0x0, sizeof(subpack)); - - ((u8 *)subpack)[index] = ptr[i]; - - if (index == 6 || (i + 1 == size)) { - unsigned int reg = offset + 1 + (i / 7) * 2; - - tegra_hdmi_writel(hdmi, subpack[0], reg); - tegra_hdmi_writel(hdmi, subpack[1], reg + 1); - } - } -} - -static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi, - struct drm_display_mode *mode) -{ - struct hdmi_avi_infoframe frame; - unsigned int h_front_porch; - unsigned int hsize = 16; - unsigned int vsize = 9; - - if (hdmi->dvi) { - tegra_hdmi_writel(hdmi, 0, - HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); - return; - } - - h_front_porch = mode->htotal - mode->hsync_end; - memset(&frame, 0, sizeof(frame)); - frame.r = HDMI_AVI_R_SAME; - - switch (mode->vdisplay) { - case 480: - if (mode->hdisplay == 640) { - frame.m = HDMI_AVI_M_4_3; - frame.vic = 1; - } else { - frame.m = HDMI_AVI_M_16_9; - frame.vic = 3; - } - break; - - case 576: - if (((hsize * 10) / vsize) > 14) { - frame.m = HDMI_AVI_M_16_9; - frame.vic = 18; - } else { - frame.m = HDMI_AVI_M_4_3; - frame.vic = 17; - } - break; - - case 720: - case 1470: /* stereo mode */ - frame.m = HDMI_AVI_M_16_9; - - if (h_front_porch == 110) - frame.vic = 4; - else - frame.vic = 19; - break; - - case 1080: - case 2205: /* stereo mode */ - frame.m = HDMI_AVI_M_16_9; - - switch (h_front_porch) { - case 88: - frame.vic = 16; - break; - - case 528: - frame.vic = 31; - break; - - default: - frame.vic = 32; - break; - } - break; - - default: - frame.m = HDMI_AVI_M_16_9; - frame.vic = 0; - break; - } - - tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER, - HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION, - &frame, sizeof(frame)); - - tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, - HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); -} - -static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) -{ - struct hdmi_audio_infoframe frame; - - if (hdmi->dvi) { - tegra_hdmi_writel(hdmi, 0, - HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); - return; - } - - memset(&frame, 0, sizeof(frame)); - frame.cc = HDMI_AUDIO_CC_2; - - tegra_hdmi_write_infopack(hdmi, - HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER, - HDMI_INFOFRAME_TYPE_AUDIO, - HDMI_AUDIO_VERSION, - &frame, sizeof(frame)); - - tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, - HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); -} - -static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) -{ - struct hdmi_stereo_infoframe frame; - unsigned long value; - - if (!hdmi->stereo) { - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - value &= ~GENERIC_CTRL_ENABLE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - return; - } - - memset(&frame, 0, sizeof(frame)); - frame.regid0 = 0x03; - frame.regid1 = 0x0c; - frame.regid2 = 0x00; - frame.hdmi_video_format = 2; - - /* TODO: 74 MHz limit? 
*/ - if (1) { - frame._3d_structure = 0; - } else { - frame._3d_structure = 8; - frame._3d_ext_data = 0; - } - - tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER, - HDMI_INFOFRAME_TYPE_VENDOR, - HDMI_VENDOR_VERSION, &frame, 6); - - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - value |= GENERIC_CTRL_ENABLE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); -} - -static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi, - const struct tmds_config *tmds) -{ - unsigned long value; - - tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0); - tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1); - tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT); - - value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); -} - -static int tegra_output_hdmi_enable(struct tegra_output *output) -{ - unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - struct drm_display_mode *mode = &dc->base.mode; - struct tegra_hdmi *hdmi = to_hdmi(output); - struct device_node *node = hdmi->dev->of_node; - unsigned int pulse_start, div82, pclk; - const struct tmds_config *tmds; - unsigned int num_tmds; - unsigned long value; - int retries = 1000; - int err; - - pclk = mode->clock * 1000; - h_sync_width = mode->hsync_end - mode->hsync_start; - h_front_porch = mode->htotal - mode->hsync_end; - h_back_porch = mode->hsync_start - mode->hdisplay; - - err = regulator_enable(hdmi->vdd); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); - return err; - } - - err = regulator_enable(hdmi->pll); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); - return err; - } - - /* - * This assumes that the display controller will divide its parent - * clock by 2 to generate the pixel clock. 
- */ - err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2); - if (err < 0) { - dev_err(hdmi->dev, "failed to setup clock: %d\n", err); - return err; - } - - err = clk_set_rate(hdmi->clk, pclk); - if (err < 0) - return err; - - err = clk_enable(hdmi->clk); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable clock: %d\n", err); - return err; - } - - tegra_periph_reset_assert(hdmi->clk); - usleep_range(1000, 2000); - tegra_periph_reset_deassert(hdmi->clk); - - tegra_dc_writel(dc, VSYNC_H_POSITION(1), - DC_DISP_DISP_TIMING_OPTIONS); - tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888, - DC_DISP_DISP_COLOR_CONTROL); - - /* video_preamble uses h_pulse2 */ - pulse_start = 1 + h_sync_width + h_back_porch - 10; - - tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0); - - value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE | - PULSE_LAST_END_A; - tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL); - - value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8); - tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A); - - value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) | - VSYNC_WINDOW_ENABLE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); - - if (dc->pipe) - value = HDMI_SRC_DISPLAYB; - else - value = HDMI_SRC_DISPLAYA; - - if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) || - (mode->vdisplay == 576))) - tegra_hdmi_writel(hdmi, - value | ARM_VIDEO_RANGE_FULL, - HDMI_NV_PDISP_INPUT_CONTROL); - else - tegra_hdmi_writel(hdmi, - value | ARM_VIDEO_RANGE_LIMITED, - HDMI_NV_PDISP_INPUT_CONTROL); - - div82 = clk_get_rate(hdmi->clk) / 1000000 * 4; - value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK); - - if (!hdmi->dvi) { - err = tegra_hdmi_setup_audio(hdmi, pclk); - if (err < 0) - hdmi->dvi = true; - } - - if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) { - /* - * TODO: add ELD support - */ - } - - rekey = HDMI_REKEY_DEFAULT; - value = HDMI_CTRL_REKEY(rekey); - value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch + - h_front_porch - rekey - 18) / 32); - - if (!hdmi->dvi) - value |= HDMI_CTRL_ENABLE; - - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL); - - if (hdmi->dvi) - tegra_hdmi_writel(hdmi, 0x0, - HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - else - tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO, - HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - - tegra_hdmi_setup_avi_infoframe(hdmi, mode); - tegra_hdmi_setup_audio_infoframe(hdmi); - tegra_hdmi_setup_stereo_infoframe(hdmi); - - /* TMDS CONFIG */ - if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { - num_tmds = ARRAY_SIZE(tegra3_tmds_config); - tmds = tegra3_tmds_config; - } else { - num_tmds = ARRAY_SIZE(tegra2_tmds_config); - tmds = tegra2_tmds_config; - } - - for (i = 0; i < num_tmds; i++) { - if (pclk <= tmds[i].pclk) { - tegra_hdmi_setup_tmds(hdmi, &tmds[i]); - break; - } - } - - tegra_hdmi_writel(hdmi, - SOR_SEQ_CTL_PU_PC(0) | - SOR_SEQ_PU_PC_ALT(0) | - SOR_SEQ_PD_PC(8) | - SOR_SEQ_PD_PC_ALT(8), - HDMI_NV_PDISP_SOR_SEQ_CTL); - - value = SOR_SEQ_INST_WAIT_TIME(1) | - SOR_SEQ_INST_WAIT_UNITS_VSYNC | - SOR_SEQ_INST_HALT | - SOR_SEQ_INST_PIN_A_LOW | - SOR_SEQ_INST_PIN_B_LOW | - SOR_SEQ_INST_DRIVE_PWM_OUT_LO; - - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0)); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8)); - - value = 0x1c800; - value &= ~SOR_CSTM_ROTCLK(~0); - value |= SOR_CSTM_ROTCLK(2); - tegra_hdmi_writel(hdmi, value, 
HDMI_NV_PDISP_SOR_CSTM); - - tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND); - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - - /* start SOR */ - tegra_hdmi_writel(hdmi, - SOR_PWR_NORMAL_STATE_PU | - SOR_PWR_NORMAL_START_NORMAL | - SOR_PWR_SAFE_STATE_PD | - SOR_PWR_SETTING_NEW_TRIGGER, - HDMI_NV_PDISP_SOR_PWR); - tegra_hdmi_writel(hdmi, - SOR_PWR_NORMAL_STATE_PU | - SOR_PWR_NORMAL_START_NORMAL | - SOR_PWR_SAFE_STATE_PD | - SOR_PWR_SETTING_NEW_DONE, - HDMI_NV_PDISP_SOR_PWR); - - do { - BUG_ON(--retries < 0); - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR); - } while (value & SOR_PWR_SETTING_NEW_PENDING); - - value = SOR_STATE_ASY_CRCMODE_COMPLETE | - SOR_STATE_ASY_OWNER_HEAD0 | - SOR_STATE_ASY_SUBOWNER_BOTH | - SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A | - SOR_STATE_ASY_DEPOL_POS; - - /* setup sync polarities */ - if (mode->flags & DRM_MODE_FLAG_PHSYNC) - value |= SOR_STATE_ASY_HSYNCPOL_POS; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - value |= SOR_STATE_ASY_HSYNCPOL_NEG; - - if (mode->flags & DRM_MODE_FLAG_PVSYNC) - value |= SOR_STATE_ASY_VSYNCPOL_POS; - - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - value |= SOR_STATE_ASY_VSYNCPOL_NEG; - - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2); - - value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1); - - tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); - tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0); - tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED, - HDMI_NV_PDISP_SOR_STATE1); - tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); - - tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS); - - value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - value = DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - - /* TODO: add HDCP support */ - - return 0; -} - -static int tegra_output_hdmi_disable(struct tegra_output *output) -{ - struct tegra_hdmi *hdmi = to_hdmi(output); - - tegra_periph_reset_assert(hdmi->clk); - clk_disable(hdmi->clk); - regulator_disable(hdmi->pll); - regulator_disable(hdmi->vdd); - - return 0; -} - -static int tegra_output_hdmi_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk) -{ - struct tegra_hdmi *hdmi = to_hdmi(output); - struct clk *base; - int err; - - err = clk_set_parent(clk, hdmi->clk_parent); - if (err < 0) { - dev_err(output->dev, "failed to set parent: %d\n", err); - return err; - } - - base = clk_get_parent(hdmi->clk_parent); - - /* - * This assumes that the parent clock is pll_d_out0 or pll_d2_out - * respectively, each of which divides the base pll_d by 2. 
- */ - err = clk_set_rate(base, pclk * 2); - if (err < 0) - dev_err(output->dev, - "failed to set base clock rate to %lu Hz\n", - pclk * 2); - - return 0; -} - -static int tegra_output_hdmi_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - struct tegra_hdmi *hdmi = to_hdmi(output); - unsigned long pclk = mode->clock * 1000; - struct clk *parent; - long err; - - parent = clk_get_parent(hdmi->clk_parent); - - err = clk_round_rate(parent, pclk * 4); - if (err < 0) - *status = MODE_NOCLOCK; - else - *status = MODE_OK; - - return 0; -} - -static const struct tegra_output_ops hdmi_ops = { - .enable = tegra_output_hdmi_enable, - .disable = tegra_output_hdmi_disable, - .setup_clock = tegra_output_hdmi_setup_clock, - .check_mode = tegra_output_hdmi_check_mode, -}; - -static int tegra_hdmi_show_regs(struct seq_file *s, void *data) -{ - struct drm_info_node *node = s->private; - struct tegra_hdmi *hdmi = node->info_ent->data; - -#define DUMP_REG(name) \ - seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \ - tegra_hdmi_readl(hdmi, name)) - - DUMP_REG(HDMI_CTXSW); - DUMP_REG(HDMI_NV_PDISP_SOR_STATE0); - DUMP_REG(HDMI_NV_PDISP_SOR_STATE1); - DUMP_REG(HDMI_NV_PDISP_SOR_STATE2); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH); - 
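[Editor's note: the DUMP_REG() macro used throughout this debugfs register dump relies on the preprocessor stringizing operator, so one token yields the register's symbolic name, its offset and its current value. A minimal user-space sketch of the same pattern follows; mock_readl() is a made-up stand-in for tegra_hdmi_readl(), not driver API.]

#include <stdio.h>

#define REG_A 0x01
#define REG_B 0x02

/* Stand-in for a real MMIO read; returns a recognizable fake value. */
static unsigned int mock_readl(unsigned int offset)
{
	return 0xdead0000u | offset;
}

/* Same trick as DUMP_REG() in the driver: #name stringizes the argument,
 * so a single token gives the register's name, offset and value. */
#define DUMP_REG(name) \
	printf("%-10s %#05x %08x\n", #name, name, mock_readl(name))

int main(void)
{
	DUMP_REG(REG_A);
	DUMP_REG(REG_B);
	return 0;
}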
DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT); - DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS); - DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK); - DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1); - DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2); - DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0); - DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1); - DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA); - DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE); - DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1); - DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2); - DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL); - DUMP_REG(HDMI_NV_PDISP_SOR_CAP); - DUMP_REG(HDMI_NV_PDISP_SOR_PWR); - DUMP_REG(HDMI_NV_PDISP_SOR_TEST); - DUMP_REG(HDMI_NV_PDISP_SOR_PLL0); - DUMP_REG(HDMI_NV_PDISP_SOR_PLL1); - DUMP_REG(HDMI_NV_PDISP_SOR_PLL2); - DUMP_REG(HDMI_NV_PDISP_SOR_CSTM); - DUMP_REG(HDMI_NV_PDISP_SOR_LVDS); - DUMP_REG(HDMI_NV_PDISP_SOR_CRCA); - DUMP_REG(HDMI_NV_PDISP_SOR_CRCB); - DUMP_REG(HDMI_NV_PDISP_SOR_BLANK); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15)); - DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0); - DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1); - DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0); - DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1); - DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0); - DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1); - DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0); - DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1); - DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0); - DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1); - DUMP_REG(HDMI_NV_PDISP_SOR_TRIG); - DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK); - DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); - DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0); - DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1); - DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5)); - 
DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH); - DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD); - DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0); - DUMP_REG(HDMI_NV_PDISP_AUDIO_N); - DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING); - DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK); - DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL); - DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL); - DUMP_REG(HDMI_NV_PDISP_SCRATCH); - DUMP_REG(HDMI_NV_PDISP_PE_CURRENT); - DUMP_REG(HDMI_NV_PDISP_KEY_CTRL); - DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0); - DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1); - DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG); - DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX); - DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); - DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); - DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); - -#undef DUMP_REG - - return 0; -} - -static struct drm_info_list debugfs_files[] = { - { "regs", tegra_hdmi_show_regs, 0, NULL }, -}; - -static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi, - struct drm_minor *minor) -{ - unsigned int i; - int err; - - hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root); - if (!hdmi->debugfs) - return -ENOMEM; - - hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), - GFP_KERNEL); - if (!hdmi->debugfs_files) { - err = -ENOMEM; - goto remove; - } - - for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) - hdmi->debugfs_files[i].data = hdmi; - - err = drm_debugfs_create_files(hdmi->debugfs_files, - ARRAY_SIZE(debugfs_files), - hdmi->debugfs, minor); - if (err < 0) - goto free; - - hdmi->minor = minor; - - return 0; - -free: - kfree(hdmi->debugfs_files); - hdmi->debugfs_files = NULL; -remove: - debugfs_remove(hdmi->debugfs); - hdmi->debugfs = NULL; - - return err; -} - -static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi) -{ - drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files), - hdmi->minor); - hdmi->minor = NULL; - - kfree(hdmi->debugfs_files); - hdmi->debugfs_files = NULL; - - debugfs_remove(hdmi->debugfs); - hdmi->debugfs = NULL; - - return 0; -} - -static int tegra_hdmi_drm_init(struct host1x_client *client, - struct drm_device *drm) -{ - struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); - int err; - - hdmi->output.type = TEGRA_OUTPUT_HDMI; - hdmi->output.dev = client->dev; - hdmi->output.ops = &hdmi_ops; - - err = tegra_output_init(drm, &hdmi->output); - if (err < 0) { - dev_err(client->dev, "output setup failed: %d\n", err); - return err; - } - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_hdmi_debugfs_init(hdmi, drm->primary); - if (err < 0) - dev_err(client->dev, "debugfs setup failed: %d\n", err); - } - - return 0; -} - -static int tegra_hdmi_drm_exit(struct host1x_client *client) -{ - struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); - int err; - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_hdmi_debugfs_exit(hdmi); - if (err < 0) - dev_err(client->dev, "debugfs cleanup failed: %d\n", - err); - } - - err = tegra_output_disable(&hdmi->output); - if (err < 0) { - dev_err(client->dev, "output failed to disable: %d\n", err); - return err; - } - - err = tegra_output_exit(&hdmi->output); - if (err < 0) { - dev_err(client->dev, "output cleanup failed: %d\n", err); - return err; - } - - return 0; -} - -static const struct host1x_client_ops hdmi_client_ops = { - .drm_init = tegra_hdmi_drm_init, - .drm_exit = 
tegra_hdmi_drm_exit, -}; - -static int tegra_hdmi_probe(struct platform_device *pdev) -{ - struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); - struct tegra_hdmi *hdmi; - struct resource *regs; - int err; - - hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) - return -ENOMEM; - - hdmi->dev = &pdev->dev; - hdmi->audio_source = AUTO; - hdmi->audio_freq = 44100; - hdmi->stereo = false; - hdmi->dvi = false; - - hdmi->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(hdmi->clk)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return PTR_ERR(hdmi->clk); - } - - err = clk_prepare(hdmi->clk); - if (err < 0) - return err; - - hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent"); - if (IS_ERR(hdmi->clk_parent)) - return PTR_ERR(hdmi->clk_parent); - - err = clk_prepare(hdmi->clk_parent); - if (err < 0) - return err; - - err = clk_set_parent(hdmi->clk, hdmi->clk_parent); - if (err < 0) { - dev_err(&pdev->dev, "failed to setup clocks: %d\n", err); - return err; - } - - hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd"); - if (IS_ERR(hdmi->vdd)) { - dev_err(&pdev->dev, "failed to get VDD regulator\n"); - return PTR_ERR(hdmi->vdd); - } - - hdmi->pll = devm_regulator_get(&pdev->dev, "pll"); - if (IS_ERR(hdmi->pll)) { - dev_err(&pdev->dev, "failed to get PLL regulator\n"); - return PTR_ERR(hdmi->pll); - } - - hdmi->output.dev = &pdev->dev; - - err = tegra_output_parse_dt(&hdmi->output); - if (err < 0) - return err; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) - return -ENXIO; - - hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs); - if (!hdmi->regs) - return -EADDRNOTAVAIL; - - err = platform_get_irq(pdev, 0); - if (err < 0) - return err; - - hdmi->irq = err; - - hdmi->client.ops = &hdmi_client_ops; - INIT_LIST_HEAD(&hdmi->client.list); - hdmi->client.dev = &pdev->dev; - - err = host1x_register_client(host1x, &hdmi->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to register host1x client: %d\n", - err); - return err; - } - - platform_set_drvdata(pdev, hdmi); - - return 0; -} - -static int tegra_hdmi_remove(struct platform_device *pdev) -{ - struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); - struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); - int err; - - err = host1x_unregister_client(host1x, &hdmi->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", - err); - return err; - } - - clk_unprepare(hdmi->clk_parent); - clk_unprepare(hdmi->clk); - - return 0; -} - -static struct of_device_id tegra_hdmi_of_match[] = { - { .compatible = "nvidia,tegra30-hdmi", }, - { .compatible = "nvidia,tegra20-hdmi", }, - { }, -}; - -struct platform_driver tegra_hdmi_driver = { - .driver = { - .name = "tegra-hdmi", - .owner = THIS_MODULE, - .of_match_table = tegra_hdmi_of_match, - }, - .probe = tegra_hdmi_probe, - .remove = tegra_hdmi_remove, -}; diff --git a/trunk/drivers/gpu/drm/tegra/hdmi.h b/trunk/drivers/gpu/drm/tegra/hdmi.h deleted file mode 100644 index 1477f36eb45a..000000000000 --- a/trunk/drivers/gpu/drm/tegra/hdmi.h +++ /dev/null @@ -1,575 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef TEGRA_HDMI_H -#define TEGRA_HDMI_H 1 - -#define HDMI_INFOFRAME_TYPE_VENDOR 0x81 -#define HDMI_INFOFRAME_TYPE_AVI 0x82 -#define HDMI_INFOFRAME_TYPE_SPD 0x83 -#define HDMI_INFOFRAME_TYPE_AUDIO 0x84 -#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85 -#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86 - -/* all fields little endian */ -struct hdmi_avi_infoframe { - /* PB0 */ - u8 csum; - - /* PB1 */ - unsigned s:2; /* scan information */ - unsigned b:2; /* bar info data valid */ - unsigned a:1; /* active info present */ - unsigned y:2; /* RGB or YCbCr */ - unsigned res1:1; - - /* PB2 */ - unsigned r:4; /* active format aspect ratio */ - unsigned m:2; /* picture aspect ratio */ - unsigned c:2; /* colorimetry */ - - /* PB3 */ - unsigned sc:2; /* scan information */ - unsigned q:2; /* quantization range */ - unsigned ec:3; /* extended colorimetry */ - unsigned itc:1; /* it content */ - - /* PB4 */ - unsigned vic:7; /* video format id code */ - unsigned res4:1; - - /* PB5 */ - unsigned pr:4; /* pixel repetition factor */ - unsigned cn:2; /* it content type*/ - unsigned yq:2; /* ycc quantization range */ - - /* PB6-7 */ - u16 top_bar_end_line; - - /* PB8-9 */ - u16 bot_bar_start_line; - - /* PB10-11 */ - u16 left_bar_end_pixel; - - /* PB12-13 */ - u16 right_bar_start_pixel; -} __packed; - -#define HDMI_AVI_VERSION 0x02 - -#define HDMI_AVI_Y_RGB 0x0 -#define HDMI_AVI_Y_YCBCR_422 0x1 -#define HDMI_AVI_Y_YCBCR_444 0x2 - -#define HDMI_AVI_B_VERT 0x1 -#define HDMI_AVI_B_HORIZ 0x2 - -#define HDMI_AVI_S_NONE 0x0 -#define HDMI_AVI_S_OVERSCAN 0x1 -#define HDMI_AVI_S_UNDERSCAN 0x2 - -#define HDMI_AVI_C_NONE 0x0 -#define HDMI_AVI_C_SMPTE 0x1 -#define HDMI_AVI_C_ITU_R 0x2 -#define HDMI_AVI_C_EXTENDED 0x4 - -#define HDMI_AVI_M_4_3 0x1 -#define HDMI_AVI_M_16_9 0x2 - -#define HDMI_AVI_R_SAME 0x8 -#define HDMI_AVI_R_4_3_CENTER 0x9 -#define HDMI_AVI_R_16_9_CENTER 0xa -#define HDMI_AVI_R_14_9_CENTER 0xb - -/* all fields little endian */ -struct hdmi_audio_infoframe { - /* PB0 */ - u8 csum; - - /* PB1 */ - unsigned cc:3; /* channel count */ - unsigned res1:1; - unsigned ct:4; /* coding type */ - - /* PB2 */ - unsigned ss:2; /* sample size */ - unsigned sf:3; /* sample frequency */ - unsigned res2:3; - - /* PB3 */ - unsigned cxt:5; /* coding extention type */ - unsigned res3:3; - - /* PB4 */ - u8 ca; /* channel/speaker allocation */ - - /* PB5 */ - unsigned res5:3; - unsigned lsv:4; /* level shift value */ - unsigned dm_inh:1; /* downmix inhibit */ - - /* PB6-10 reserved */ - u8 res6; - u8 res7; - u8 res8; - u8 res9; - u8 res10; -} __packed; - -#define HDMI_AUDIO_VERSION 0x01 - -#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */ -#define HDMI_AUDIO_CC_2 0x1 -#define HDMI_AUDIO_CC_3 0x2 -#define HDMI_AUDIO_CC_4 0x3 -#define HDMI_AUDIO_CC_5 0x4 -#define HDMI_AUDIO_CC_6 0x5 -#define HDMI_AUDIO_CC_7 0x6 -#define HDMI_AUDIO_CC_8 0x7 - -#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */ -#define HDMI_AUDIO_CT_PCM 0x1 -#define HDMI_AUDIO_CT_AC3 0x2 -#define HDMI_AUDIO_CT_MPEG1 0x3 -#define HDMI_AUDIO_CT_MP3 0x4 -#define HDMI_AUDIO_CT_MPEG2 0x5 -#define HDMI_AUDIO_CT_AAC_LC 0x6 -#define HDMI_AUDIO_CT_DTS 0x7 -#define HDMI_AUDIO_CT_ATRAC 0x8 -#define HDMI_AUDIO_CT_DSD 0x9 -#define HDMI_AUDIO_CT_E_AC3 0xa -#define HDMI_AUDIO_CT_DTS_HD 0xb -#define HDMI_AUDIO_CT_MLP 0xc -#define HDMI_AUDIO_CT_DST 0xd -#define HDMI_AUDIO_CT_WMA_PRO 0xe -#define HDMI_AUDIO_CT_CXT 0xf - -#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */ -#define HDMI_AUIDO_SF_32K 0x1 -#define HDMI_AUDIO_SF_44_1K 0x2 
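[Editor's note: the csum fields in the infoframe structures above follow the HDMI rule that type, version, length and all payload bytes, including the checksum itself, must sum to zero modulo 256, which is exactly what tegra_hdmi_write_infopack() computes earlier in this patch. A stand-alone sketch of that calculation; hdmi_infoframe_checksum() is an illustrative helper, not part of this driver.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* payload[0] is the checksum slot and is skipped in the sum, mirroring
 * the loop in tegra_hdmi_write_infopack(). */
static uint8_t hdmi_infoframe_checksum(uint8_t type, uint8_t version,
				       const uint8_t *payload, size_t len)
{
	uint8_t csum = type + version + (uint8_t)(len - 1);
	size_t i;

	for (i = 1; i < len; i++)
		csum += payload[i];

	return (uint8_t)(0x100 - csum);
}

int main(void)
{
	uint8_t avi[14];	/* checksum byte + 13 payload bytes */

	memset(avi, 0, sizeof(avi));
	/* 0x82 and 0x02 are HDMI_INFOFRAME_TYPE_AVI and HDMI_AVI_VERSION. */
	avi[0] = hdmi_infoframe_checksum(0x82, 0x02, avi, sizeof(avi));
	printf("AVI checksum for an all-zero payload: %#04x\n", avi[0]);
	return 0;
}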
-#define HDMI_AUDIO_SF_48K 0x3 -#define HDMI_AUDIO_SF_88_2K 0x4 -#define HDMI_AUDIO_SF_96K 0x5 -#define HDMI_AUDIO_SF_176_4K 0x6 -#define HDMI_AUDIO_SF_192K 0x7 - -#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */ -#define HDMI_AUDIO_SS_16BIT 0x1 -#define HDMI_AUDIO_SS_20BIT 0x2 -#define HDMI_AUDIO_SS_24BIT 0x3 - -#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */ -#define HDMI_AUDIO_CXT_HE_AAC 0x1 -#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2 -#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3 - -/* all fields little endian */ -struct hdmi_stereo_infoframe { - /* PB0 */ - u8 csum; - - /* PB1 */ - u8 regid0; - - /* PB2 */ - u8 regid1; - - /* PB3 */ - u8 regid2; - - /* PB4 */ - unsigned res1:5; - unsigned hdmi_video_format:3; - - /* PB5 */ - unsigned res2:4; - unsigned _3d_structure:4; - - /* PB6*/ - unsigned res3:4; - unsigned _3d_ext_data:4; -} __packed; - -#define HDMI_VENDOR_VERSION 0x01 - -/* register definitions */ -#define HDMI_CTXSW 0x00 - -#define HDMI_NV_PDISP_SOR_STATE0 0x01 -#define SOR_STATE_UPDATE (1 << 0) - -#define HDMI_NV_PDISP_SOR_STATE1 0x02 -#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0) -#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2) -#define SOR_STATE_ATTACHED (1 << 3) - -#define HDMI_NV_PDISP_SOR_STATE2 0x03 -#define SOR_STATE_ASY_OWNER_NONE (0 << 0) -#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0) -#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4) -#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4) -#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4) -#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4) -#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6) -#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6) -#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6) -#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8) -#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8) -#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12) -#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12) -#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13) -#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13) -#define SOR_STATE_ASY_DEPOL_POS (0 << 14) -#define SOR_STATE_ASY_DEPOL_NEG (1 << 14) - -#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04 -#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05 -#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06 -#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07 -#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08 -#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09 -#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a -#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b -#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c -#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d -#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e -#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f -#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10 -#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11 -#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12 -#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13 -#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14 -#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15 -#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16 -#define HDMI_NV_PDISP_RG_HDCP_RI 0x17 -#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18 -#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19 -#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a -#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b -#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c -#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d - -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20 -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21 -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 
0x23 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29 - -#define INFOFRAME_CTRL_ENABLE (1 << 0) - -#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0) -#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) -#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16) - -#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a -#define GENERIC_CTRL_ENABLE (1 << 0) -#define GENERIC_CTRL_OTHER (1 << 4) -#define GENERIC_CTRL_SINGLE (1 << 8) -#define GENERIC_CTRL_HBLANK (1 << 12) -#define GENERIC_CTRL_AUDIO (1 << 16) - -#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b -#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30 -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31 -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32 -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33 -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34 - -#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35 -#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36 -#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37 -#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38 -#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39 -#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a -#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b -#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c -#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d -#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e -#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f -#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40 -#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41 -#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42 -#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43 - -#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8) -#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0) -#define ACR_ENABLE (1 << 31) - -#define HDMI_NV_PDISP_HDMI_CTRL 0x44 -#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0) -#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16) -#define HDMI_CTRL_ENABLE (1 << 30) - -#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45 -#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46 -#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0) -#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16) -#define VSYNC_WINDOW_ENABLE (1 << 31) - -#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47 -#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48 -#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49 -#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a -#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b -#define HDMI_NV_PDISP_HDMI_EMU0 0x4c -#define HDMI_NV_PDISP_HDMI_EMU1 0x4d -#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e - -#define HDMI_NV_PDISP_HDMI_SPARE 0x4f -#define SPARE_HW_CTS (1 << 0) -#define SPARE_FORCE_SW_CTS (1 << 1) -#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16) - -#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50 -#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51 -#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53 -#define HDMI_NV_PDISP_SOR_CAP 0x54 -#define HDMI_NV_PDISP_SOR_PWR 0x55 -#define SOR_PWR_NORMAL_STATE_PD (0 << 0) -#define SOR_PWR_NORMAL_STATE_PU (1 << 0) -#define SOR_PWR_NORMAL_START_NORMAL 
(0 << 1) -#define SOR_PWR_NORMAL_START_ALT (1 << 1) -#define SOR_PWR_SAFE_STATE_PD (0 << 16) -#define SOR_PWR_SAFE_STATE_PU (1 << 16) -#define SOR_PWR_SETTING_NEW_DONE (0 << 31) -#define SOR_PWR_SETTING_NEW_PENDING (1 << 31) -#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31) - -#define HDMI_NV_PDISP_SOR_TEST 0x56 -#define HDMI_NV_PDISP_SOR_PLL0 0x57 -#define SOR_PLL_PWR (1 << 0) -#define SOR_PLL_PDBG (1 << 1) -#define SOR_PLL_VCAPD (1 << 2) -#define SOR_PLL_PDPORT (1 << 3) -#define SOR_PLL_RESISTORSEL (1 << 4) -#define SOR_PLL_PULLDOWN (1 << 5) -#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8) -#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12) -#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16) -#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24) -#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28) - -#define HDMI_NV_PDISP_SOR_PLL1 0x58 -#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8) -#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9) -#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20) -#define SOR_PLL_PE_EN (1 << 28) -#define SOR_PLL_HALF_FULL_PE (1 << 29) -#define SOR_PLL_S_D_PIN_PE (1 << 30) - -#define HDMI_NV_PDISP_SOR_PLL2 0x59 - -#define HDMI_NV_PDISP_SOR_CSTM 0x5a -#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24) - -#define HDMI_NV_PDISP_SOR_LVDS 0x5b -#define HDMI_NV_PDISP_SOR_CRCA 0x5c -#define HDMI_NV_PDISP_SOR_CRCB 0x5d -#define HDMI_NV_PDISP_SOR_BLANK 0x5e -#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f -#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0) -#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4) -#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8) -#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12) -#define SOR_SEQ_PC(x) (((x) & 0xf) << 16) -#define SOR_SEQ_STATUS (1 << 28) -#define SOR_SEQ_SWITCH (1 << 30) - -#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x)) - -#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0) -#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12) -#define SOR_SEQ_INST_HALT (1 << 15) -#define SOR_SEQ_INST_PIN_A_LOW (0 << 21) -#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21) -#define SOR_SEQ_INST_PIN_B_LOW (0 << 22) -#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22) -#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23) - -#define HDMI_NV_PDISP_SOR_VCRCA0 0x72 -#define HDMI_NV_PDISP_SOR_VCRCA1 0x73 -#define HDMI_NV_PDISP_SOR_CCRCA0 0x74 -#define HDMI_NV_PDISP_SOR_CCRCA1 0x75 -#define HDMI_NV_PDISP_SOR_EDATAA0 0x76 -#define HDMI_NV_PDISP_SOR_EDATAA1 0x77 -#define HDMI_NV_PDISP_SOR_COUNTA0 0x78 -#define HDMI_NV_PDISP_SOR_COUNTA1 0x79 -#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a -#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b -#define HDMI_NV_PDISP_SOR_TRIG 0x7c -#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d - -#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e -#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0) -#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8) -#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16) -#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24) -#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31) - -#define DRIVE_CURRENT_1_500_mA 0x00 -#define DRIVE_CURRENT_1_875_mA 0x01 -#define DRIVE_CURRENT_2_250_mA 0x02 -#define DRIVE_CURRENT_2_625_mA 0x03 -#define DRIVE_CURRENT_3_000_mA 0x04 -#define DRIVE_CURRENT_3_375_mA 0x05 -#define DRIVE_CURRENT_3_750_mA 0x06 -#define DRIVE_CURRENT_4_125_mA 0x07 -#define DRIVE_CURRENT_4_500_mA 0x08 -#define DRIVE_CURRENT_4_875_mA 0x09 -#define DRIVE_CURRENT_5_250_mA 0x0a -#define DRIVE_CURRENT_5_625_mA 0x0b -#define DRIVE_CURRENT_6_000_mA 0x0c -#define DRIVE_CURRENT_6_375_mA 0x0d -#define DRIVE_CURRENT_6_750_mA 0x0e -#define DRIVE_CURRENT_7_125_mA 0x0f -#define DRIVE_CURRENT_7_500_mA 0x10 -#define 
DRIVE_CURRENT_7_875_mA 0x11 -#define DRIVE_CURRENT_8_250_mA 0x12 -#define DRIVE_CURRENT_8_625_mA 0x13 -#define DRIVE_CURRENT_9_000_mA 0x14 -#define DRIVE_CURRENT_9_375_mA 0x15 -#define DRIVE_CURRENT_9_750_mA 0x16 -#define DRIVE_CURRENT_10_125_mA 0x17 -#define DRIVE_CURRENT_10_500_mA 0x18 -#define DRIVE_CURRENT_10_875_mA 0x19 -#define DRIVE_CURRENT_11_250_mA 0x1a -#define DRIVE_CURRENT_11_625_mA 0x1b -#define DRIVE_CURRENT_12_000_mA 0x1c -#define DRIVE_CURRENT_12_375_mA 0x1d -#define DRIVE_CURRENT_12_750_mA 0x1e -#define DRIVE_CURRENT_13_125_mA 0x1f -#define DRIVE_CURRENT_13_500_mA 0x20 -#define DRIVE_CURRENT_13_875_mA 0x21 -#define DRIVE_CURRENT_14_250_mA 0x22 -#define DRIVE_CURRENT_14_625_mA 0x23 -#define DRIVE_CURRENT_15_000_mA 0x24 -#define DRIVE_CURRENT_15_375_mA 0x25 -#define DRIVE_CURRENT_15_750_mA 0x26 -#define DRIVE_CURRENT_16_125_mA 0x27 -#define DRIVE_CURRENT_16_500_mA 0x28 -#define DRIVE_CURRENT_16_875_mA 0x29 -#define DRIVE_CURRENT_17_250_mA 0x2a -#define DRIVE_CURRENT_17_625_mA 0x2b -#define DRIVE_CURRENT_18_000_mA 0x2c -#define DRIVE_CURRENT_18_375_mA 0x2d -#define DRIVE_CURRENT_18_750_mA 0x2e -#define DRIVE_CURRENT_19_125_mA 0x2f -#define DRIVE_CURRENT_19_500_mA 0x30 -#define DRIVE_CURRENT_19_875_mA 0x31 -#define DRIVE_CURRENT_20_250_mA 0x32 -#define DRIVE_CURRENT_20_625_mA 0x33 -#define DRIVE_CURRENT_21_000_mA 0x34 -#define DRIVE_CURRENT_21_375_mA 0x35 -#define DRIVE_CURRENT_21_750_mA 0x36 -#define DRIVE_CURRENT_22_125_mA 0x37 -#define DRIVE_CURRENT_22_500_mA 0x38 -#define DRIVE_CURRENT_22_875_mA 0x39 -#define DRIVE_CURRENT_23_250_mA 0x3a -#define DRIVE_CURRENT_23_625_mA 0x3b -#define DRIVE_CURRENT_24_000_mA 0x3c -#define DRIVE_CURRENT_24_375_mA 0x3d -#define DRIVE_CURRENT_24_750_mA 0x3e - -#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f -#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80 -#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81 - -#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x)) -#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0) -#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16) - -#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89 -#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a -#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b -#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0) -#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20) -#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20) -#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20) -#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24) - -#define HDMI_NV_PDISP_AUDIO_N 0x8c -#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0) -#define AUDIO_N_RESETF (1 << 20) -#define AUDIO_N_GENERATE_NORMAL (0 << 24) -#define AUDIO_N_GENERATE_ALTERNATE (1 << 24) - -#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94 -#define HDMI_NV_PDISP_SOR_REFCLK 0x95 -#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8) -#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6) - -#define HDMI_NV_PDISP_CRC_CONTROL 0x96 -#define HDMI_NV_PDISP_INPUT_CONTROL 0x97 -#define HDMI_SRC_DISPLAYA (0 << 0) -#define HDMI_SRC_DISPLAYB (1 << 0) -#define ARM_VIDEO_RANGE_FULL (0 << 1) -#define ARM_VIDEO_RANGE_LIMITED (1 << 1) - -#define HDMI_NV_PDISP_SCRATCH 0x98 -#define HDMI_NV_PDISP_PE_CURRENT 0x99 -#define PE_CURRENT0(x) (((x) & 0xf) << 0) -#define PE_CURRENT1(x) (((x) & 0xf) << 8) -#define PE_CURRENT2(x) (((x) & 0xf) << 16) -#define PE_CURRENT3(x) (((x) & 0xf) << 24) - -#define PE_CURRENT_0_0_mA 0x0 -#define PE_CURRENT_0_5_mA 0x1 -#define PE_CURRENT_1_0_mA 0x2 -#define PE_CURRENT_1_5_mA 0x3 -#define PE_CURRENT_2_0_mA 0x4 -#define PE_CURRENT_2_5_mA 0x5 -#define PE_CURRENT_3_0_mA 0x6 -#define PE_CURRENT_3_5_mA 0x7 
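[Editor's note: the AUDIO_N register and the ACR_SUBPACK_N/ACR_SUBPACK_CTS fields defined above carry the HDMI Audio Clock Regeneration parameters: the sink recovers 128 * fs from f_TMDS * N / CTS, so for a given TMDS (pixel) clock the matching CTS is f_TMDS * N / (128 * fs). The sketch below shows only that arithmetic; the driver itself picks N/CTS/AVAL values from per-rate tables via tegra_hdmi_get_audio_config(), and hdmi_acr_cts() is an illustrative name.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Ideal CTS for a given N: CTS = f_TMDS * N / (128 * fs). Real drivers
 * round or choose N/CTS pairs so the division is exact. */
static uint32_t hdmi_acr_cts(uint32_t tmds_hz, uint32_t n, uint32_t fs_hz)
{
	return (uint32_t)(((uint64_t)tmds_hz * n) / (128u * (uint64_t)fs_hz));
}

int main(void)
{
	/* 148.5 MHz (1080p) with the common N of 6144 for 48 kHz audio. */
	printf("CTS = %" PRIu32 "\n", hdmi_acr_cts(148500000u, 6144u, 48000u));
	return 0;
}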
-#define PE_CURRENT_4_0_mA 0x8 -#define PE_CURRENT_4_5_mA 0x9 -#define PE_CURRENT_5_0_mA 0xa -#define PE_CURRENT_5_5_mA 0xb -#define PE_CURRENT_6_0_mA 0xc -#define PE_CURRENT_6_5_mA 0xd -#define PE_CURRENT_7_0_mA 0xe -#define PE_CURRENT_7_5_mA 0xf - -#define HDMI_NV_PDISP_KEY_CTRL 0x9a -#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b -#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c -#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d -#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e -#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f -#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0 -#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1 -#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2 -#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3 - -#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac -#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29) -#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc -#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd - -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5 - -#endif /* TEGRA_HDMI_H */ diff --git a/trunk/drivers/gpu/drm/tegra/host1x.c b/trunk/drivers/gpu/drm/tegra/host1x.c deleted file mode 100644 index bdb97a564d82..000000000000 --- a/trunk/drivers/gpu/drm/tegra/host1x.c +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include - -#include "drm.h" - -struct host1x_drm_client { - struct host1x_client *client; - struct device_node *np; - struct list_head list; -}; - -static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np) -{ - struct host1x_drm_client *client; - - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) - return -ENOMEM; - - INIT_LIST_HEAD(&client->list); - client->np = of_node_get(np); - - list_add_tail(&client->list, &host1x->drm_clients); - - return 0; -} - -static int host1x_activate_drm_client(struct host1x *host1x, - struct host1x_drm_client *drm, - struct host1x_client *client) -{ - mutex_lock(&host1x->drm_clients_lock); - list_del_init(&drm->list); - list_add_tail(&drm->list, &host1x->drm_active); - drm->client = client; - mutex_unlock(&host1x->drm_clients_lock); - - return 0; -} - -static int host1x_remove_drm_client(struct host1x *host1x, - struct host1x_drm_client *client) -{ - mutex_lock(&host1x->drm_clients_lock); - list_del_init(&client->list); - mutex_unlock(&host1x->drm_clients_lock); - - of_node_put(client->np); - kfree(client); - - return 0; -} - -static int host1x_parse_dt(struct host1x *host1x) -{ - static const char * const compat[] = { - "nvidia,tegra20-dc", - "nvidia,tegra20-hdmi", - "nvidia,tegra30-dc", - "nvidia,tegra30-hdmi", - }; - unsigned int i; - int err; - - for (i = 0; i < ARRAY_SIZE(compat); i++) { - struct device_node *np; - - for_each_child_of_node(host1x->dev->of_node, np) { - if (of_device_is_compatible(np, compat[i]) && - of_device_is_available(np)) { - err = host1x_add_drm_client(host1x, np); - if (err < 0) - return err; - } - } - } - - return 0; -} - -static int tegra_host1x_probe(struct platform_device *pdev) -{ - struct host1x *host1x; - struct resource *regs; - int err; - - host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL); - if (!host1x) - return -ENOMEM; - - mutex_init(&host1x->drm_clients_lock); - INIT_LIST_HEAD(&host1x->drm_clients); - INIT_LIST_HEAD(&host1x->drm_active); - mutex_init(&host1x->clients_lock); - INIT_LIST_HEAD(&host1x->clients); - host1x->dev = &pdev->dev; - - err = host1x_parse_dt(host1x); - if (err < 0) { - dev_err(&pdev->dev, "failed to parse DT: %d\n", err); - return err; - } - - host1x->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(host1x->clk)) - return PTR_ERR(host1x->clk); - - err = clk_prepare_enable(host1x->clk); - if (err < 0) - return err; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - err = -ENXIO; - goto err; - } - - err = platform_get_irq(pdev, 0); - if (err < 0) - goto err; - - host1x->syncpt = err; - - err = platform_get_irq(pdev, 1); - if (err < 0) - goto err; - - host1x->irq = err; - - host1x->regs = devm_request_and_ioremap(&pdev->dev, regs); - if (!host1x->regs) { - err = -EADDRNOTAVAIL; - goto err; - } - - platform_set_drvdata(pdev, host1x); - - return 0; - -err: - clk_disable_unprepare(host1x->clk); - return err; -} - -static int tegra_host1x_remove(struct platform_device *pdev) -{ - struct host1x *host1x = platform_get_drvdata(pdev); - - clk_disable_unprepare(host1x->clk); - - return 0; -} - -int host1x_drm_init(struct host1x *host1x, struct drm_device *drm) -{ - struct host1x_client *client; - - mutex_lock(&host1x->clients_lock); - - list_for_each_entry(client, &host1x->clients, list) { - if (client->ops && client->ops->drm_init) { - int err = client->ops->drm_init(client, drm); - if (err < 0) { - dev_err(host1x->dev, - "DRM setup failed for %s: %d\n", - dev_name(client->dev), err); 
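[Editor's note: host1x_parse_dt() earlier in this file collects every child node of the host1x controller that matches one of the known display compatibles and is marked available; the DRM device is only brought up once each of those nodes has registered a host1x client. A condensed, hedged restatement of that scan; scan_children() and its callback type are illustrative names, not driver API.]

#include <linux/of.h>

/* Call fn() for every available child of parent matching one of compat[]. */
static int scan_children(struct device_node *parent,
			 const char * const *compat, unsigned int num,
			 int (*fn)(struct device_node *np, void *data),
			 void *data)
{
	struct device_node *np;
	unsigned int i;
	int err;

	for (i = 0; i < num; i++) {
		for_each_child_of_node(parent, np) {
			if (!of_device_is_compatible(np, compat[i]) ||
			    !of_device_is_available(np))
				continue;

			err = fn(np, data);
			if (err < 0) {
				/* drop the reference held by the iterator */
				of_node_put(np);
				return err;
			}
		}
	}

	return 0;
}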
- return err; - } - } - } - - mutex_unlock(&host1x->clients_lock); - - return 0; -} - -int host1x_drm_exit(struct host1x *host1x) -{ - struct platform_device *pdev = to_platform_device(host1x->dev); - struct host1x_client *client; - - if (!host1x->drm) - return 0; - - mutex_lock(&host1x->clients_lock); - - list_for_each_entry_reverse(client, &host1x->clients, list) { - if (client->ops && client->ops->drm_exit) { - int err = client->ops->drm_exit(client); - if (err < 0) { - dev_err(host1x->dev, - "DRM cleanup failed for %s: %d\n", - dev_name(client->dev), err); - return err; - } - } - } - - mutex_unlock(&host1x->clients_lock); - - drm_platform_exit(&tegra_drm_driver, pdev); - host1x->drm = NULL; - - return 0; -} - -int host1x_register_client(struct host1x *host1x, struct host1x_client *client) -{ - struct host1x_drm_client *drm, *tmp; - int err; - - mutex_lock(&host1x->clients_lock); - list_add_tail(&client->list, &host1x->clients); - mutex_unlock(&host1x->clients_lock); - - list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list) - if (drm->np == client->dev->of_node) - host1x_activate_drm_client(host1x, drm, client); - - if (list_empty(&host1x->drm_clients)) { - struct platform_device *pdev = to_platform_device(host1x->dev); - - err = drm_platform_init(&tegra_drm_driver, pdev); - if (err < 0) { - dev_err(host1x->dev, "drm_platform_init(): %d\n", err); - return err; - } - } - - return 0; -} - -int host1x_unregister_client(struct host1x *host1x, - struct host1x_client *client) -{ - struct host1x_drm_client *drm, *tmp; - int err; - - list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) { - if (drm->client == client) { - err = host1x_drm_exit(host1x); - if (err < 0) { - dev_err(host1x->dev, "host1x_drm_exit(): %d\n", - err); - return err; - } - - host1x_remove_drm_client(host1x, drm); - break; - } - } - - mutex_lock(&host1x->clients_lock); - list_del_init(&client->list); - mutex_unlock(&host1x->clients_lock); - - return 0; -} - -static struct of_device_id tegra_host1x_of_match[] = { - { .compatible = "nvidia,tegra30-host1x", }, - { .compatible = "nvidia,tegra20-host1x", }, - { }, -}; -MODULE_DEVICE_TABLE(of, tegra_host1x_of_match); - -struct platform_driver tegra_host1x_driver = { - .driver = { - .name = "tegra-host1x", - .owner = THIS_MODULE, - .of_match_table = tegra_host1x_of_match, - }, - .probe = tegra_host1x_probe, - .remove = tegra_host1x_remove, -}; - -static int __init tegra_host1x_init(void) -{ - int err; - - err = platform_driver_register(&tegra_host1x_driver); - if (err < 0) - return err; - - err = platform_driver_register(&tegra_dc_driver); - if (err < 0) - goto unregister_host1x; - - err = platform_driver_register(&tegra_hdmi_driver); - if (err < 0) - goto unregister_dc; - - return 0; - -unregister_dc: - platform_driver_unregister(&tegra_dc_driver); -unregister_host1x: - platform_driver_unregister(&tegra_host1x_driver); - return err; -} -module_init(tegra_host1x_init); - -static void __exit tegra_host1x_exit(void) -{ - platform_driver_unregister(&tegra_hdmi_driver); - platform_driver_unregister(&tegra_dc_driver); - platform_driver_unregister(&tegra_host1x_driver); -} -module_exit(tegra_host1x_exit); - -MODULE_AUTHOR("Thierry Reding "); -MODULE_DESCRIPTION("NVIDIA Tegra DRM driver"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/gpu/drm/tegra/output.c b/trunk/drivers/gpu/drm/tegra/output.c deleted file mode 100644 index 8140fc6c34d8..000000000000 --- a/trunk/drivers/gpu/drm/tegra/output.c +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright (C) 2012 Avionic 
Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include - -#include "drm.h" - -static int tegra_connector_get_modes(struct drm_connector *connector) -{ - struct tegra_output *output = connector_to_output(connector); - struct edid *edid = NULL; - int err = 0; - - if (output->edid) - edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL); - else if (output->ddc) - edid = drm_get_edid(connector, output->ddc); - - drm_mode_connector_update_edid_property(connector, edid); - - if (edid) { - err = drm_add_edid_modes(connector, edid); - kfree(edid); - } - - return err; -} - -static int tegra_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct tegra_output *output = connector_to_output(connector); - enum drm_mode_status status = MODE_OK; - int err; - - err = tegra_output_check_mode(output, mode, &status); - if (err < 0) - return MODE_ERROR; - - return status; -} - -static struct drm_encoder * -tegra_connector_best_encoder(struct drm_connector *connector) -{ - struct tegra_output *output = connector_to_output(connector); - - return &output->encoder; -} - -static const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = tegra_connector_get_modes, - .mode_valid = tegra_connector_mode_valid, - .best_encoder = tegra_connector_best_encoder, -}; - -static enum drm_connector_status -tegra_connector_detect(struct drm_connector *connector, bool force) -{ - struct tegra_output *output = connector_to_output(connector); - enum drm_connector_status status = connector_status_unknown; - - if (gpio_is_valid(output->hpd_gpio)) { - if (gpio_get_value(output->hpd_gpio) == 0) - status = connector_status_disconnected; - else - status = connector_status_connected; - } else { - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) - status = connector_status_connected; - } - - return status; -} - -static void tegra_connector_destroy(struct drm_connector *connector) -{ - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -} - -static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_helper_connector_dpms, - .detect = tegra_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = tegra_connector_destroy, -}; - -static void tegra_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = tegra_encoder_destroy, -}; - -static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode) -{ -} - -static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - return true; -} - -static void tegra_encoder_prepare(struct drm_encoder *encoder) -{ -} - -static void tegra_encoder_commit(struct drm_encoder *encoder) -{ -} - -static void tegra_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - struct tegra_output *output = encoder_to_output(encoder); - int err; - - err = tegra_output_enable(output); - if (err < 0) - dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err); -} - -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .dpms = tegra_encoder_dpms, - .mode_fixup = 
tegra_encoder_mode_fixup, - .prepare = tegra_encoder_prepare, - .commit = tegra_encoder_commit, - .mode_set = tegra_encoder_mode_set, -}; - -static irqreturn_t hpd_irq(int irq, void *data) -{ - struct tegra_output *output = data; - - drm_helper_hpd_irq_event(output->connector.dev); - - return IRQ_HANDLED; -} - -int tegra_output_parse_dt(struct tegra_output *output) -{ - enum of_gpio_flags flags; - struct device_node *ddc; - size_t size; - int err; - - if (!output->of_node) - output->of_node = output->dev->of_node; - - output->edid = of_get_property(output->of_node, "nvidia,edid", &size); - - ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0); - if (ddc) { - output->ddc = of_find_i2c_adapter_by_node(ddc); - if (!output->ddc) { - err = -EPROBE_DEFER; - of_node_put(ddc); - return err; - } - - of_node_put(ddc); - } - - if (!output->edid && !output->ddc) - return -ENODEV; - - output->hpd_gpio = of_get_named_gpio_flags(output->of_node, - "nvidia,hpd-gpio", 0, - &flags); - - return 0; -} - -int tegra_output_init(struct drm_device *drm, struct tegra_output *output) -{ - int connector, encoder, err; - - if (gpio_is_valid(output->hpd_gpio)) { - unsigned long flags; - - err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN, - "HDMI hotplug detect"); - if (err < 0) { - dev_err(output->dev, "gpio_request_one(): %d\n", err); - return err; - } - - err = gpio_to_irq(output->hpd_gpio); - if (err < 0) { - dev_err(output->dev, "gpio_to_irq(): %d\n", err); - goto free_hpd; - } - - output->hpd_irq = err; - - flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | - IRQF_ONESHOT; - - err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq, - flags, "hpd", output); - if (err < 0) { - dev_err(output->dev, "failed to request IRQ#%u: %d\n", - output->hpd_irq, err); - goto free_hpd; - } - - output->connector.polled = DRM_CONNECTOR_POLL_HPD; - } - - switch (output->type) { - case TEGRA_OUTPUT_RGB: - connector = DRM_MODE_CONNECTOR_LVDS; - encoder = DRM_MODE_ENCODER_LVDS; - break; - - case TEGRA_OUTPUT_HDMI: - connector = DRM_MODE_CONNECTOR_HDMIA; - encoder = DRM_MODE_ENCODER_TMDS; - break; - - default: - connector = DRM_MODE_CONNECTOR_Unknown; - encoder = DRM_MODE_ENCODER_NONE; - break; - } - - drm_connector_init(drm, &output->connector, &connector_funcs, - connector); - drm_connector_helper_add(&output->connector, &connector_helper_funcs); - - drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder); - drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); - - drm_mode_connector_attach_encoder(&output->connector, &output->encoder); - drm_sysfs_connector_add(&output->connector); - - output->encoder.possible_crtcs = 0x3; - - return 0; - -free_hpd: - gpio_free(output->hpd_gpio); - - return err; -} - -int tegra_output_exit(struct tegra_output *output) -{ - if (gpio_is_valid(output->hpd_gpio)) { - free_irq(output->hpd_irq, output); - gpio_free(output->hpd_gpio); - } - - if (output->ddc) - put_device(&output->ddc->dev); - - return 0; -} diff --git a/trunk/drivers/gpu/drm/tegra/rgb.c b/trunk/drivers/gpu/drm/tegra/rgb.c deleted file mode 100644 index ed4416f20260..000000000000 --- a/trunk/drivers/gpu/drm/tegra/rgb.c +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include - -#include "drm.h" -#include "dc.h" - -struct tegra_rgb { - struct tegra_output output; - struct clk *clk_parent; - struct clk *clk; -}; - -static inline struct tegra_rgb *to_rgb(struct tegra_output *output) -{ - return container_of(output, struct tegra_rgb, output); -} - -struct reg_entry { - unsigned long offset; - unsigned long value; -}; - -static const struct reg_entry rgb_enable[] = { - { DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 }, - { DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 }, - { DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 }, -}; - -static const struct reg_entry rgb_disable[] = { - { DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa }, - { DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa }, - { DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa }, - { DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa }, - { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 }, - { DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 }, - { DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 }, - { DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 }, -}; - -static void tegra_dc_write_regs(struct tegra_dc *dc, - const struct reg_entry *table, - unsigned int num) -{ - unsigned int i; - - for (i = 0; i < num; i++) - tegra_dc_writel(dc, table[i].value, table[i].offset); -} - -static int tegra_output_rgb_enable(struct tegra_output *output) -{ - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - - tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); - - return 0; -} - -static int tegra_output_rgb_disable(struct tegra_output *output) -{ - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - - tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); - - return 0; -} - -static int tegra_output_rgb_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk) -{ - struct tegra_rgb *rgb = to_rgb(output); - - return clk_set_parent(clk, rgb->clk_parent); -} - -static int tegra_output_rgb_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - /* - * FIXME: For now, always assume that the mode is okay. There are - * unresolved issues with clk_round_rate(), which doesn't always - * reliably report whether a frequency can be set or not. 
- */ - - *status = MODE_OK; - - return 0; -} - -static const struct tegra_output_ops rgb_ops = { - .enable = tegra_output_rgb_enable, - .disable = tegra_output_rgb_disable, - .setup_clock = tegra_output_rgb_setup_clock, - .check_mode = tegra_output_rgb_check_mode, -}; - -int tegra_dc_rgb_probe(struct tegra_dc *dc) -{ - struct device_node *np; - struct tegra_rgb *rgb; - int err; - - np = of_get_child_by_name(dc->dev->of_node, "rgb"); - if (!np || !of_device_is_available(np)) - return -ENODEV; - - rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL); - if (!rgb) - return -ENOMEM; - - rgb->clk = devm_clk_get(dc->dev, NULL); - if (IS_ERR(rgb->clk)) { - dev_err(dc->dev, "failed to get clock\n"); - return PTR_ERR(rgb->clk); - } - - rgb->clk_parent = devm_clk_get(dc->dev, "parent"); - if (IS_ERR(rgb->clk_parent)) { - dev_err(dc->dev, "failed to get parent clock\n"); - return PTR_ERR(rgb->clk_parent); - } - - err = clk_set_parent(rgb->clk, rgb->clk_parent); - if (err < 0) { - dev_err(dc->dev, "failed to set parent clock: %d\n", err); - return err; - } - - rgb->output.dev = dc->dev; - rgb->output.of_node = np; - - err = tegra_output_parse_dt(&rgb->output); - if (err < 0) - return err; - - dc->rgb = &rgb->output; - - return 0; -} - -int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) -{ - struct tegra_rgb *rgb = to_rgb(dc->rgb); - int err; - - if (!dc->rgb) - return -ENODEV; - - rgb->output.type = TEGRA_OUTPUT_RGB; - rgb->output.ops = &rgb_ops; - - err = tegra_output_init(dc->base.dev, &rgb->output); - if (err < 0) { - dev_err(dc->dev, "output setup failed: %d\n", err); - return err; - } - - /* - * By default, outputs can be associated with each display controller. - * RGB outputs are an exception, so we make sure they can be attached - * to only their parent display controller. - */ - rgb->output.encoder.possible_crtcs = 1 << dc->pipe; - - return 0; -} - -int tegra_dc_rgb_exit(struct tegra_dc *dc) -{ - if (dc->rgb) { - int err; - - err = tegra_output_disable(dc->rgb); - if (err < 0) { - dev_err(dc->dev, "output failed to disable: %d\n", err); - return err; - } - - err = tegra_output_exit(dc->rgb); - if (err < 0) { - dev_err(dc->dev, "output cleanup failed: %d\n", err); - return err; - } - - dc->rgb = NULL; - } - - return 0; -} diff --git a/trunk/drivers/gpu/drm/ttm/ttm_bo.c b/trunk/drivers/gpu/drm/ttm/ttm_bo.c index 2c54c3d414b3..bf6e4b5a73b5 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_bo.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_bo.c @@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) { if (interruptible) { return wait_event_interruptible(bo->event_queue, - !ttm_bo_is_reserved(bo)); + atomic_read(&bo->reserved) == 0); } else { - wait_event(bo->event_queue, !ttm_bo_is_reserved(bo)); + wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); return 0; } } @@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { @@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, struct ttm_bo_global *glob = bo->glob; int ret; - while (unlikely(atomic_read(&bo->reserved) != 0)) { + while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { /** * Deadlock avoidance for multi-bo reserving. 
*/ @@ -249,7 +249,6 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, return ret; } - atomic_set(&bo->reserved, 1); if (use_sequence) { /** * Wake up waiters that may need to recheck for deadlock, @@ -502,6 +501,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) struct ttm_bo_global *glob = bo->glob; struct ttm_bo_driver *driver; void *sync_obj = NULL; + void *sync_obj_arg; int put_count; int ret; @@ -537,6 +537,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) driver = bdev->driver; if (bo->sync_obj) sync_obj = driver->sync_obj_ref(bo->sync_obj); + sync_obj_arg = bo->sync_obj_arg; kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); @@ -544,7 +545,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) spin_unlock(&bdev->fence_lock); if (sync_obj) { - driver->sync_obj_flush(sync_obj); + driver->sync_obj_flush(sync_obj, sync_obj_arg); driver->sync_obj_unref(&sync_obj); } schedule_delayed_work(&bdev->wq, @@ -696,7 +697,6 @@ static void ttm_bo_release(struct kref *kref) struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; - write_lock(&bdev->vm_lock); if (likely(bo->vm_node != NULL)) { rb_erase(&bo->vm_rb, &bdev->addr_space_rb); drm_mm_put_block(bo->vm_node); @@ -708,14 +708,18 @@ static void ttm_bo_release(struct kref *kref) ttm_mem_io_unlock(man); ttm_bo_cleanup_refs_or_queue(bo); kref_put(&bo->list_kref, ttm_bo_release_list); + write_lock(&bdev->vm_lock); } void ttm_bo_unref(struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo = *p_bo; + struct ttm_bo_device *bdev = bo->bdev; *p_bo = NULL; + write_lock(&bdev->vm_lock); kref_put(&bo->kref, ttm_bo_release); + write_unlock(&bdev->vm_lock); } EXPORT_SYMBOL(ttm_bo_unref); @@ -752,7 +756,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, goto out; } - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); evict_mem = bo->mem; evict_mem.mm_node = NULL; @@ -1050,6 +1054,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_mem_space); +int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) +{ + if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) + return -EBUSY; + + return wait_event_interruptible(bo->event_queue, + atomic_read(&bo->cpu_writers) == 0); +} +EXPORT_SYMBOL(ttm_bo_wait_cpu); + int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_reserve, @@ -1059,7 +1073,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_mem_reg mem; struct ttm_bo_device *bdev = bo->bdev; - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); /* * FIXME: It's possible to pipeline buffer moves. 
@@ -1116,7 +1130,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, { int ret; - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); /* Check that range is valid */ if (placement->lpfn || placement->fpfn) if (placement->fpfn > placement->lpfn || @@ -1165,6 +1179,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, + unsigned long buffer_start, bool interruptible, struct file *persistent_swap_storage, size_t acc_size, @@ -1185,6 +1200,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, return -ENOMEM; } + size += buffer_start & ~PAGE_MASK; num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; if (num_pages == 0) { pr_err("Illegal buffer object size\n"); @@ -1217,6 +1233,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.page_alignment = page_alignment; bo->mem.bus.io_reserved_vm = false; bo->mem.bus.io_reserved_count = 0; + bo->buffer_start = buffer_start & PAGE_MASK; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); bo->seq_valid = false; @@ -1289,6 +1306,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, + unsigned long buffer_start, bool interruptible, struct file *persistent_swap_storage, struct ttm_buffer_object **p_bo) @@ -1303,8 +1321,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev, acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, - interruptible, persistent_swap_storage, acc_size, - NULL, NULL); + buffer_start, interruptible, + persistent_swap_storage, acc_size, NULL, NULL); if (likely(ret == 0)) *p_bo = bo; @@ -1559,6 +1577,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, goto out_no_addr_mm; INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); + bdev->nice_mode = true; INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = NULL; bdev->glob = glob; @@ -1702,6 +1721,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, struct ttm_bo_driver *driver = bo->bdev->driver; struct ttm_bo_device *bdev = bo->bdev; void *sync_obj; + void *sync_obj_arg; int ret = 0; if (likely(bo->sync_obj == NULL)) @@ -1709,7 +1729,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, while (bo->sync_obj) { - if (driver->sync_obj_signaled(bo->sync_obj)) { + if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); @@ -1723,8 +1743,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, return -EBUSY; sync_obj = driver->sync_obj_ref(bo->sync_obj); + sync_obj_arg = bo->sync_obj_arg; spin_unlock(&bdev->fence_lock); - ret = driver->sync_obj_wait(sync_obj, + ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, lazy, interruptible); if (unlikely(ret != 0)) { driver->sync_obj_unref(&sync_obj); @@ -1732,7 +1753,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, return ret; } spin_lock(&bdev->fence_lock); - if (likely(bo->sync_obj == sync_obj)) { + if (likely(bo->sync_obj == sync_obj && + bo->sync_obj_arg == sync_obj_arg)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, @@ -1775,7 +1797,8 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) { - atomic_dec(&bo->cpu_writers); + if (atomic_dec_and_test(&bo->cpu_writers)) + wake_up_all(&bo->event_queue); } EXPORT_SYMBOL(ttm_bo_synccpu_write_release); diff --git 
a/trunk/drivers/gpu/drm/ttm/ttm_bo_util.c b/trunk/drivers/gpu/drm/ttm/ttm_bo_util.c index b9c4e515b1d8..2026060f03e0 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -611,6 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, void *sync_obj, + void *sync_obj_arg, bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) @@ -629,6 +630,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, bo->sync_obj = NULL; } bo->sync_obj = driver->sync_obj_ref(sync_obj); + bo->sync_obj_arg = sync_obj_arg; if (evict) { ret = ttm_bo_wait(bo, false, false, false); spin_unlock(&bdev->fence_lock); diff --git a/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c b/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c index 74705f329d99..3ba72dbdc4bd 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, read_lock(&bdev->vm_lock); bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, (vma->vm_end - vma->vm_start) >> PAGE_SHIFT); - if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref)) - bo = NULL; + if (likely(bo != NULL)) + ttm_bo_reference(bo); read_unlock(&bdev->vm_lock); if (unlikely(bo == NULL)) { diff --git a/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 1986d006c264..1937069432c5 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -185,7 +185,10 @@ int ttm_eu_reserve_buffers(struct list_head *list) ttm_eu_backoff_reservation_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); - return -EBUSY; + ret = ttm_bo_wait_cpu(bo, false); + if (ret) + return ret; + goto retry; } } @@ -220,6 +223,7 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) bo = entry->bo; entry->old_sync_obj = bo->sync_obj; bo->sync_obj = driver->sync_obj_ref(sync_obj); + bo->sync_obj_arg = entry->new_sync_obj_arg; ttm_bo_unreserve_locked(bo); entry->reserved = false; } diff --git a/trunk/drivers/gpu/drm/ttm/ttm_memory.c b/trunk/drivers/gpu/drm/ttm/ttm_memory.c index dbc2def887cd..479c6b0467ca 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_memory.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_memory.c @@ -367,6 +367,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) spin_lock_init(&glob->lock); glob->swap_queue = create_singlethread_workqueue("ttm_swap"); INIT_WORK(&glob->work, ttm_shrink_work); + init_waitqueue_head(&glob->queue); ret = kobject_init_and_add( &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); if (unlikely(ret != 0)) { diff --git a/trunk/drivers/gpu/drm/ttm/ttm_object.c b/trunk/drivers/gpu/drm/ttm/ttm_object.c index 58a5f3261c0b..c7857874956a 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_object.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_object.c @@ -80,7 +80,7 @@ struct ttm_object_file { */ struct ttm_object_device { - spinlock_t object_lock; + rwlock_t object_lock; struct drm_open_hash object_hash; atomic_t object_count; struct ttm_mem_global *mem_glob; @@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile, base->refcount_release = refcount_release; base->ref_obj_release = ref_obj_release; base->object_type = object_type; + write_lock(&tdev->object_lock); kref_init(&base->refcount); - spin_lock(&tdev->object_lock); - ret = drm_ht_just_insert_please_rcu(&tdev->object_hash, - &base->hash, - (unsigned long)base, 31, 0, 
0); - spin_unlock(&tdev->object_lock); + ret = drm_ht_just_insert_please(&tdev->object_hash, + &base->hash, + (unsigned long)base, 31, 0, 0); + write_unlock(&tdev->object_lock); if (unlikely(ret != 0)) goto out_err0; @@ -174,9 +174,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, return 0; out_err1: - spin_lock(&tdev->object_lock); - (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); - spin_unlock(&tdev->object_lock); + (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); out_err0: return ret; } @@ -188,29 +186,30 @@ static void ttm_release_base(struct kref *kref) container_of(kref, struct ttm_base_object, refcount); struct ttm_object_device *tdev = base->tfile->tdev; - spin_lock(&tdev->object_lock); - (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); - spin_unlock(&tdev->object_lock); - - /* - * Note: We don't use synchronize_rcu() here because it's far - * too slow. It's up to the user to free the object using - * call_rcu() or ttm_base_object_kfree(). - */ - + (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); + write_unlock(&tdev->object_lock); if (base->refcount_release) { ttm_object_file_unref(&base->tfile); base->refcount_release(&base); } + write_lock(&tdev->object_lock); } void ttm_base_object_unref(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; + struct ttm_object_device *tdev = base->tfile->tdev; *p_base = NULL; + /* + * Need to take the lock here to avoid racing with + * users trying to look up the object. + */ + + write_lock(&tdev->object_lock); kref_put(&base->refcount, ttm_release_base); + write_unlock(&tdev->object_lock); } EXPORT_SYMBOL(ttm_base_object_unref); @@ -222,14 +221,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, struct drm_hash_item *hash; int ret; - rcu_read_lock(); - ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash); + read_lock(&tdev->object_lock); + ret = drm_ht_find_item(&tdev->object_hash, key, &hash); if (likely(ret == 0)) { base = drm_hash_entry(hash, struct ttm_base_object, hash); - ret = kref_get_unless_zero(&base->refcount) ? 
0 : -EINVAL; + kref_get(&base->refcount); } - rcu_read_unlock(); + read_unlock(&tdev->object_lock); if (unlikely(ret != 0)) return NULL; @@ -427,7 +426,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global return NULL; tdev->mem_glob = mem_glob; - spin_lock_init(&tdev->object_lock); + rwlock_init(&tdev->object_lock); atomic_set(&tdev->object_count, 0); ret = drm_ht_create(&tdev->object_hash, hash_order); @@ -445,9 +444,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) *p_tdev = NULL; - spin_lock(&tdev->object_lock); + write_lock(&tdev->object_lock); drm_ht_remove(&tdev->object_hash); - spin_unlock(&tdev->object_lock); + write_unlock(&tdev->object_lock); kfree(tdev); } diff --git a/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c b/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c index 860dc4813e99..bd2a3b40cd12 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, /* clear the pages coming from the pool if requested */ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { list_for_each_entry(p, &plist, lru) { - clear_page(page_address(p)); + if (PageHighMem(p)) + clear_highpage(p); + else + clear_page(page_address(p)); } } diff --git a/trunk/drivers/gpu/drm/ttm/ttm_tt.c b/trunk/drivers/gpu/drm/ttm/ttm_tt.c index bf8260133ea9..7d759a430294 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_tt.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_tt.c @@ -308,9 +308,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) if (unlikely(to_page == NULL)) goto out_err; - preempt_disable(); copy_highpage(to_page, from_page); - preempt_enable(); page_cache_release(from_page); } @@ -358,9 +356,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) ret = PTR_ERR(to_page); goto out_err; } - preempt_disable(); copy_highpage(to_page, from_page); - preempt_enable(); set_page_dirty(to_page); mark_page_accessed(to_page); page_cache_release(to_page); diff --git a/trunk/drivers/gpu/drm/udl/udl_connector.c b/trunk/drivers/gpu/drm/udl/udl_connector.c index 512f44add89f..b3b2cedf6745 100644 --- a/trunk/drivers/gpu/drm/udl/udl_connector.c +++ b/trunk/drivers/gpu/drm/udl/udl_connector.c @@ -84,8 +84,7 @@ udl_detect(struct drm_connector *connector, bool force) return connector_status_connected; } -static struct drm_encoder* -udl_best_single_encoder(struct drm_connector *connector) +struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct drm_mode_object *obj; @@ -98,9 +97,8 @@ udl_best_single_encoder(struct drm_connector *connector) return encoder; } -static int udl_connector_set_property(struct drm_connector *connector, - struct drm_property *property, - uint64_t val) +int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property, + uint64_t val) { return 0; } @@ -112,13 +110,13 @@ static void udl_connector_destroy(struct drm_connector *connector) kfree(connector); } -static struct drm_connector_helper_funcs udl_connector_helper_funcs = { +struct drm_connector_helper_funcs udl_connector_helper_funcs = { .get_modes = udl_get_modes, .mode_valid = udl_mode_valid, .best_encoder = udl_best_single_encoder, }; -static struct drm_connector_funcs udl_connector_funcs = { +struct drm_connector_funcs udl_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = udl_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -140,7 +138,7 @@ int 
udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) drm_sysfs_connector_add(connector); drm_mode_connector_attach_encoder(connector, encoder); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.dirty_info_property, 1); return 0; diff --git a/trunk/drivers/gpu/drm/vmwgfx/Makefile b/trunk/drivers/gpu/drm/vmwgfx/Makefile index 2cc6cd91ac11..586869c8c11f 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/Makefile +++ b/trunk/drivers/gpu/drm/vmwgfx/Makefile @@ -5,7 +5,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ - vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ - vmwgfx_surface.o + vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/trunk/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/trunk/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h deleted file mode 100644 index 8369c3ba10fe..000000000000 --- a/trunk/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ /dev/null @@ -1,909 +0,0 @@ -/************************************************************************** - * - * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#ifdef __KERNEL__ - -#include -#define surf_size_struct struct drm_vmw_size - -#else /* __KERNEL__ */ - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0])) -#endif /* ARRAY_SIZE */ - -#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) -#define max_t(type, x, y) ((x) > (y) ? (x) : (y)) -#define surf_size_struct SVGA3dSize -#define u32 uint32 - -#endif /* __KERNEL__ */ - -#include "svga3d_reg.h" - -/* - * enum svga3d_block_desc describes the active data channels in a block. - * - * There can be at-most four active channels in a block: - * 1. Red, bump W, luminance and depth are stored in the first channel. - * 2. Green, bump V and stencil are stored in the second channel. - * 3. Blue and bump U are stored in the third channel. - * 4. Alpha and bump Q are stored in the fourth channel. - * - * Block channels can be used to store compressed and buffer data: - * 1. 
For compressed formats, only the data channel is used and its size - * is equal to that of a singular block in the compression scheme. - * 2. For buffer formats, only the data channel is used and its size is - * exactly one byte in length. - * 3. In each case the bit depth represent the size of a singular block. - * - * Note: Compressed and IEEE formats do not use the bitMask structure. - */ - -enum svga3d_block_desc { - SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */ - SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel - data */ - SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel - data */ - SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video - U and V */ - SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel - data */ - SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel - data */ - SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil - channel */ - SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel - data */ - SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel - data */ - SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel - data */ - SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance - data */ - SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */ - SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha - channel */ - SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel - data */ - SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of - data */ - SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of - data depending on the - compression method used */ - SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE - floating point - representation in - all channels */ - SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store - data. */ - SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */ - SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */ - SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */ - SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */ - SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV, - e.g., NV12. */ - SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate - Y, U, V, e.g., YV12. 
*/ - - SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN, - SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG | - SVGA3DBLOCKDESC_BLUE, - SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U | - SVGA3DBLOCKDESC_V, - SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_LUMINANCE, - SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_W, - SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U | - SVGA3DBLOCKDESC_V | - SVGA3DBLOCKDESC_W | - SVGA3DBLOCKDESC_Q, - SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_IEEE_FP, - SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP | - SVGA3DBLOCKDESC_GREEN, - SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP | - SVGA3DBLOCKDESC_BLUE, - SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH | - SVGA3DBLOCKDESC_STENCIL, - SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO | - SVGA3DBLOCKDESC_Y, - SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_Y | - SVGA3DBLOCKDESC_U_VIDEO | - SVGA3DBLOCKDESC_V_VIDEO, - SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_EXP, - SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV | - SVGA3DBLOCKDESC_2PLANAR_YUV, - SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV | - SVGA3DBLOCKDESC_3PLANAR_YUV, -}; - -/* - * SVGA3dSurfaceDesc describes the actual pixel data. - * - * This structure provides the following information: - * 1. Block description. - * 2. Dimensions of a block in the surface. - * 3. Size of block in bytes. - * 4. Bit depth of the pixel data. - * 5. Channel bit depths and masks (if applicable). 
- */ -#define SVGA3D_CHANNEL_DEF(type) \ - struct { \ - union { \ - type blue; \ - type u; \ - type uv_video; \ - type u_video; \ - }; \ - union { \ - type green; \ - type v; \ - type stencil; \ - type v_video; \ - }; \ - union { \ - type red; \ - type w; \ - type luminance; \ - type y; \ - type depth; \ - type data; \ - }; \ - union { \ - type alpha; \ - type q; \ - type exp; \ - }; \ - } - -struct svga3d_surface_desc { - enum svga3d_block_desc block_desc; - surf_size_struct block_size; - u32 bytes_per_block; - u32 pitch_bytes_per_block; - - struct { - u32 total; - SVGA3D_CHANNEL_DEF(uint8); - } bit_depth; - - struct { - SVGA3D_CHANNEL_DEF(uint8); - } bit_offset; -}; - -static const struct svga3d_surface_desc svga3d_surface_descs[] = { - {SVGA3DBLOCKDESC_NONE, - {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } }, - {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } }, - {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } }, - {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } }, - {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } }, - {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */ - - {SVGA3DBLOCKDESC_LUMINANCE, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */ - - {SVGA3DBLOCKDESC_LA, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } }, - {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */ - - {SVGA3DBLOCKDESC_LUMINANCE, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */ - - {SVGA3DBLOCKDESC_LA, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } }, - {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } }, - {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 2, 2, {16, {{5}, 
{5}, {6}, {0} } }, - {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */ - - {SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */ - - {SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */ - - {SVGA3DBLOCKDESC_UVWA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */ - - {SVGA3DBLOCKDESC_ALPHA, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */ - - {SVGA3DBLOCKDESC_BUFFER, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } }, - {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */ - - {SVGA3DBLOCKDESC_YUV, - {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } }, - {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */ - - {SVGA3DBLOCKDESC_YUV, - {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */ - - {SVGA3DBLOCKDESC_NV12, - {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */ - - {SVGA3DBLOCKDESC_AYUV, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, 
{32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */ - - {SVGA3DBLOCKDESC_RGB_FP, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */ - - {SVGA3DBLOCKDESC_UVW, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */ - - {SVGA3DBLOCKDESC_GREEN, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */ - - {SVGA3DBLOCKDESC_RGB_FP, - {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } }, - {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */ - - {SVGA3DBLOCKDESC_RGBA_SRGB, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, 
{0} } } }, /* SVGA3D_R16G16_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */ - - {SVGA3DBLOCKDESC_GREEN, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */ - - {SVGA3DBLOCKDESC_RED, - {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */ - - {SVGA3DBLOCKDESC_RGBE, - {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } }, - {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */ 
- - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA_SRGB, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGB_SRGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */ -}; - -static inline u32 clamped_umul32(u32 a, u32 b) -{ - uint64_t tmp = (uint64_t) a*b; - return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; -} - -static inline const struct svga3d_surface_desc * -svga3dsurface_get_desc(SVGA3dSurfaceFormat format) -{ - if (format < ARRAY_SIZE(svga3d_surface_descs)) - return &svga3d_surface_descs[format]; - - return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID]; -} - -/* - *---------------------------------------------------------------------- - * - * svga3dsurface_get_mip_size -- - * - * Given a base level size and the mip level, compute the size of - * the mip level. - * - * Results: - * See above. - * - * Side effects: - * None. 
- * - *---------------------------------------------------------------------- - */ - -static inline surf_size_struct -svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level) -{ - surf_size_struct size; - - size.width = max_t(u32, base_level.width >> mip_level, 1); - size.height = max_t(u32, base_level.height >> mip_level, 1); - size.depth = max_t(u32, base_level.depth >> mip_level, 1); - return size; -} - -static inline void -svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc, - const surf_size_struct *pixel_size, - surf_size_struct *block_size) -{ - block_size->width = DIV_ROUND_UP(pixel_size->width, - desc->block_size.width); - block_size->height = DIV_ROUND_UP(pixel_size->height, - desc->block_size.height); - block_size->depth = DIV_ROUND_UP(pixel_size->depth, - desc->block_size.depth); -} - -static inline bool -svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc) -{ - return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0; -} - -static inline u32 -svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc, - const surf_size_struct *size) -{ - u32 pitch; - surf_size_struct blocks; - - svga3dsurface_get_size_in_blocks(desc, size, &blocks); - - pitch = blocks.width * desc->pitch_bytes_per_block; - - return pitch; -} - -/* - *----------------------------------------------------------------------------- - * - * svga3dsurface_get_image_buffer_size -- - * - * Return the number of bytes of buffer space required to store - * one image of a surface, optionally using the specified pitch. - * - * If pitch is zero, it is assumed that rows are tightly packed. - * - * This function is overflow-safe. If the result would have - * overflowed, instead we return MAX_UINT32. - * - * Results: - * Byte count. - * - * Side effects: - * None. - * - *----------------------------------------------------------------------------- - */ - -static inline u32 -svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc, - const surf_size_struct *size, - u32 pitch) -{ - surf_size_struct image_blocks; - u32 slice_size, total_size; - - svga3dsurface_get_size_in_blocks(desc, size, &image_blocks); - - if (svga3dsurface_is_planar_surface(desc)) { - total_size = clamped_umul32(image_blocks.width, - image_blocks.height); - total_size = clamped_umul32(total_size, image_blocks.depth); - total_size = clamped_umul32(total_size, desc->bytes_per_block); - return total_size; - } - - if (pitch == 0) - pitch = svga3dsurface_calculate_pitch(desc, size); - - slice_size = clamped_umul32(image_blocks.height, pitch); - total_size = clamped_umul32(slice_size, image_blocks.depth); - - return total_size; -} - -static inline u32 -svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, - surf_size_struct base_level_size, - u32 num_mip_levels, - bool cubemap) -{ - const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - u32 total_size = 0; - u32 mip; - - for (mip = 0; mip < num_mip_levels; mip++) { - surf_size_struct size = - svga3dsurface_get_mip_size(base_level_size, mip); - total_size += svga3dsurface_get_image_buffer_size(desc, - &size, 0); - } - - if (cubemap) - total_size *= SVGA3D_MAX_SURFACE_FACES; - - return total_size; -} - - -/** - * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel - * in an image (or volume). - * - * @width: The image width in pixels. 
- * @height: The image height in pixels - */ -static inline u32 -svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format, - u32 width, u32 height, - u32 x, u32 y, u32 z) -{ - const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - const u32 bw = desc->block_size.width, bh = desc->block_size.height; - const u32 bd = desc->block_size.depth; - const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block; - const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride; - const u32 offset = (z / bd * imgstride + - y / bh * rowstride + - x / bw * desc->bytes_per_block); - return offset; -} - - -static inline u32 -svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format, - surf_size_struct baseLevelSize, - u32 numMipLevels, - u32 face, - u32 mip) - -{ - u32 offset; - u32 mipChainBytes; - u32 mipChainBytesToLevel; - u32 i; - const struct svga3d_surface_desc *desc; - surf_size_struct mipSize; - u32 bytes; - - desc = svga3dsurface_get_desc(format); - - mipChainBytes = 0; - mipChainBytesToLevel = 0; - for (i = 0; i < numMipLevels; i++) { - mipSize = svga3dsurface_get_mip_size(baseLevelSize, i); - bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0); - mipChainBytes += bytes; - if (i < mip) - mipChainBytesToLevel += bytes; - } - - offset = mipChainBytes * face + mipChainBytesToLevel; - - return offset; -} diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 96dc84dc34d0..9826fbc88154 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -248,12 +248,13 @@ void vmw_evict_flags(struct ttm_buffer_object *bo, *placement = vmw_sys_placement; } +/** + * FIXME: Proper access checks on buffers. + */ + static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - struct ttm_object_file *tfile = - vmw_fpriv((struct drm_file *)filp->private_data)->tfile; - - return vmw_user_dmabuf_verify_access(bo, tfile); + return 0; } static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) @@ -309,23 +310,27 @@ static void vmw_sync_obj_unref(void **sync_obj) vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj); } -static int vmw_sync_obj_flush(void *sync_obj) +static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) { vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj); return 0; } -static bool vmw_sync_obj_signaled(void *sync_obj) +static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) { + unsigned long flags = (unsigned long) sync_arg; return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj, - DRM_VMW_FENCE_FLAG_EXEC); + (uint32_t) flags); } -static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) +static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, + bool lazy, bool interruptible) { + unsigned long flags = (unsigned long) sync_arg; + return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj, - DRM_VMW_FENCE_FLAG_EXEC, + (uint32_t) flags, lazy, interruptible, VMW_FENCE_WAIT_TIMEOUT); } diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c deleted file mode 100644 index 00ae0925aca8..000000000000 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ /dev/null @@ -1,274 +0,0 @@ -/************************************************************************** - * - * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#include "vmwgfx_drv.h" -#include "vmwgfx_resource_priv.h" -#include "ttm/ttm_placement.h" - -struct vmw_user_context { - struct ttm_base_object base; - struct vmw_resource res; -}; - -static void vmw_user_context_free(struct vmw_resource *res); -static struct vmw_resource * -vmw_user_context_base_to_res(struct ttm_base_object *base); - -static uint64_t vmw_user_context_size; - -static const struct vmw_user_resource_conv user_context_conv = { - .object_type = VMW_RES_CONTEXT, - .base_obj_to_res = vmw_user_context_base_to_res, - .res_free = vmw_user_context_free -}; - -const struct vmw_user_resource_conv *user_context_converter = - &user_context_conv; - - -static const struct vmw_res_func vmw_legacy_context_func = { - .res_type = vmw_res_context, - .needs_backup = false, - .may_evict = false, - .type_name = "legacy contexts", - .backup_placement = NULL, - .create = NULL, - .destroy = NULL, - .bind = NULL, - .unbind = NULL -}; - -/** - * Context management: - */ - -static void vmw_hw_context_destroy(struct vmw_resource *res) -{ - - struct vmw_private *dev_priv = res->dev_priv; - struct { - SVGA3dCmdHeader header; - SVGA3dCmdDestroyContext body; - } *cmd; - - - vmw_execbuf_release_pinned_bo(dev_priv); - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "destruction.\n"); - return; - } - - cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); - cmd->header.size = cpu_to_le32(sizeof(cmd->body)); - cmd->body.cid = cpu_to_le32(res->id); - - vmw_fifo_commit(dev_priv, sizeof(*cmd)); - vmw_3d_resource_dec(dev_priv, false); -} - -static int vmw_context_init(struct vmw_private *dev_priv, - struct vmw_resource *res, - void (*res_free) (struct vmw_resource *res)) -{ - int ret; - - struct { - SVGA3dCmdHeader header; - SVGA3dCmdDefineContext body; - } *cmd; - - ret = vmw_resource_init(dev_priv, res, false, - res_free, &vmw_legacy_context_func); - - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to allocate a resource id.\n"); - goto out_early; - } - - if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { - DRM_ERROR("Out of hw context ids.\n"); - vmw_resource_unreference(&res); - return -ENOMEM; - } - - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Fifo reserve 
failed.\n"); - vmw_resource_unreference(&res); - return -ENOMEM; - } - - cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); - cmd->header.size = cpu_to_le32(sizeof(cmd->body)); - cmd->body.cid = cpu_to_le32(res->id); - - vmw_fifo_commit(dev_priv, sizeof(*cmd)); - (void) vmw_3d_resource_inc(dev_priv, false); - vmw_resource_activate(res, vmw_hw_context_destroy); - return 0; - -out_early: - if (res_free == NULL) - kfree(res); - else - res_free(res); - return ret; -} - -struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) -{ - struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); - int ret; - - if (unlikely(res == NULL)) - return NULL; - - ret = vmw_context_init(dev_priv, res, NULL); - - return (ret == 0) ? res : NULL; -} - -/** - * User-space context management: - */ - -static struct vmw_resource * -vmw_user_context_base_to_res(struct ttm_base_object *base) -{ - return &(container_of(base, struct vmw_user_context, base)->res); -} - -static void vmw_user_context_free(struct vmw_resource *res) -{ - struct vmw_user_context *ctx = - container_of(res, struct vmw_user_context, res); - struct vmw_private *dev_priv = res->dev_priv; - - ttm_base_object_kfree(ctx, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); -} - -/** - * This function is called when user space has no more references on the - * base object. It releases the base-object's reference on the resource object. - */ - -static void vmw_user_context_base_release(struct ttm_base_object **p_base) -{ - struct ttm_base_object *base = *p_base; - struct vmw_user_context *ctx = - container_of(base, struct vmw_user_context, base); - struct vmw_resource *res = &ctx->res; - - *p_base = NULL; - vmw_resource_unreference(&res); -} - -int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - - return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); -} - -int vmw_context_define_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_user_context *ctx; - struct vmw_resource *res; - struct vmw_resource *tmp; - struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_master *vmaster = vmw_master(file_priv->master); - int ret; - - - /* - * Approximate idr memory usage with 128 bytes. It will be limited - * by maximum number_of contexts anyway. - */ - - if (unlikely(vmw_user_context_size == 0)) - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; - - ret = ttm_read_lock(&vmaster->lock, true); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_context_size, - false, true); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for context" - " creation.\n"); - goto out_unlock; - } - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (unlikely(ctx == NULL)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); - ret = -ENOMEM; - goto out_unlock; - } - - res = &ctx->res; - ctx->base.shareable = false; - ctx->base.tfile = NULL; - - /* - * From here on, the destructor takes over resource freeing. 
- */ - - ret = vmw_context_init(dev_priv, res, vmw_user_context_free); - if (unlikely(ret != 0)) - goto out_unlock; - - tmp = vmw_resource_reference(&ctx->res); - ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, - &vmw_user_context_base_release, NULL); - - if (unlikely(ret != 0)) { - vmw_resource_unreference(&tmp); - goto out_err; - } - - arg->cid = ctx->base.hash.key; -out_err: - vmw_resource_unreference(&res); -out_unlock: - ttm_read_unlock(&vmaster->lock); - return ret; - -} diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index 655d57f188d9..d1498bfd7873 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -60,7 +60,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - vmw_execbuf_release_pinned_bo(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); ret = ttm_bo_reserve(bo, interruptible, false, false, 0); if (unlikely(ret != 0)) @@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, return ret; if (pin) - vmw_execbuf_release_pinned_bo(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); ret = ttm_bo_reserve(bo, interruptible, false, false, 0); if (unlikely(ret != 0)) @@ -214,7 +214,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, return ret; if (pin) - vmw_execbuf_release_pinned_bo(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); + ret = ttm_bo_reserve(bo, interruptible, false, false, 0); if (unlikely(ret != 0)) goto err_unlock; @@ -303,9 +304,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) uint32_t old_mem_type = bo->mem.mem_type; int ret; - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); BUG_ON(old_mem_type != TTM_PL_VRAM && - old_mem_type != VMW_PL_FLAG_GMR); + old_mem_type != VMW_PL_GMR); pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; if (pin) diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 91581fd5004b..2dd185e42f21 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) PAGE_SIZE, ttm_bo_type_device, &vmw_vram_sys_placement, - 0, false, NULL, + 0, 0, false, NULL, &dev_priv->dummy_query_bo); } @@ -432,7 +432,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) struct vmw_private *dev_priv; int ret; uint32_t svga_id; - enum vmw_res_type i; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (unlikely(dev_priv == NULL)) { @@ -449,18 +448,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); rwlock_init(&dev_priv->resource_lock); - - for (i = vmw_res_context; i < vmw_res_max; ++i) { - idr_init(&dev_priv->res_idr[i]); - INIT_LIST_HEAD(&dev_priv->res_lru[i]); - } - + idr_init(&dev_priv->context_idr); + idr_init(&dev_priv->surface_idr); + idr_init(&dev_priv->stream_idr); mutex_init(&dev_priv->init_mutex); init_waitqueue_head(&dev_priv->fence_queue); init_waitqueue_head(&dev_priv->fifo_queue); dev_priv->fence_queue_waiters = 0; atomic_set(&dev_priv->fifo_queue_waiters, 0); - + INIT_LIST_HEAD(&dev_priv->surface_lru); dev_priv->used_memory_size = 0; dev_priv->io_start = pci_resource_start(dev->pdev, 0); @@ -613,18 +609,14 @@ static int vmw_driver_load(struct 
drm_device *dev, unsigned long chipset) } } - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { - ret = drm_irq_install(dev); - if (ret != 0) { - DRM_ERROR("Failed installing irq: %d\n", ret); - goto out_no_irq; - } - } - dev_priv->fman = vmw_fence_manager_init(dev_priv); if (unlikely(dev_priv->fman == NULL)) goto out_no_fman; + /* Need to start the fifo to check if we can do screen objects */ + ret = vmw_3d_resource_inc(dev_priv, true); + if (unlikely(ret != 0)) + goto out_no_fifo; vmw_kms_save_vga(dev_priv); /* Start kms and overlay systems, needs fifo. */ @@ -633,11 +625,25 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_kms; vmw_overlay_init(dev_priv); + /* 3D Depends on Screen Objects being used. */ + DRM_INFO("Detected %sdevice 3D availability.\n", + vmw_fifo_have_3d(dev_priv) ? + "" : "no "); + + /* We might be done with the fifo now */ if (dev_priv->enable_fb) { - ret = vmw_3d_resource_inc(dev_priv, true); - if (unlikely(ret != 0)) - goto out_no_fifo; vmw_fb_init(dev_priv); + } else { + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, true); + } + + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { + ret = drm_irq_install(dev); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed installing irq: %d\n", ret); + goto out_no_irq; + } } dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; @@ -645,16 +651,20 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) return 0; -out_no_fifo: +out_no_irq: + if (dev_priv->enable_fb) + vmw_fb_close(dev_priv); vmw_overlay_close(dev_priv); vmw_kms_close(dev_priv); out_no_kms: - vmw_kms_restore_vga(dev_priv); + /* We still have a 3D resource reference held */ + if (dev_priv->enable_fb) { + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, false); + } +out_no_fifo: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - drm_irq_uninstall(dev_priv->dev); -out_no_irq: if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else @@ -674,9 +684,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) out_err1: vmw_ttm_global_release(dev_priv); out_err0: - for (i = vmw_res_context; i < vmw_res_max; ++i) - idr_destroy(&dev_priv->res_idr[i]); - + idr_destroy(&dev_priv->surface_idr); + idr_destroy(&dev_priv->context_idr); + idr_destroy(&dev_priv->stream_idr); kfree(dev_priv); return ret; } @@ -684,14 +694,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) static int vmw_driver_unload(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); - enum vmw_res_type i; unregister_pm_notifier(&dev_priv->pm_nb); - if (dev_priv->ctx.res_ht_initialized) - drm_ht_remove(&dev_priv->ctx.res_ht); if (dev_priv->ctx.cmd_bounce) vfree(dev_priv->ctx.cmd_bounce); + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) + drm_irq_uninstall(dev_priv->dev); if (dev_priv->enable_fb) { vmw_fb_close(dev_priv); vmw_kms_restore_vga(dev_priv); @@ -700,8 +709,6 @@ static int vmw_driver_unload(struct drm_device *dev) vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - drm_irq_uninstall(dev_priv->dev); if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else @@ -716,9 +723,9 @@ static int vmw_driver_unload(struct drm_device *dev) (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); - - for (i = vmw_res_context; i < 
vmw_res_max; ++i) - idr_destroy(&dev_priv->res_idr[i]); + idr_destroy(&dev_priv->surface_idr); + idr_destroy(&dev_priv->context_idr); + idr_destroy(&dev_priv->stream_idr); kfree(dev_priv); @@ -917,11 +924,11 @@ static int vmw_master_set(struct drm_device *dev, out_no_active_lock: if (!dev_priv->enable_fb) { - vmw_kms_restore_vga(dev_priv); - vmw_3d_resource_dec(dev_priv, true); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); mutex_unlock(&dev_priv->hw_mutex); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, true); } return ret; } @@ -942,7 +949,7 @@ static void vmw_master_drop(struct drm_device *dev, vmw_fp->locked_master = drm_master_get(file_priv->master); ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); - vmw_execbuf_release_pinned_bo(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); if (unlikely((ret != 0))) { DRM_ERROR("Unable to lock TTM at VT switch.\n"); @@ -955,11 +962,11 @@ static void vmw_master_drop(struct drm_device *dev, ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); if (unlikely(ret != 0)) DRM_ERROR("Unable to clean VRAM on master drop.\n"); - vmw_kms_restore_vga(dev_priv); - vmw_3d_resource_dec(dev_priv, true); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); mutex_unlock(&dev_priv->hw_mutex); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, true); } dev_priv->active_master = &dev_priv->fbdev_master; @@ -994,8 +1001,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, * This empties VRAM and unbinds all GMR bindings. * Buffer contents is moved to swappable memory. */ - vmw_execbuf_release_pinned_bo(dev_priv); - vmw_resource_evict_all(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); ttm_bo_swapout_all(&dev_priv->bdev); break; @@ -1092,6 +1098,11 @@ static void vmw_pm_complete(struct device *kdev) struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); + (void) vmw_read(dev_priv, SVGA_REG_ID); + mutex_unlock(&dev_priv->hw_mutex); + /** * Reclaim 3d reference held by fbdev and potentially * start fifo. diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 13aeda71280e..88a179e26de9 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -67,46 +67,31 @@ struct vmw_fpriv { struct vmw_dma_buffer { struct ttm_buffer_object base; - struct list_head res_list; + struct list_head validate_list; + bool gmr_bound; + uint32_t cur_validate_node; + bool on_validate_list; }; -/** - * struct vmw_validate_buffer - Carries validation info about buffers. - * - * @base: Validation info for TTM. - * @hash: Hash entry for quick lookup of the TTM buffer object. - * - * This structure contains also driver private validation info - * on top of the info needed by TTM. 
- */ -struct vmw_validate_buffer { - struct ttm_validate_buffer base; - struct drm_hash_item hash; -}; - -struct vmw_res_func; struct vmw_resource { struct kref kref; struct vmw_private *dev_priv; + struct idr *idr; int id; + enum ttm_object_type res_type; bool avail; - unsigned long backup_size; - bool res_dirty; /* Protected by backup buffer reserved */ - bool backup_dirty; /* Protected by backup buffer reserved */ - struct vmw_dma_buffer *backup; - unsigned long backup_offset; - const struct vmw_res_func *func; - struct list_head lru_head; /* Protected by the resource lock */ - struct list_head mob_head; /* Protected by @backup reserved */ - void (*res_free) (struct vmw_resource *res); + void (*remove_from_lists) (struct vmw_resource *res); void (*hw_destroy) (struct vmw_resource *res); -}; - -enum vmw_res_type { - vmw_res_context, - vmw_res_surface, - vmw_res_stream, - vmw_res_max + void (*res_free) (struct vmw_resource *res); + struct list_head validate_head; + struct list_head query_head; /* Protected by the cmdbuf mutex */ + /* TODO is a generic snooper needed? */ +#if 0 + void (*snoop)(struct vmw_resource *res, + struct ttm_object_file *tfile, + SVGA3dCmdHeader *header); + void *snoop_priv; +#endif }; struct vmw_cursor_snooper { @@ -120,18 +105,20 @@ struct vmw_surface_offset; struct vmw_surface { struct vmw_resource res; + struct list_head lru_head; /* Protected by the resource lock */ uint32_t flags; uint32_t format; uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; - struct drm_vmw_size base_size; struct drm_vmw_size *sizes; uint32_t num_sizes; + bool scanout; + /* TODO so far just a extra pointer */ struct vmw_cursor_snooper snooper; + struct ttm_buffer_object *backup; struct vmw_surface_offset *offsets; - SVGA3dTextureFilter autogen_filter; - uint32_t multisample_count; + uint32_t backup_size; }; struct vmw_marker_queue { @@ -158,46 +145,29 @@ struct vmw_relocation { uint32_t index; }; -/** - * struct vmw_res_cache_entry - resource information cache entry - * - * @valid: Whether the entry is valid, which also implies that the execbuf - * code holds a reference to the resource, and it's placed on the - * validation list. - * @handle: User-space handle of a resource. - * @res: Non-ref-counted pointer to the resource. - * - * Used to avoid frequent repeated user-space handle lookups of the - * same resource. 
- */ -struct vmw_res_cache_entry { - bool valid; - uint32_t handle; - struct vmw_resource *res; - struct vmw_resource_val_node *node; -}; - struct vmw_sw_context{ - struct drm_open_hash res_ht; - bool res_ht_initialized; + struct ida bo_list; + uint32_t last_cid; + bool cid_valid; bool kernel; /**< is the called made from the kernel */ + struct vmw_resource *cur_ctx; + uint32_t last_sid; + uint32_t sid_translation; + bool sid_valid; struct ttm_object_file *tfile; struct list_head validate_nodes; struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; uint32_t cur_reloc; - struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; + struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; uint32_t cur_val_buf; uint32_t *cmd_bounce; uint32_t cmd_bounce_size; struct list_head resource_list; uint32_t fence_flags; + struct list_head query_list; struct ttm_buffer_object *cur_query_bo; - struct list_head res_relocations; - uint32_t *buf_start; - struct vmw_res_cache_entry res_cache[vmw_res_max]; - struct vmw_resource *last_query_ctx; - bool needs_post_query_barrier; - struct vmw_resource *error_resource; + uint32_t cur_query_cid; + bool query_cid_valid; }; struct vmw_legacy_display; @@ -272,7 +242,10 @@ struct vmw_private { */ rwlock_t resource_lock; - struct idr res_idr[vmw_res_max]; + struct idr context_idr; + struct idr surface_idr; + struct idr stream_idr; + /* * Block lastclose from racing with firstopen. */ @@ -347,7 +320,6 @@ struct vmw_private { struct ttm_buffer_object *dummy_query_bo; struct ttm_buffer_object *pinned_bo; uint32_t query_cid; - uint32_t query_cid_valid; bool dummy_query_bo_pinned; /* @@ -357,15 +329,10 @@ struct vmw_private { * protected by the cmdbuf mutex for simplicity. */ - struct list_head res_lru[vmw_res_max]; + struct list_head surface_lru; uint32_t used_memory_size; }; -static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) -{ - return container_of(res, struct vmw_surface, res); -} - static inline struct vmw_private *vmw_priv(struct drm_device *dev) { return (struct vmw_private *)dev->dev_private; @@ -414,16 +381,10 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); /** * Resource utilities - vmwgfx_resource.c */ -struct vmw_user_resource_conv; -extern const struct vmw_user_resource_conv *user_surface_converter; -extern const struct vmw_user_resource_conv *user_context_converter; extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); extern void vmw_resource_unreference(struct vmw_resource **p_res); extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); -extern int vmw_resource_validate(struct vmw_resource *res); -extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); -extern bool vmw_resource_needs_backup(const struct vmw_resource *res); extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, @@ -437,13 +398,14 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, uint32_t handle, struct vmw_surface **out_surf, struct vmw_dma_buffer **out_buf); -extern int vmw_user_resource_lookup_handle( - struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t handle, - const struct vmw_user_resource_conv *converter, - struct vmw_resource **p_res); extern void vmw_surface_res_free(struct vmw_resource *res); +extern int vmw_surface_init(struct vmw_private *dev_priv, + struct vmw_surface *srf, + void (*res_free) (struct 
vmw_resource *res)); +extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, + struct vmw_surface **out); extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, @@ -461,8 +423,6 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv, size_t size, struct ttm_placement *placement, bool interuptable, void (*bo_free) (struct ttm_buffer_object *bo)); -extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile); extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, @@ -480,14 +440,7 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, struct ttm_object_file *tfile, uint32_t *inout_id, struct vmw_resource **out); -extern void vmw_resource_unreserve(struct vmw_resource *res, - struct vmw_dma_buffer *new_backup, - unsigned long new_backup_offset); -extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); -extern void vmw_fence_single_bo(struct ttm_buffer_object *bo, - struct vmw_fence_obj *fence); -extern void vmw_resource_evict_all(struct vmw_private *dev_priv); +extern void vmw_resource_unreserve(struct list_head *list); /** * DMA buffer helper routines - vmwgfx_dmabuf.c @@ -585,9 +538,10 @@ extern int vmw_execbuf_process(struct drm_file *file_priv, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj **out_fence); -extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, - struct vmw_fence_obj *fence); -extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv); + +extern void +vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, + bool only_on_cid_match, uint32_t cid); extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, struct vmw_private *dev_priv, @@ -745,13 +699,10 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) { struct vmw_dma_buffer *tmp_buf = *buf; - + struct ttm_buffer_object *bo = &tmp_buf->base; *buf = NULL; - if (tmp_buf != NULL) { - struct ttm_buffer_object *bo = &tmp_buf->base; - ttm_bo_unref(&bo); - } + ttm_bo_unref(&bo); } static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 534c96703c3f..30654b4cc972 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -30,181 +30,6 @@ #include #include -#define VMW_RES_HT_ORDER 12 - -/** - * struct vmw_resource_relocation - Relocation info for resources - * - * @head: List head for the software context's relocation list. - * @res: Non-ref-counted pointer to the resource. - * @offset: Offset of 4 byte entries into the command buffer where the - * id that needs fixup is located. - */ -struct vmw_resource_relocation { - struct list_head head; - const struct vmw_resource *res; - unsigned long offset; -}; - -/** - * struct vmw_resource_val_node - Validation info for resources - * - * @head: List head for the software context's resource list. - * @hash: Hash entry for quick resouce to val_node lookup. - * @res: Ref-counted pointer to the resource. 
- * @switch_backup: Boolean whether to switch backup buffer on unreserve. - * @new_backup: Refcounted pointer to the new backup buffer. - * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll. - * @first_usage: Set to true the first time the resource is referenced in - * the command stream. - * @no_buffer_needed: Resources do not need to allocate buffer backup on - * reservation. The command stream will provide one. - */ -struct vmw_resource_val_node { - struct list_head head; - struct drm_hash_item hash; - struct vmw_resource *res; - struct vmw_dma_buffer *new_backup; - unsigned long new_backup_offset; - bool first_usage; - bool no_buffer_needed; -}; - -/** - * vmw_resource_unreserve - unreserve resources previously reserved for - * command submission. - * - * @list_head: list of resources to unreserve. - * @backoff: Whether command submission failed. - */ -static void vmw_resource_list_unreserve(struct list_head *list, - bool backoff) -{ - struct vmw_resource_val_node *val; - - list_for_each_entry(val, list, head) { - struct vmw_resource *res = val->res; - struct vmw_dma_buffer *new_backup = - backoff ? NULL : val->new_backup; - - vmw_resource_unreserve(res, new_backup, - val->new_backup_offset); - vmw_dmabuf_unreference(&val->new_backup); - } -} - - -/** - * vmw_resource_val_add - Add a resource to the software context's - * resource list if it's not already on it. - * - * @sw_context: Pointer to the software context. - * @res: Pointer to the resource. - * @p_node On successful return points to a valid pointer to a - * struct vmw_resource_val_node, if non-NULL on entry. - */ -static int vmw_resource_val_add(struct vmw_sw_context *sw_context, - struct vmw_resource *res, - struct vmw_resource_val_node **p_node) -{ - struct vmw_resource_val_node *node; - struct drm_hash_item *hash; - int ret; - - if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res, - &hash) == 0)) { - node = container_of(hash, struct vmw_resource_val_node, hash); - node->first_usage = false; - if (unlikely(p_node != NULL)) - *p_node = node; - return 0; - } - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (unlikely(node == NULL)) { - DRM_ERROR("Failed to allocate a resource validation " - "entry.\n"); - return -ENOMEM; - } - - node->hash.key = (unsigned long) res; - ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to initialize a resource validation " - "entry.\n"); - kfree(node); - return ret; - } - list_add_tail(&node->head, &sw_context->resource_list); - node->res = vmw_resource_reference(res); - node->first_usage = true; - - if (unlikely(p_node != NULL)) - *p_node = node; - - return 0; -} - -/** - * vmw_resource_relocation_add - Add a relocation to the relocation list - * - * @list: Pointer to head of relocation list. - * @res: The resource. - * @offset: Offset into the command buffer currently being parsed where the - * id that needs fixup is located. Granularity is 4 bytes. - */ -static int vmw_resource_relocation_add(struct list_head *list, - const struct vmw_resource *res, - unsigned long offset) -{ - struct vmw_resource_relocation *rel; - - rel = kmalloc(sizeof(*rel), GFP_KERNEL); - if (unlikely(rel == NULL)) { - DRM_ERROR("Failed to allocate a resource relocation.\n"); - return -ENOMEM; - } - - rel->res = res; - rel->offset = offset; - list_add_tail(&rel->head, list); - - return 0; -} - -/** - * vmw_resource_relocations_free - Free all relocations on a list - * - * @list: Pointer to the head of the relocation list. 
- */ -static void vmw_resource_relocations_free(struct list_head *list) -{ - struct vmw_resource_relocation *rel, *n; - - list_for_each_entry_safe(rel, n, list, head) { - list_del(&rel->head); - kfree(rel); - } -} - -/** - * vmw_resource_relocations_apply - Apply all relocations on a list - * - * @cb: Pointer to the start of the command buffer bein patch. This need - * not be the same buffer as the one being parsed when the relocation - * list was built, but the contents must be the same modulo the - * resource ids. - * @list: Pointer to the head of the relocation list. - */ -static void vmw_resource_relocations_apply(uint32_t *cb, - struct list_head *list) -{ - struct vmw_resource_relocation *rel; - - list_for_each_entry(rel, list, head) - cb[rel->offset] = rel->res->id; -} - static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) @@ -219,11 +44,25 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, return 0; } +static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context, + struct vmw_resource **p_res) +{ + struct vmw_resource *res = *p_res; + + if (list_empty(&res->validate_head)) { + list_add_tail(&res->validate_head, &sw_context->resource_list); + *p_res = NULL; + } else + vmw_resource_unreference(p_res); +} + /** * vmw_bo_to_validate_list - add a bo to a validate list * * @sw_context: The software context used for this command submission batch. * @bo: The buffer object to add. + * @fence_flags: Fence flags to be or'ed with any other fence flags for + * this buffer on this submission batch. * @p_val_node: If non-NULL Will be updated with the validate node number * on return. * @@ -232,43 +71,31 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, */ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, struct ttm_buffer_object *bo, + uint32_t fence_flags, uint32_t *p_val_node) { uint32_t val_node; - struct vmw_validate_buffer *vval_buf; struct ttm_validate_buffer *val_buf; - struct drm_hash_item *hash; - int ret; - if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, - &hash) == 0)) { - vval_buf = container_of(hash, struct vmw_validate_buffer, - hash); - val_buf = &vval_buf->base; - val_node = vval_buf - sw_context->val_bufs; - } else { - val_node = sw_context->cur_val_buf; - if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { - DRM_ERROR("Max number of DMA buffers per submission " - "exceeded.\n"); - return -EINVAL; - } - vval_buf = &sw_context->val_bufs[val_node]; - vval_buf->hash.key = (unsigned long) bo; - ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to initialize a buffer validation " - "entry.\n"); - return ret; - } - ++sw_context->cur_val_buf; - val_buf = &vval_buf->base; + val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); + + if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { + DRM_ERROR("Max number of DMA buffers per submission" + " exceeded.\n"); + return -EINVAL; + } + + val_buf = &sw_context->val_bufs[val_node]; + if (unlikely(val_node == sw_context->cur_val_buf)) { + val_buf->new_sync_obj_arg = NULL; val_buf->bo = ttm_bo_reference(bo); - val_buf->reserved = false; list_add_tail(&val_buf->head, &sw_context->validate_nodes); + ++sw_context->cur_val_buf; } - sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; + val_buf->new_sync_obj_arg = (void *) + ((unsigned long) val_buf->new_sync_obj_arg | fence_flags); + sw_context->fence_flags |= fence_flags; if (p_val_node) *p_val_node = 
val_node; @@ -276,174 +103,85 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, return 0; } -/** - * vmw_resources_reserve - Reserve all resources on the sw_context's - * resource list. - * - * @sw_context: Pointer to the software context. - * - * Note that since vmware's command submission currently is protected by - * the cmdbuf mutex, no fancy deadlock avoidance is required for resources, - * since only a single thread at once will attempt this. - */ -static int vmw_resources_reserve(struct vmw_sw_context *sw_context) +static int vmw_cmd_cid_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) { - struct vmw_resource_val_node *val; - int ret; - - list_for_each_entry(val, &sw_context->resource_list, head) { - struct vmw_resource *res = val->res; - - ret = vmw_resource_reserve(res, val->no_buffer_needed); - if (unlikely(ret != 0)) - return ret; + struct vmw_resource *ctx; - if (res->backup) { - struct ttm_buffer_object *bo = &res->backup->base; + struct vmw_cid_cmd { + SVGA3dCmdHeader header; + __le32 cid; + } *cmd; + int ret; - ret = vmw_bo_to_validate_list - (sw_context, bo, NULL); + cmd = container_of(header, struct vmw_cid_cmd, header); + if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) + return 0; - if (unlikely(ret != 0)) - return ret; - } + ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid, + &ctx); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not find or use context %u\n", + (unsigned) cmd->cid); + return ret; } - return 0; -} -/** - * vmw_resources_validate - Validate all resources on the sw_context's - * resource list. - * - * @sw_context: Pointer to the software context. - * - * Before this function is called, all resource backup buffers must have - * been validated. - */ -static int vmw_resources_validate(struct vmw_sw_context *sw_context) -{ - struct vmw_resource_val_node *val; - int ret; - - list_for_each_entry(val, &sw_context->resource_list, head) { - struct vmw_resource *res = val->res; + sw_context->last_cid = cmd->cid; + sw_context->cid_valid = true; + sw_context->cur_ctx = ctx; + vmw_resource_to_validate_list(sw_context, &ctx); - ret = vmw_resource_validate(res); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Failed to validate resource.\n"); - return ret; - } - } return 0; } -/** - * vmw_cmd_res_check - Check that a resource is present and if so, put it - * on the resource validate list unless it's already there. - * - * @dev_priv: Pointer to a device private structure. - * @sw_context: Pointer to the software context. - * @res_type: Resource type. - * @converter: User-space visisble type specific information. - * @id: Pointer to the location in the command buffer currently being - * parsed from where the user-space resource id handle is located. 
- */ -static int vmw_cmd_res_check(struct vmw_private *dev_priv, +static int vmw_cmd_sid_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t *id, - struct vmw_resource_val_node **p_val) + uint32_t *sid) { - struct vmw_res_cache_entry *rcache = - &sw_context->res_cache[res_type]; - struct vmw_resource *res; - struct vmw_resource_val_node *node; + struct vmw_surface *srf; int ret; + struct vmw_resource *res; - if (*id == SVGA3D_INVALID_ID) + if (*sid == SVGA3D_INVALID_ID) return 0; - /* - * Fastpath in case of repeated commands referencing the same - * resource - */ - - if (likely(rcache->valid && *id == rcache->handle)) { - const struct vmw_resource *res = rcache->res; - - rcache->node->first_usage = false; - if (p_val) - *p_val = rcache->node; - - return vmw_resource_relocation_add - (&sw_context->res_relocations, res, - id - sw_context->buf_start); + if (likely((sw_context->sid_valid && + *sid == sw_context->last_sid))) { + *sid = sw_context->sid_translation; + return 0; } - ret = vmw_user_resource_lookup_handle(dev_priv, - sw_context->tfile, - *id, - converter, - &res); + ret = vmw_user_surface_lookup_handle(dev_priv, + sw_context->tfile, + *sid, &srf); if (unlikely(ret != 0)) { - DRM_ERROR("Could not find or use resource 0x%08x.\n", - (unsigned) *id); - dump_stack(); + DRM_ERROR("Could ot find or use surface 0x%08x " + "address 0x%08lx\n", + (unsigned int) *sid, + (unsigned long) sid); return ret; } - rcache->valid = true; - rcache->res = res; - rcache->handle = *id; + ret = vmw_surface_validate(dev_priv, srf); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Could not validate surface.\n"); + vmw_surface_unreference(&srf); + return ret; + } - ret = vmw_resource_relocation_add(&sw_context->res_relocations, - res, - id - sw_context->buf_start); - if (unlikely(ret != 0)) - goto out_no_reloc; + sw_context->last_sid = *sid; + sw_context->sid_valid = true; + sw_context->sid_translation = srf->res.id; + *sid = sw_context->sid_translation; - ret = vmw_resource_val_add(sw_context, res, &node); - if (unlikely(ret != 0)) - goto out_no_reloc; + res = &srf->res; + vmw_resource_to_validate_list(sw_context, &res); - rcache->node = node; - if (p_val) - *p_val = node; - vmw_resource_unreference(&res); return 0; - -out_no_reloc: - BUG_ON(sw_context->error_resource != NULL); - sw_context->error_resource = res; - - return ret; } -/** - * vmw_cmd_cid_check - Check a command header for valid context information. - * - * @dev_priv: Pointer to a device private structure. - * @sw_context: Pointer to the software context. - * @header: A command header with an embedded user-space context handle. - * - * Convenience function: Call vmw_cmd_res_check with the user-space context - * handle embedded in @header. 
- */ -static int vmw_cmd_cid_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - SVGA3dCmdHeader *header) -{ - struct vmw_cid_cmd { - SVGA3dCmdHeader header; - __le32 cid; - } *cmd; - - cmd = container_of(header, struct vmw_cid_cmd, header); - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, - user_context_converter, &cmd->cid, NULL); -} static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, @@ -460,9 +198,7 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, return ret; cmd = container_of(header, struct vmw_sid_cmd, header); - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.target.sid, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid); return ret; } @@ -477,14 +213,10 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, int ret; cmd = container_of(header, struct vmw_sid_cmd, header); - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.src.sid, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); if (unlikely(ret != 0)) return ret; - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.dest.sid, NULL); + return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); } static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, @@ -498,14 +230,10 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, int ret; cmd = container_of(header, struct vmw_sid_cmd, header); - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.src.sid, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); if (unlikely(ret != 0)) return ret; - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.dest.sid, NULL); + return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); } static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, @@ -524,9 +252,7 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, return -EPERM; } - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.srcImage.sid, NULL); + return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); } static int vmw_cmd_present_check(struct vmw_private *dev_priv, @@ -546,15 +272,14 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, return -EPERM; } - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, &cmd->body.sid, - NULL); + return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); } /** * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. * * @dev_priv: The device private structure. + * @cid: The hardware context for the next query. * @new_query_bo: The new buffer holding query results. * @sw_context: The software context used for this command submission. * @@ -562,18 +287,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, * query results, and if another buffer currently is pinned for query * results. If so, the function prepares the state of @sw_context for * switching pinned buffers after successful submission of the current - * command batch. + * command batch. It also checks whether we're using a new query context. 
+ * In that case, it makes sure we emit a query barrier for the old + * context before the current query buffer is fenced. */ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, + uint32_t cid, struct ttm_buffer_object *new_query_bo, struct vmw_sw_context *sw_context) { - struct vmw_res_cache_entry *ctx_entry = - &sw_context->res_cache[vmw_res_context]; int ret; - - BUG_ON(!ctx_entry->valid); - sw_context->last_query_ctx = ctx_entry->res; + bool add_cid = false; + uint32_t cid_to_add; if (unlikely(new_query_bo != sw_context->cur_query_bo)) { @@ -583,9 +308,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, } if (unlikely(sw_context->cur_query_bo != NULL)) { - sw_context->needs_post_query_barrier = true; + BUG_ON(!sw_context->query_cid_valid); + add_cid = true; + cid_to_add = sw_context->cur_query_cid; ret = vmw_bo_to_validate_list(sw_context, sw_context->cur_query_bo, + DRM_VMW_FENCE_FLAG_EXEC, NULL); if (unlikely(ret != 0)) return ret; @@ -594,12 +322,35 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, ret = vmw_bo_to_validate_list(sw_context, dev_priv->dummy_query_bo, + DRM_VMW_FENCE_FLAG_EXEC, NULL); if (unlikely(ret != 0)) return ret; } + if (unlikely(cid != sw_context->cur_query_cid && + sw_context->query_cid_valid)) { + add_cid = true; + cid_to_add = sw_context->cur_query_cid; + } + + sw_context->cur_query_cid = cid; + sw_context->query_cid_valid = true; + + if (add_cid) { + struct vmw_resource *ctx = sw_context->cur_ctx; + + if (list_empty(&ctx->query_head)) + list_add_tail(&ctx->query_head, + &sw_context->query_list); + ret = vmw_bo_to_validate_list(sw_context, + dev_priv->dummy_query_bo, + DRM_VMW_FENCE_FLAG_EXEC, + NULL); + if (unlikely(ret != 0)) + return ret; + } return 0; } @@ -611,9 +362,10 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, * @sw_context: The software context used for this command submission batch. * * This function will check if we're switching query buffers, and will then, + * if no other query waits are issued this command submission batch, * issue a dummy occlusion query wait used as a query barrier. When the fence * object following that query wait has signaled, we are sure that all - * preceding queries have finished, and the old query buffer can be unpinned. + * preseding queries have finished, and the old query buffer can be unpinned. * However, since both the new query buffer and the old one are fenced with * that fence, we can do an asynchronus unpin now, and be sure that the * old query buffer won't be moved until the fence has signaled. @@ -624,19 +376,20 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context) { + + struct vmw_resource *ctx, *next_ctx; + int ret; + /* * The validate list should still hold references to all * contexts here. 
*/ - if (sw_context->needs_post_query_barrier) { - struct vmw_res_cache_entry *ctx_entry = - &sw_context->res_cache[vmw_res_context]; - struct vmw_resource *ctx; - int ret; + list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list, + query_head) { + list_del_init(&ctx->query_head); - BUG_ON(!ctx_entry->valid); - ctx = ctx_entry->res; + BUG_ON(list_empty(&ctx->validate_head)); ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); @@ -650,46 +403,40 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ttm_bo_unref(&dev_priv->pinned_bo); } - if (!sw_context->needs_post_query_barrier) { - vmw_bo_pin(sw_context->cur_query_bo, true); + vmw_bo_pin(sw_context->cur_query_bo, true); - /* - * We pin also the dummy_query_bo buffer so that we - * don't need to validate it when emitting - * dummy queries in context destroy paths. - */ + /* + * We pin also the dummy_query_bo buffer so that we + * don't need to validate it when emitting + * dummy queries in context destroy paths. + */ - vmw_bo_pin(dev_priv->dummy_query_bo, true); - dev_priv->dummy_query_bo_pinned = true; + vmw_bo_pin(dev_priv->dummy_query_bo, true); + dev_priv->dummy_query_bo_pinned = true; - BUG_ON(sw_context->last_query_ctx == NULL); - dev_priv->query_cid = sw_context->last_query_ctx->id; - dev_priv->query_cid_valid = true; - dev_priv->pinned_bo = - ttm_bo_reference(sw_context->cur_query_bo); - } + dev_priv->query_cid = sw_context->cur_query_cid; + dev_priv->pinned_bo = + ttm_bo_reference(sw_context->cur_query_bo); } } /** - * vmw_translate_guest_pointer - Prepare to translate a user-space buffer - * handle to a valid SVGAGuestPtr + * vmw_query_switch_backoff - clear query barrier list + * @sw_context: The sw context used for this submission batch. * - * @dev_priv: Pointer to a device private structure. - * @sw_context: The software context used for this command batch validation. - * @ptr: Pointer to the user-space handle to be translated. - * @vmw_bo_p: Points to a location that, on successful return will carry - * a reference-counted pointer to the DMA buffer identified by the - * user-space handle in @id. + * This function is used as part of an error path, where a previously + * set up list of query barriers needs to be cleared. * - * This function saves information needed to translate a user-space buffer - * handle to a valid SVGAGuestPtr. The translation does not take place - * immediately, but during a call to vmw_apply_relocations(). - * This function builds a relocation list and a list of buffers to validate. - * The former needs to be freed using either vmw_apply_relocations() or - * vmw_free_relocations(). The latter needs to be freed using - * vmw_clear_validations. 
*/ +static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context) +{ + struct list_head *list, *next; + + list_for_each_safe(list, next, &sw_context->query_list) { + list_del_init(list); + } +} + static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAGuestPtr *ptr, @@ -718,7 +465,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, reloc = &sw_context->relocs[sw_context->cur_reloc++]; reloc->location = ptr; - ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index); + ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC, + &reloc->index); if (unlikely(ret != 0)) goto out_no_reloc; @@ -731,37 +479,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, return ret; } -/** - * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. - * - * @dev_priv: Pointer to a device private struct. - * @sw_context: The software context used for this command submission. - * @header: Pointer to the command header in the command stream. - */ -static int vmw_cmd_begin_query(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - SVGA3dCmdHeader *header) -{ - struct vmw_begin_query_cmd { - SVGA3dCmdHeader header; - SVGA3dCmdBeginQuery q; - } *cmd; - - cmd = container_of(header, struct vmw_begin_query_cmd, - header); - - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, - user_context_converter, &cmd->q.cid, - NULL); -} - -/** - * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. - * - * @dev_priv: Pointer to a device private struct. - * @sw_context: The software context used for this command submission. - * @header: Pointer to the command header in the command stream. - */ static int vmw_cmd_end_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) @@ -784,19 +501,13 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); + ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid, + &vmw_bo->base, sw_context); vmw_dmabuf_unreference(&vmw_bo); return ret; } -/* - * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. - * - * @dev_priv: Pointer to a device private struct. - * @sw_context: The software context used for this command submission. - * @header: Pointer to the command header in the command stream. - */ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) @@ -807,6 +518,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, SVGA3dCmdWaitForQuery q; } *cmd; int ret; + struct vmw_resource *ctx; cmd = container_of(header, struct vmw_query_cmd, header); ret = vmw_cmd_cid_check(dev_priv, sw_context, header); @@ -820,6 +532,16 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, return ret; vmw_dmabuf_unreference(&vmw_bo); + + /* + * This wait will act as a barrier for previous waits for this + * context. 
+ */ + + ctx = sw_context->cur_ctx; + if (!list_empty(&ctx->query_head)) + list_del_init(&ctx->query_head); + return 0; } @@ -828,12 +550,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { struct vmw_dma_buffer *vmw_bo = NULL; + struct ttm_buffer_object *bo; struct vmw_surface *srf = NULL; struct vmw_dma_cmd { SVGA3dCmdHeader header; SVGA3dCmdSurfaceDMA dma; } *cmd; int ret; + struct vmw_resource *res; cmd = container_of(header, struct vmw_dma_cmd, header); ret = vmw_translate_guest_ptr(dev_priv, sw_context, @@ -842,20 +566,37 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, &cmd->dma.host.sid, - NULL); + bo = &vmw_bo->base; + ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, + cmd->dma.host.sid, &srf); + if (ret) { + DRM_ERROR("could not find surface\n"); + goto out_no_reloc; + } + + ret = vmw_surface_validate(dev_priv, srf); if (unlikely(ret != 0)) { - if (unlikely(ret != -ERESTARTSYS)) - DRM_ERROR("could not find surface for DMA.\n"); - goto out_no_surface; + if (ret != -ERESTARTSYS) + DRM_ERROR("Culd not validate surface.\n"); + goto out_no_validate; } - srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); + /* + * Patch command stream with device SID. + */ + cmd->dma.host.sid = srf->res.id; + vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); + + vmw_dmabuf_unreference(&vmw_bo); + + res = &srf->res; + vmw_resource_to_validate_list(sw_context, &res); - vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); + return 0; -out_no_surface: +out_no_validate: + vmw_surface_unreference(&srf); +out_no_reloc: vmw_dmabuf_unreference(&vmw_bo); return ret; } @@ -888,9 +629,8 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, } for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &decl->array.surfaceId, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, + &decl->array.surfaceId); if (unlikely(ret != 0)) return ret; } @@ -904,9 +644,8 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, range = (SVGA3dPrimitiveRange *) decl; for (i = 0; i < cmd->body.numRanges; ++i, ++range) { - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &range->indexArray.surfaceId, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, + &range->indexArray.surfaceId); if (unlikely(ret != 0)) return ret; } @@ -937,9 +676,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) continue; - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cur_state->value, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, + &cur_state->value); if (unlikely(ret != 0)) return ret; } @@ -970,34 +708,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, return ret; } -/** - * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER - * command - * - * @dev_priv: Pointer to a device private struct. - * @sw_context: The software context being used for this batch. - * @header: Pointer to the command header in the command stream. 
- */ -static int vmw_cmd_set_shader(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - SVGA3dCmdHeader *header) -{ - struct vmw_set_shader_cmd { - SVGA3dCmdHeader header; - SVGA3dCmdSetShader body; - } *cmd; - int ret; - - cmd = container_of(header, struct vmw_set_shader_cmd, - header); - - ret = vmw_cmd_cid_check(dev_priv, sw_context, header); - if (unlikely(ret != 0)) - return ret; - - return 0; -} - static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) @@ -1071,20 +781,16 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), + VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), + VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, - &vmw_cmd_blt_surf_screen_check), - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), + &vmw_cmd_blt_surf_screen_check) }; static int vmw_cmd_check(struct vmw_private *dev_priv, @@ -1131,8 +837,6 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv, int32_t cur_size = size; int ret; - sw_context->buf_start = buf; - while (cur_size > 0) { size = cur_size; ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); @@ -1164,63 +868,43 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) for (i = 0; i < sw_context->cur_reloc; ++i) { reloc = &sw_context->relocs[i]; - validate = &sw_context->val_bufs[reloc->index].base; + validate = &sw_context->val_bufs[reloc->index]; bo = validate->bo; - switch (bo->mem.mem_type) { - case TTM_PL_VRAM: + if (bo->mem.mem_type == TTM_PL_VRAM) { reloc->location->offset += bo->offset; reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; - break; - case VMW_PL_GMR: + } else reloc->location->gmrId = bo->mem.start; - break; - default: - BUG(); - } } vmw_free_relocations(sw_context); } -/** - * vmw_resource_list_unrefererence - Free up a resource list and unreference - * all resources referenced by it. - * - * @list: The resource list. - */ -static void vmw_resource_list_unreference(struct list_head *list) -{ - struct vmw_resource_val_node *val, *val_next; - - /* - * Drop references to resources held during command submission. - */ - - list_for_each_entry_safe(val, val_next, list, head) { - list_del_init(&val->head); - vmw_resource_unreference(&val->res); - kfree(val); - } -} - static void vmw_clear_validations(struct vmw_sw_context *sw_context) { - struct vmw_validate_buffer *entry, *next; - struct vmw_resource_val_node *val; + struct ttm_validate_buffer *entry, *next; + struct vmw_resource *res, *res_next; /* * Drop references to DMA buffers held during command submission. 
*/ list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, - base.head) { - list_del(&entry->base.head); - ttm_bo_unref(&entry->base.bo); - (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); + head) { + list_del(&entry->head); + vmw_dmabuf_validate_clear(entry->bo); + ttm_bo_unref(&entry->bo); sw_context->cur_val_buf--; } BUG_ON(sw_context->cur_val_buf != 0); - list_for_each_entry(val, &sw_context->resource_list, head) - (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); + /* + * Drop references to resources held during command submission. + */ + vmw_resource_unreserve(&sw_context->resource_list); + list_for_each_entry_safe(res, res_next, &sw_context->resource_list, + validate_head) { + list_del_init(&res->validate_head); + vmw_resource_unreference(&res); + } } static int vmw_validate_single_buffer(struct vmw_private *dev_priv, @@ -1263,11 +947,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, static int vmw_validate_buffers(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context) { - struct vmw_validate_buffer *entry; + struct ttm_validate_buffer *entry; int ret; - list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { - ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); + list_for_each_entry(entry, &sw_context->validate_nodes, head) { + ret = vmw_validate_single_buffer(dev_priv, entry->bo); if (unlikely(ret != 0)) return ret; } @@ -1430,8 +1114,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, { struct vmw_sw_context *sw_context = &dev_priv->ctx; struct vmw_fence_obj *fence = NULL; - struct vmw_resource *error_resource; - struct list_head resource_list; uint32_t handle; void *cmd; int ret; @@ -1461,33 +1143,24 @@ int vmw_execbuf_process(struct drm_file *file_priv, sw_context->kernel = true; sw_context->tfile = vmw_fpriv(file_priv)->tfile; + sw_context->cid_valid = false; + sw_context->sid_valid = false; sw_context->cur_reloc = 0; sw_context->cur_val_buf = 0; sw_context->fence_flags = 0; + INIT_LIST_HEAD(&sw_context->query_list); INIT_LIST_HEAD(&sw_context->resource_list); sw_context->cur_query_bo = dev_priv->pinned_bo; - sw_context->last_query_ctx = NULL; - sw_context->needs_post_query_barrier = false; - memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); + sw_context->cur_query_cid = dev_priv->query_cid; + sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL); + INIT_LIST_HEAD(&sw_context->validate_nodes); - INIT_LIST_HEAD(&sw_context->res_relocations); - if (!sw_context->res_ht_initialized) { - ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); - if (unlikely(ret != 0)) - goto out_unlock; - sw_context->res_ht_initialized = true; - } - INIT_LIST_HEAD(&resource_list); ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, command_size); if (unlikely(ret != 0)) goto out_err; - ret = vmw_resources_reserve(sw_context); - if (unlikely(ret != 0)) - goto out_err; - ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); if (unlikely(ret != 0)) goto out_err; @@ -1496,31 +1169,24 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (unlikely(ret != 0)) goto out_err; - ret = vmw_resources_validate(sw_context); - if (unlikely(ret != 0)) - goto out_err; + vmw_apply_relocations(sw_context); if (throttle_us) { ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, throttle_us); if (unlikely(ret != 0)) - goto out_err; + goto out_throttle; } cmd = vmw_fifo_reserve(dev_priv, command_size); if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving fifo space for 
commands.\n"); ret = -ENOMEM; - goto out_err; + goto out_throttle; } - vmw_apply_relocations(sw_context); memcpy(cmd, kernel_commands, command_size); - - vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); - vmw_resource_relocations_free(&sw_context->res_relocations); - vmw_fifo_commit(dev_priv, command_size); vmw_query_bo_switch_commit(dev_priv, sw_context); @@ -1536,14 +1202,9 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (ret != 0) DRM_ERROR("Fence submission error. Syncing.\n"); - vmw_resource_list_unreserve(&sw_context->resource_list, false); ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, (void *) fence); - if (unlikely(dev_priv->pinned_bo != NULL && - !dev_priv->query_cid_valid)) - __vmw_execbuf_release_pinned_bo(dev_priv, fence); - vmw_clear_validations(sw_context); vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, handle); @@ -1556,40 +1217,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, vmw_fence_obj_unreference(&fence); } - list_splice_init(&sw_context->resource_list, &resource_list); mutex_unlock(&dev_priv->cmdbuf_mutex); - - /* - * Unreference resources outside of the cmdbuf_mutex to - * avoid deadlocks in resource destruction paths. - */ - vmw_resource_list_unreference(&resource_list); - return 0; out_err: - vmw_resource_relocations_free(&sw_context->res_relocations); vmw_free_relocations(sw_context); +out_throttle: + vmw_query_switch_backoff(sw_context); ttm_eu_backoff_reservation(&sw_context->validate_nodes); - vmw_resource_list_unreserve(&sw_context->resource_list, true); vmw_clear_validations(sw_context); - if (unlikely(dev_priv->pinned_bo != NULL && - !dev_priv->query_cid_valid)) - __vmw_execbuf_release_pinned_bo(dev_priv, NULL); out_unlock: - list_splice_init(&sw_context->resource_list, &resource_list); - error_resource = sw_context->error_resource; - sw_context->error_resource = NULL; mutex_unlock(&dev_priv->cmdbuf_mutex); - - /* - * Unreference resources outside of the cmdbuf_mutex to - * avoid deadlocks in resource destruction paths. - */ - vmw_resource_list_unreference(&resource_list); - if (unlikely(error_resource != NULL)) - vmw_resource_unreference(&error_resource); - return ret; } @@ -1614,13 +1252,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) /** - * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned + * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned * query bo. * * @dev_priv: The device private structure. - * @fence: If non-NULL should point to a struct vmw_fence_obj issued - * _after_ a query barrier that flushes all queries touching the current - * buffer pointed to by @dev_priv->pinned_bo + * @only_on_cid_match: Only flush and unpin if the current active query cid + * matches @cid. + * @cid: Optional context id to match. * * This function should be used to unpin the pinned query bo, or * as a query barrier when we need to make sure that all queries have @@ -1633,26 +1271,31 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) * * The function will synchronize on the previous query barrier, and will * thus not finish until that barrier has executed. - * - * the @dev_priv->cmdbuf_mutex needs to be held by the current thread - * before calling this function. 
*/ -void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, - struct vmw_fence_obj *fence) +void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, + bool only_on_cid_match, uint32_t cid) { int ret = 0; struct list_head validate_list; struct ttm_validate_buffer pinned_val, query_val; - struct vmw_fence_obj *lfence = NULL; + struct vmw_fence_obj *fence; + + mutex_lock(&dev_priv->cmdbuf_mutex); if (dev_priv->pinned_bo == NULL) goto out_unlock; + if (only_on_cid_match && cid != dev_priv->query_cid) + goto out_unlock; + INIT_LIST_HEAD(&validate_list); + pinned_val.new_sync_obj_arg = (void *)(unsigned long) + DRM_VMW_FENCE_FLAG_EXEC; pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); list_add_tail(&pinned_val.head, &validate_list); + query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg; query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); list_add_tail(&query_val.head, &validate_list); @@ -1665,34 +1308,25 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, goto out_no_reserve; } - if (dev_priv->query_cid_valid) { - BUG_ON(fence != NULL); - ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); - if (unlikely(ret != 0)) { - vmw_execbuf_unpin_panic(dev_priv); - goto out_no_emit; - } - dev_priv->query_cid_valid = false; + ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); + if (unlikely(ret != 0)) { + vmw_execbuf_unpin_panic(dev_priv); + goto out_no_emit; } vmw_bo_pin(dev_priv->pinned_bo, false); vmw_bo_pin(dev_priv->dummy_query_bo, false); dev_priv->dummy_query_bo_pinned = false; - if (fence == NULL) { - (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, - NULL); - fence = lfence; - } + (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); - if (lfence != NULL) - vmw_fence_obj_unreference(&lfence); ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); ttm_bo_unref(&dev_priv->pinned_bo); out_unlock: + mutex_unlock(&dev_priv->cmdbuf_mutex); return; out_no_emit: @@ -1701,31 +1335,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); ttm_bo_unref(&dev_priv->pinned_bo); -} - -/** - * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned - * query bo. - * - * @dev_priv: The device private structure. - * - * This function should be used to unpin the pinned query bo, or - * as a query barrier when we need to make sure that all queries have - * finished before the next fifo command. (For example on hardware - * context destructions where the hardware may otherwise leak unfinished - * queries). - * - * This function does not return any failure codes, but make attempts - * to do safe unpinning in case of errors. - * - * The function will synchronize on the previous query barrier, and will - * thus not finish until that barrier has executed. 
- */ -void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) -{ - mutex_lock(&dev_priv->cmdbuf_mutex); - if (dev_priv->query_cid_valid) - __vmw_execbuf_release_pinned_bo(dev_priv, NULL); mutex_unlock(&dev_priv->cmdbuf_mutex); } diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index c62d20e8a6f1..bc187fafd58c 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence) container_of(fence, struct vmw_user_fence, fence); struct vmw_fence_manager *fman = fence->fman; - ttm_base_object_kfree(ufence, base); + kfree(ufence); /* * Free kernel space accounting. */ diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 2f7c08ebf568..7290811f89be 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -110,6 +110,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); ret = copy_to_user(buffer, bounce, size); + if (ret) + ret = -EFAULT; vfree(bounce); if (unlikely(ret != 0)) @@ -131,7 +133,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, struct drm_vmw_rect *clips = NULL; struct drm_mode_object *obj; struct vmw_framebuffer *vfb; - struct vmw_resource *res; uint32_t num_clips; int ret; @@ -179,13 +180,11 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_no_ttm_lock; - ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid, - user_surface_converter, - &res); + ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid, + &surface); if (ret) goto out_no_surface; - surface = vmw_res_to_srf(res); ret = vmw_kms_present(dev_priv, file_priv, vfb, surface, arg->sid, arg->dest_x, arg->dest_y, diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 79f7e8e60529..070fb239c5af 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) drm_mode_crtc_set_gamma_size(crtc, 256); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.dirty_info_property, 1); diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 87e39f68e9d0..cb55b7b66377 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -35,7 +35,6 @@ #include "svga_escape.h" #define VMW_MAX_NUM_STREAMS 1 -#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) struct vmw_stream { struct vmw_dma_buffer *buf; @@ -450,14 +449,6 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv) return 0; } - -static bool vmw_overlay_available(const struct vmw_private *dev_priv) -{ - return (dev_priv->overlay_priv != NULL && - ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) == - VMW_OVERLAY_CAP_MASK)); -} - int vmw_overlay_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -470,7 +461,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, struct vmw_resource *res; int ret; - if (!vmw_overlay_available(dev_priv)) + if (!overlay) return -ENOSYS; ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res); @@ -501,7 +492,7 @@ int 
vmw_overlay_ioctl(struct drm_device *dev, void *data, int vmw_overlay_num_overlays(struct vmw_private *dev_priv) { - if (!vmw_overlay_available(dev_priv)) + if (!dev_priv->overlay_priv) return 0; return VMW_MAX_NUM_STREAMS; @@ -512,7 +503,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv) struct vmw_overlay *overlay = dev_priv->overlay_priv; int i, k; - if (!vmw_overlay_available(dev_priv)) + if (!overlay) return 0; mutex_lock(&overlay->mutex); @@ -578,6 +569,12 @@ int vmw_overlay_init(struct vmw_private *dev_priv) if (dev_priv->overlay_priv) return -EINVAL; + if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) && + (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) { + DRM_INFO("hardware doesn't support overlays\n"); + return -ENOSYS; + } + overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return -ENOMEM; diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 0def4ff5b621..da3c6b5b98a1 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -30,7 +30,17 @@ #include #include #include -#include "vmwgfx_resource_priv.h" + +struct vmw_user_context { + struct ttm_base_object base; + struct vmw_resource res; +}; + +struct vmw_user_surface { + struct ttm_base_object base; + struct vmw_surface srf; + uint32_t size; +}; struct vmw_user_dma_buffer { struct ttm_base_object base; @@ -52,21 +62,17 @@ struct vmw_user_stream { struct vmw_stream stream; }; +struct vmw_surface_offset { + uint32_t face; + uint32_t mip; + uint32_t bo_offset; +}; + +static uint64_t vmw_user_context_size; +static uint64_t vmw_user_surface_size; static uint64_t vmw_user_stream_size; -static const struct vmw_res_func vmw_stream_func = { - .res_type = vmw_res_stream, - .needs_backup = false, - .may_evict = false, - .type_name = "video streams", - .backup_placement = NULL, - .create = NULL, - .destroy = NULL, - .bind = NULL, - .unbind = NULL -}; - static inline struct vmw_dma_buffer * vmw_dma_buffer(struct ttm_buffer_object *bo) { @@ -94,14 +100,13 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) * * Release the resource id to the resource id manager and set it to -1 */ -void vmw_resource_release_id(struct vmw_resource *res) +static void vmw_resource_release_id(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; - struct idr *idr = &dev_priv->res_idr[res->func->res_type]; write_lock(&dev_priv->resource_lock); if (res->id != -1) - idr_remove(idr, res->id); + idr_remove(res->idr, res->id); res->id = -1; write_unlock(&dev_priv->resource_lock); } @@ -111,33 +116,17 @@ static void vmw_resource_release(struct kref *kref) struct vmw_resource *res = container_of(kref, struct vmw_resource, kref); struct vmw_private *dev_priv = res->dev_priv; - int id; - struct idr *idr = &dev_priv->res_idr[res->func->res_type]; + int id = res->id; + struct idr *idr = res->idr; res->avail = false; - list_del_init(&res->lru_head); + if (res->remove_from_lists != NULL) + res->remove_from_lists(res); write_unlock(&dev_priv->resource_lock); - if (res->backup) { - struct ttm_buffer_object *bo = &res->backup->base; - - ttm_bo_reserve(bo, false, false, false, 0); - if (!list_empty(&res->mob_head) && - res->func->unbind != NULL) { - struct ttm_validate_buffer val_buf; - - val_buf.bo = bo; - res->func->unbind(res, false, &val_buf); - } - res->backup_dirty = false; - list_del_init(&res->mob_head); - ttm_bo_unreserve(bo); - vmw_dmabuf_unreference(&res->backup); - } 
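/*
 * A minimal sketch (not code from this patch) of the kref lifecycle for which
 * vmw_resource_release() above is the destructor, assuming the helpers
 * declared elsewhere in this file: references are taken with
 * vmw_resource_reference(), and the release function only runs when the last
 * holder calls vmw_resource_unreference(), which holds
 * dev_priv->resource_lock around the kref_put() (the destructor drops and
 * retakes that lock around its unlocked work, as the hunk above shows):
 *
 *	struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 *	{
 *		kref_get(&res->kref);
 *		return res;
 *	}
 *
 *	void vmw_resource_unreference(struct vmw_resource **p_res)
 *	{
 *		struct vmw_resource *res = *p_res;
 *		struct vmw_private *dev_priv = res->dev_priv;
 *
 *		*p_res = NULL;
 *		write_lock(&dev_priv->resource_lock);
 *		kref_put(&res->kref, vmw_resource_release);
 *		write_unlock(&dev_priv->resource_lock);
 *	}
 */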
if (likely(res->hw_destroy != NULL)) res->hw_destroy(res); - id = res->id; if (res->res_free != NULL) res->res_free(res); else @@ -164,25 +153,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res) /** * vmw_resource_alloc_id - release a resource id to the id manager. * + * @dev_priv: Pointer to the device private structure. * @res: Pointer to the resource. * * Allocate the lowest free resource from the resource manager, and set * @res->id to that id. Returns 0 on success and -ENOMEM on failure. */ -int vmw_resource_alloc_id(struct vmw_resource *res) +static int vmw_resource_alloc_id(struct vmw_private *dev_priv, + struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; int ret; - struct idr *idr = &dev_priv->res_idr[res->func->res_type]; BUG_ON(res->id != -1); do { - if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) + if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0)) return -ENOMEM; write_lock(&dev_priv->resource_lock); - ret = idr_get_new_above(idr, res, 1, &res->id); + ret = idr_get_new_above(res->idr, res, 1, &res->id); write_unlock(&dev_priv->resource_lock); } while (ret == -EAGAIN); @@ -190,39 +179,31 @@ int vmw_resource_alloc_id(struct vmw_resource *res) return ret; } -/** - * vmw_resource_init - initialize a struct vmw_resource - * - * @dev_priv: Pointer to a device private struct. - * @res: The struct vmw_resource to initialize. - * @obj_type: Resource object type. - * @delay_id: Boolean whether to defer device id allocation until - * the first validation. - * @res_free: Resource destructor. - * @func: Resource function table. - */ -int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, - bool delay_id, - void (*res_free) (struct vmw_resource *res), - const struct vmw_res_func *func) + +static int vmw_resource_init(struct vmw_private *dev_priv, + struct vmw_resource *res, + struct idr *idr, + enum ttm_object_type obj_type, + bool delay_id, + void (*res_free) (struct vmw_resource *res), + void (*remove_from_lists) + (struct vmw_resource *res)) { kref_init(&res->kref); res->hw_destroy = NULL; res->res_free = res_free; + res->remove_from_lists = remove_from_lists; + res->res_type = obj_type; + res->idr = idr; res->avail = false; res->dev_priv = dev_priv; - res->func = func; - INIT_LIST_HEAD(&res->lru_head); - INIT_LIST_HEAD(&res->mob_head); + INIT_LIST_HEAD(&res->query_head); + INIT_LIST_HEAD(&res->validate_head); res->id = -1; - res->backup = NULL; - res->backup_offset = 0; - res->backup_dirty = false; - res->res_dirty = false; if (delay_id) return 0; else - return vmw_resource_alloc_id(res); + return vmw_resource_alloc_id(dev_priv, res); } /** @@ -237,8 +218,9 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, * Activate basically means that the function vmw_resource_lookup will * find it. 
*/ -void vmw_resource_activate(struct vmw_resource *res, - void (*hw_destroy) (struct vmw_resource *)) + +static void vmw_resource_activate(struct vmw_resource *res, + void (*hw_destroy) (struct vmw_resource *)) { struct vmw_private *dev_priv = res->dev_priv; @@ -268,245 +250,1396 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, } /** - * vmw_user_resource_lookup_handle - lookup a struct resource from a - * TTM user-space handle and perform basic type checks - * - * @dev_priv: Pointer to a device private struct - * @tfile: Pointer to a struct ttm_object_file identifying the caller - * @handle: The TTM user-space handle - * @converter: Pointer to an object describing the resource type - * @p_res: On successful return the location pointed to will contain - * a pointer to a refcounted struct vmw_resource. - * - * If the handle can't be found or is associated with an incorrect resource - * type, -EINVAL will be returned. + * Context management: */ -int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t handle, - const struct vmw_user_resource_conv - *converter, - struct vmw_resource **p_res) + +static void vmw_hw_context_destroy(struct vmw_resource *res) { - struct ttm_base_object *base; - struct vmw_resource *res; - int ret = -EINVAL; - base = ttm_base_object_lookup(tfile, handle); - if (unlikely(base == NULL)) - return -EINVAL; + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyContext body; + } *cmd; - if (unlikely(base->object_type != converter->object_type)) - goto out_bad_resource; - res = converter->base_obj_to_res(base); + vmw_execbuf_release_pinned_bo(dev_priv, true, res->id); - read_lock(&dev_priv->resource_lock); - if (!res->avail || res->res_free != converter->res_free) { - read_unlock(&dev_priv->resource_lock); - goto out_bad_resource; + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "destruction.\n"); + return; } - kref_get(&res->kref); - read_unlock(&dev_priv->resource_lock); - - *p_res = res; - ret = 0; - -out_bad_resource: - ttm_base_object_unref(&base); + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); + cmd->header.size = cpu_to_le32(sizeof(cmd->body)); + cmd->body.cid = cpu_to_le32(res->id); - return ret; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_3d_resource_dec(dev_priv, false); } -/** - * Helper function that looks either a surface or dmabuf. - * - * The pointer this pointed at by out_surf and out_buf needs to be null. 
- */ -int vmw_user_lookup_handle(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t handle, - struct vmw_surface **out_surf, - struct vmw_dma_buffer **out_buf) +static int vmw_context_init(struct vmw_private *dev_priv, + struct vmw_resource *res, + void (*res_free) (struct vmw_resource *res)) { - struct vmw_resource *res; int ret; - BUG_ON(*out_surf || *out_buf); + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineContext body; + } *cmd; - ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle, - user_surface_converter, - &res); - if (!ret) { - *out_surf = vmw_res_to_srf(res); - return 0; + ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, + VMW_RES_CONTEXT, false, res_free, NULL); + + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate a resource id.\n"); + goto out_early; } - *out_surf = NULL; - ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); - return ret; -} + if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { + DRM_ERROR("Out of hw context ids.\n"); + vmw_resource_unreference(&res); + return -ENOMEM; + } -/** - * Buffer management. - */ -void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) -{ - struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed.\n"); + vmw_resource_unreference(&res); + return -ENOMEM; + } - kfree(vmw_bo); + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); + cmd->header.size = cpu_to_le32(sizeof(cmd->body)); + cmd->body.cid = cpu_to_le32(res->id); + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + (void) vmw_3d_resource_inc(dev_priv, false); + vmw_resource_activate(res, vmw_hw_context_destroy); + return 0; + +out_early: + if (res_free == NULL) + kfree(res); + else + res_free(res); + return ret; } -int vmw_dmabuf_init(struct vmw_private *dev_priv, - struct vmw_dma_buffer *vmw_bo, - size_t size, struct ttm_placement *placement, - bool interruptible, - void (*bo_free) (struct ttm_buffer_object *bo)) +struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) { - struct ttm_bo_device *bdev = &dev_priv->bdev; - size_t acc_size; + struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); int ret; - BUG_ON(!bo_free); - - acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); - memset(vmw_bo, 0, sizeof(*vmw_bo)); - - INIT_LIST_HEAD(&vmw_bo->res_list); + if (unlikely(res == NULL)) + return NULL; - ret = ttm_bo_init(bdev, &vmw_bo->base, size, - ttm_bo_type_device, placement, - 0, interruptible, - NULL, acc_size, NULL, bo_free); - return ret; + ret = vmw_context_init(dev_priv, res, NULL); + return (ret == 0) ? res : NULL; } -static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) +/** + * User-space context management: + */ + +static void vmw_user_context_free(struct vmw_resource *res) { - struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); + struct vmw_user_context *ctx = + container_of(res, struct vmw_user_context, res); + struct vmw_private *dev_priv = res->dev_priv; - ttm_base_object_kfree(vmw_user_bo, base); + kfree(ctx); + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_user_context_size); } -static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) +/** + * This function is called when user space has no more references on the + * base object. It releases the base-object's reference on the resource object. 
+ */ + +static void vmw_user_context_base_release(struct ttm_base_object **p_base) { - struct vmw_user_dma_buffer *vmw_user_bo; struct ttm_base_object *base = *p_base; - struct ttm_buffer_object *bo; + struct vmw_user_context *ctx = + container_of(base, struct vmw_user_context, base); + struct vmw_resource *res = &ctx->res; *p_base = NULL; + vmw_resource_unreference(&res); +} - if (unlikely(base == NULL)) - return; +int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_resource *res; + struct vmw_user_context *ctx; + struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret = 0; - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); - bo = &vmw_user_bo->dma.base; - ttm_bo_unref(&bo); + res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid); + if (unlikely(res == NULL)) + return -EINVAL; + + if (res->res_free != &vmw_user_context_free) { + ret = -EINVAL; + goto out; + } + + ctx = container_of(res, struct vmw_user_context, res); + if (ctx->base.tfile != tfile && !ctx->base.shareable) { + ret = -EPERM; + goto out; + } + + ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE); +out: + vmw_resource_unreference(&res); + return ret; } -/** - * vmw_user_dmabuf_alloc - Allocate a user dma buffer - * - * @dev_priv: Pointer to a struct device private. - * @tfile: Pointer to a struct ttm_object_file on which to register the user - * object. - * @size: Size of the dma buffer. - * @shareable: Boolean whether the buffer is shareable with other open files. - * @handle: Pointer to where the handle value should be assigned. - * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer - * should be assigned. - */ -int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t size, - bool shareable, - uint32_t *handle, - struct vmw_dma_buffer **p_dma_buf) +int vmw_context_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct vmw_user_dma_buffer *user_bo; - struct ttm_buffer_object *tmp; + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_context *ctx; + struct vmw_resource *res; + struct vmw_resource *tmp; + struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_master *vmaster = vmw_master(file_priv->master); int ret; - user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); - if (unlikely(user_bo == NULL)) { - DRM_ERROR("Failed to allocate a buffer.\n"); - return -ENOMEM; - } - ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, - &vmw_vram_sys_placement, true, - &vmw_user_dmabuf_destroy); + /* + * Approximate idr memory usage with 128 bytes. It will be limited + * by maximum number_of contexts anyway. 
+ */ + + if (unlikely(vmw_user_context_size == 0)) + vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; + + ret = ttm_read_lock(&vmaster->lock, true); if (unlikely(ret != 0)) return ret; - tmp = ttm_bo_reference(&user_bo->dma.base); - ret = ttm_base_object_init(tfile, - &user_bo->base, - shareable, - ttm_buffer_type, - &vmw_user_dmabuf_release, NULL); + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + vmw_user_context_size, + false, true); if (unlikely(ret != 0)) { - ttm_bo_unref(&tmp); - goto out_no_base_object; + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for context" + " creation.\n"); + goto out_unlock; + } + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (unlikely(ctx == NULL)) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_user_context_size); + ret = -ENOMEM; + goto out_unlock; } - *p_dma_buf = &user_bo->dma; - *handle = user_bo->base.hash.key; + res = &ctx->res; + ctx->base.shareable = false; + ctx->base.tfile = NULL; + + /* + * From here on, the destructor takes over resource freeing. + */ + + ret = vmw_context_init(dev_priv, res, vmw_user_context_free); + if (unlikely(ret != 0)) + goto out_unlock; + + tmp = vmw_resource_reference(&ctx->res); + ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, + &vmw_user_context_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + goto out_err; + } + + arg->cid = res->id; +out_err: + vmw_resource_unreference(&res); +out_unlock: + ttm_read_unlock(&vmaster->lock); + return ret; + +} + +int vmw_context_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int id, + struct vmw_resource **p_res) +{ + struct vmw_resource *res; + int ret = 0; + + read_lock(&dev_priv->resource_lock); + res = idr_find(&dev_priv->context_idr, id); + if (res && res->avail) { + struct vmw_user_context *ctx = + container_of(res, struct vmw_user_context, res); + if (ctx->base.tfile != tfile && !ctx->base.shareable) + ret = -EPERM; + if (p_res) + *p_res = vmw_resource_reference(res); + } else + ret = -EINVAL; + read_unlock(&dev_priv->resource_lock); -out_no_base_object: return ret; } +struct vmw_bpp { + uint8_t bpp; + uint8_t s_bpp; +}; + +/* + * Size table for the supported SVGA3D surface formats. It consists of + * two values. 
The bpp value and the s_bpp value which is short for + * "stride bits per pixel" The values are given in such a way that the + * minimum stride for the image is calculated using + * + * min_stride = w*s_bpp + * + * and the total memory requirement for the image is + * + * h*min_stride*bpp/s_bpp + * + */ +static const struct vmw_bpp vmw_sf_bpp[] = { + [SVGA3D_FORMAT_INVALID] = {0, 0}, + [SVGA3D_X8R8G8B8] = {32, 32}, + [SVGA3D_A8R8G8B8] = {32, 32}, + [SVGA3D_R5G6B5] = {16, 16}, + [SVGA3D_X1R5G5B5] = {16, 16}, + [SVGA3D_A1R5G5B5] = {16, 16}, + [SVGA3D_A4R4G4B4] = {16, 16}, + [SVGA3D_Z_D32] = {32, 32}, + [SVGA3D_Z_D16] = {16, 16}, + [SVGA3D_Z_D24S8] = {32, 32}, + [SVGA3D_Z_D15S1] = {16, 16}, + [SVGA3D_LUMINANCE8] = {8, 8}, + [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8}, + [SVGA3D_LUMINANCE16] = {16, 16}, + [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16}, + [SVGA3D_DXT1] = {4, 16}, + [SVGA3D_DXT2] = {8, 32}, + [SVGA3D_DXT3] = {8, 32}, + [SVGA3D_DXT4] = {8, 32}, + [SVGA3D_DXT5] = {8, 32}, + [SVGA3D_BUMPU8V8] = {16, 16}, + [SVGA3D_BUMPL6V5U5] = {16, 16}, + [SVGA3D_BUMPX8L8V8U8] = {32, 32}, + [SVGA3D_ARGB_S10E5] = {16, 16}, + [SVGA3D_ARGB_S23E8] = {32, 32}, + [SVGA3D_A2R10G10B10] = {32, 32}, + [SVGA3D_V8U8] = {16, 16}, + [SVGA3D_Q8W8V8U8] = {32, 32}, + [SVGA3D_CxV8U8] = {16, 16}, + [SVGA3D_X8L8V8U8] = {32, 32}, + [SVGA3D_A2W10V10U10] = {32, 32}, + [SVGA3D_ALPHA8] = {8, 8}, + [SVGA3D_R_S10E5] = {16, 16}, + [SVGA3D_R_S23E8] = {32, 32}, + [SVGA3D_RG_S10E5] = {16, 16}, + [SVGA3D_RG_S23E8] = {32, 32}, + [SVGA3D_BUFFER] = {8, 8}, + [SVGA3D_Z_D24X8] = {32, 32}, + [SVGA3D_V16U16] = {32, 32}, + [SVGA3D_G16R16] = {32, 32}, + [SVGA3D_A16B16G16R16] = {64, 64}, + [SVGA3D_UYVY] = {12, 12}, + [SVGA3D_YUY2] = {12, 12}, + [SVGA3D_NV12] = {12, 8}, + [SVGA3D_AYUV] = {32, 32}, + [SVGA3D_BC4_UNORM] = {4, 16}, + [SVGA3D_BC5_UNORM] = {8, 32}, + [SVGA3D_Z_DF16] = {16, 16}, + [SVGA3D_Z_DF24] = {24, 24}, + [SVGA3D_Z_D24S8_INT] = {32, 32} +}; + + +/** + * Surface management. + */ + +struct vmw_surface_dma { + SVGA3dCmdHeader header; + SVGA3dCmdSurfaceDMA body; + SVGA3dCopyBox cb; + SVGA3dCmdSurfaceDMASuffix suffix; +}; + +struct vmw_surface_define { + SVGA3dCmdHeader header; + SVGA3dCmdDefineSurface body; +}; + +struct vmw_surface_destroy { + SVGA3dCmdHeader header; + SVGA3dCmdDestroySurface body; +}; + + /** - * vmw_user_dmabuf_verify_access - verify access permissions on this - * buffer object. + * vmw_surface_dma_size - Compute fifo size for a dma command. + * + * @srf: Pointer to a struct vmw_surface * - * @bo: Pointer to the buffer object being accessed - * @tfile: Identifying the caller. + * Computes the required size for a surface dma command for backup or + * restoration of the surface represented by @srf. */ -int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile) +static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) { - struct vmw_user_dma_buffer *vmw_user_bo; + return srf->num_sizes * sizeof(struct vmw_surface_dma); +} - if (unlikely(bo->destroy != vmw_user_dmabuf_destroy)) - return -EPERM; - vmw_user_bo = vmw_user_dma_buffer(bo); - return (vmw_user_bo->base.tfile == tfile || - vmw_user_bo->base.shareable) ? 0 : -EPERM; +/** + * vmw_surface_define_size - Compute fifo size for a surface define command. + * + * @srf: Pointer to a struct vmw_surface + * + * Computes the required size for a surface define command for the definition + * of the surface represented by @srf. 
+ */ +static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) +{ + return sizeof(struct vmw_surface_define) + srf->num_sizes * + sizeof(SVGA3dSize); } -int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + +/** + * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. + * + * Computes the required size for a surface destroy command for the destruction + * of a hw surface. + */ +static inline uint32_t vmw_surface_destroy_size(void) { - struct vmw_private *dev_priv = vmw_priv(dev); - union drm_vmw_alloc_dmabuf_arg *arg = - (union drm_vmw_alloc_dmabuf_arg *)data; - struct drm_vmw_alloc_dmabuf_req *req = &arg->req; - struct drm_vmw_dmabuf_rep *rep = &arg->rep; - struct vmw_dma_buffer *dma_buf; - uint32_t handle; - struct vmw_master *vmaster = vmw_master(file_priv->master); - int ret; + return sizeof(struct vmw_surface_destroy); +} - ret = ttm_read_lock(&vmaster->lock, true); - if (unlikely(ret != 0)) - return ret; +/** + * vmw_surface_destroy_encode - Encode a surface_destroy command. + * + * @id: The surface id + * @cmd_space: Pointer to memory area in which the commands should be encoded. + */ +static void vmw_surface_destroy_encode(uint32_t id, + void *cmd_space) +{ + struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) + cmd_space; - ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - req->size, false, &handle, &dma_buf); - if (unlikely(ret != 0)) - goto out_no_dmabuf; + cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; + cmd->header.size = sizeof(cmd->body); + cmd->body.sid = id; +} - rep->handle = handle; - rep->map_handle = dma_buf->base.addr_space_offset; - rep->cur_gmr_id = handle; - rep->cur_gmr_offset = 0; +/** + * vmw_surface_define_encode - Encode a surface_define command. + * + * @srf: Pointer to a struct vmw_surface object. + * @cmd_space: Pointer to memory area in which the commands should be encoded. + */ +static void vmw_surface_define_encode(const struct vmw_surface *srf, + void *cmd_space) +{ + struct vmw_surface_define *cmd = (struct vmw_surface_define *) + cmd_space; + struct drm_vmw_size *src_size; + SVGA3dSize *cmd_size; + uint32_t cmd_len; + int i; + + cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); + + cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; + cmd->header.size = cmd_len; + cmd->body.sid = srf->res.id; + cmd->body.surfaceFlags = srf->flags; + cmd->body.format = cpu_to_le32(srf->format); + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) + cmd->body.face[i].numMipLevels = srf->mip_levels[i]; + + cmd += 1; + cmd_size = (SVGA3dSize *) cmd; + src_size = srf->sizes; + + for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { + cmd_size->width = src_size->width; + cmd_size->height = src_size->height; + cmd_size->depth = src_size->depth; + } +} + + +/** + * vmw_surface_dma_encode - Encode a surface_dma command. + * + * @srf: Pointer to a struct vmw_surface object. + * @cmd_space: Pointer to memory area in which the commands should be encoded. + * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents + * should be placed or read from. + * @to_surface: Boolean whether to DMA to the surface or from the surface. 
+ */ +static void vmw_surface_dma_encode(struct vmw_surface *srf, + void *cmd_space, + const SVGAGuestPtr *ptr, + bool to_surface) +{ + uint32_t i; + uint32_t bpp = vmw_sf_bpp[srf->format].bpp; + uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp; + struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; + + for (i = 0; i < srf->num_sizes; ++i) { + SVGA3dCmdHeader *header = &cmd->header; + SVGA3dCmdSurfaceDMA *body = &cmd->body; + SVGA3dCopyBox *cb = &cmd->cb; + SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; + const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; + const struct drm_vmw_size *cur_size = &srf->sizes[i]; + + header->id = SVGA_3D_CMD_SURFACE_DMA; + header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); + + body->guest.ptr = *ptr; + body->guest.ptr.offset += cur_offset->bo_offset; + body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3; + body->host.sid = srf->res.id; + body->host.face = cur_offset->face; + body->host.mipmap = cur_offset->mip; + body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : + SVGA3D_READ_HOST_VRAM); + cb->x = 0; + cb->y = 0; + cb->z = 0; + cb->srcx = 0; + cb->srcy = 0; + cb->srcz = 0; + cb->w = cur_size->width; + cb->h = cur_size->height; + cb->d = cur_size->depth; + + suffix->suffixSize = sizeof(*suffix); + suffix->maximumOffset = body->guest.pitch*cur_size->height* + cur_size->depth*bpp / stride_bpp; + suffix->flags.discard = 0; + suffix->flags.unsynchronized = 0; + suffix->flags.reserved = 0; + ++cmd; + } +}; + + +static void vmw_hw_surface_destroy(struct vmw_resource *res) +{ + + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_surface *srf; + void *cmd; + + if (res->id != -1) { + + cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "destruction.\n"); + return; + } + + vmw_surface_destroy_encode(res->id, cmd); + vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); + + /* + * used_memory_size_atomic, or separate lock + * to avoid taking dev_priv::cmdbuf_mutex in + * the destroy path. + */ + + mutex_lock(&dev_priv->cmdbuf_mutex); + srf = container_of(res, struct vmw_surface, res); + dev_priv->used_memory_size -= srf->backup_size; + mutex_unlock(&dev_priv->cmdbuf_mutex); + + } + vmw_3d_resource_dec(dev_priv, false); +} + +void vmw_surface_res_free(struct vmw_resource *res) +{ + struct vmw_surface *srf = container_of(res, struct vmw_surface, res); + + if (srf->backup) + ttm_bo_unref(&srf->backup); + kfree(srf->offsets); + kfree(srf->sizes); + kfree(srf->snooper.image); + kfree(srf); +} + + +/** + * vmw_surface_do_validate - make a surface available to the device. + * + * @dev_priv: Pointer to a device private struct. + * @srf: Pointer to a struct vmw_surface. + * + * If the surface doesn't have a hw id, allocate one, and optionally + * DMA the backed up surface contents to the device. + * + * Returns -EBUSY if there wasn't sufficient device resources to + * complete the validation. Retry after freeing up resources. + * + * May return other errors if the kernel is out of guest resources. 
+ */ +int vmw_surface_do_validate(struct vmw_private *dev_priv, + struct vmw_surface *srf) +{ + struct vmw_resource *res = &srf->res; + struct list_head val_list; + struct ttm_validate_buffer val_buf; + uint32_t submit_size; + uint8_t *cmd; + int ret; + + if (likely(res->id != -1)) + return 0; + + if (unlikely(dev_priv->used_memory_size + srf->backup_size >= + dev_priv->memory_size)) + return -EBUSY; + + /* + * Reserve- and validate the backup DMA bo. + */ + + if (srf->backup) { + INIT_LIST_HEAD(&val_list); + val_buf.bo = ttm_bo_reference(srf->backup); + val_buf.new_sync_obj_arg = (void *)((unsigned long) + DRM_VMW_FENCE_FLAG_EXEC); + list_add_tail(&val_buf.head, &val_list); + ret = ttm_eu_reserve_buffers(&val_list); + if (unlikely(ret != 0)) + goto out_no_reserve; + + ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, + true, false, false); + if (unlikely(ret != 0)) + goto out_no_validate; + } + + /* + * Alloc id for the resource. + */ + + ret = vmw_resource_alloc_id(dev_priv, res); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate a surface id.\n"); + goto out_no_id; + } + if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { + ret = -EBUSY; + goto out_no_fifo; + } - vmw_dmabuf_unreference(&dma_buf); + /* + * Encode surface define- and dma commands. + */ + + submit_size = vmw_surface_define_size(srf); + if (srf->backup) + submit_size += vmw_surface_dma_size(srf); + + cmd = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "validation.\n"); + ret = -ENOMEM; + goto out_no_fifo; + } + + vmw_surface_define_encode(srf, cmd); + if (srf->backup) { + SVGAGuestPtr ptr; + + cmd += vmw_surface_define_size(srf); + vmw_bo_get_guest_ptr(srf->backup, &ptr); + vmw_surface_dma_encode(srf, cmd, &ptr, true); + } + + vmw_fifo_commit(dev_priv, submit_size); + + /* + * Create a fence object and fence the backup buffer. + */ + + if (srf->backup) { + struct vmw_fence_obj *fence; + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + ttm_eu_fence_buffer_objects(&val_list, fence); + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + ttm_bo_unref(&val_buf.bo); + ttm_bo_unref(&srf->backup); + } + + /* + * Surface memory usage accounting. + */ + + dev_priv->used_memory_size += srf->backup_size; + + return 0; + +out_no_fifo: + vmw_resource_release_id(res); +out_no_id: +out_no_validate: + if (srf->backup) + ttm_eu_backoff_reservation(&val_list); +out_no_reserve: + if (srf->backup) + ttm_bo_unref(&val_buf.bo); + return ret; +} + +/** + * vmw_surface_evict - Evict a hw surface. + * + * @dev_priv: Pointer to a device private struct. + * @srf: Pointer to a struct vmw_surface + * + * DMA the contents of a hw surface to a backup guest buffer object, + * and destroy the hw surface, releasing its id. + */ +int vmw_surface_evict(struct vmw_private *dev_priv, + struct vmw_surface *srf) +{ + struct vmw_resource *res = &srf->res; + struct list_head val_list; + struct ttm_validate_buffer val_buf; + uint32_t submit_size; + uint8_t *cmd; + int ret; + struct vmw_fence_obj *fence; + SVGAGuestPtr ptr; + + BUG_ON(res->id == -1); + + /* + * Create a surface backup buffer object. + */ + + if (!srf->backup) { + ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size, + ttm_bo_type_device, + &vmw_srf_placement, 0, 0, true, + NULL, &srf->backup); + if (unlikely(ret != 0)) + return ret; + } + + /* + * Reserve- and validate the backup DMA bo. 
+ */ + + INIT_LIST_HEAD(&val_list); + val_buf.bo = ttm_bo_reference(srf->backup); + val_buf.new_sync_obj_arg = (void *)(unsigned long) + DRM_VMW_FENCE_FLAG_EXEC; + list_add_tail(&val_buf.head, &val_list); + ret = ttm_eu_reserve_buffers(&val_list); + if (unlikely(ret != 0)) + goto out_no_reserve; + + ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, + true, false, false); + if (unlikely(ret != 0)) + goto out_no_validate; + + + /* + * Encode the dma- and surface destroy commands. + */ + + submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size(); + cmd = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "eviction.\n"); + ret = -ENOMEM; + goto out_no_fifo; + } + + vmw_bo_get_guest_ptr(srf->backup, &ptr); + vmw_surface_dma_encode(srf, cmd, &ptr, false); + cmd += vmw_surface_dma_size(srf); + vmw_surface_destroy_encode(res->id, cmd); + vmw_fifo_commit(dev_priv, submit_size); + + /* + * Surface memory usage accounting. + */ + + dev_priv->used_memory_size -= srf->backup_size; + + /* + * Create a fence object and fence the DMA buffer. + */ + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + ttm_eu_fence_buffer_objects(&val_list, fence); + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + ttm_bo_unref(&val_buf.bo); + + /* + * Release the surface ID. + */ + + vmw_resource_release_id(res); + + return 0; + +out_no_fifo: +out_no_validate: + if (srf->backup) + ttm_eu_backoff_reservation(&val_list); +out_no_reserve: + ttm_bo_unref(&val_buf.bo); + ttm_bo_unref(&srf->backup); + return ret; +} + + +/** + * vmw_surface_validate - make a surface available to the device, evicting + * other surfaces if needed. + * + * @dev_priv: Pointer to a device private struct. + * @srf: Pointer to a struct vmw_surface. + * + * Try to validate a surface and if it fails due to limited device resources, + * repeatedly try to evict other surfaces until the request can be + * acommodated. + * + * May return errors if out of resources. + */ +int vmw_surface_validate(struct vmw_private *dev_priv, + struct vmw_surface *srf) +{ + int ret; + struct vmw_surface *evict_srf; + + do { + write_lock(&dev_priv->resource_lock); + list_del_init(&srf->lru_head); + write_unlock(&dev_priv->resource_lock); + + ret = vmw_surface_do_validate(dev_priv, srf); + if (likely(ret != -EBUSY)) + break; + + write_lock(&dev_priv->resource_lock); + if (list_empty(&dev_priv->surface_lru)) { + DRM_ERROR("Out of device memory for surfaces.\n"); + ret = -EBUSY; + write_unlock(&dev_priv->resource_lock); + break; + } + + evict_srf = vmw_surface_reference + (list_first_entry(&dev_priv->surface_lru, + struct vmw_surface, + lru_head)); + list_del_init(&evict_srf->lru_head); + + write_unlock(&dev_priv->resource_lock); + (void) vmw_surface_evict(dev_priv, evict_srf); + + vmw_surface_unreference(&evict_srf); + + } while (1); + + if (unlikely(ret != 0 && srf->res.id != -1)) { + write_lock(&dev_priv->resource_lock); + list_add_tail(&srf->lru_head, &dev_priv->surface_lru); + write_unlock(&dev_priv->resource_lock); + } + + return ret; +} + + +/** + * vmw_surface_remove_from_lists - Remove surface resources from lookup lists + * + * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface + * + * As part of the resource destruction, remove the surface from any + * lookup lists. 
+ */ +static void vmw_surface_remove_from_lists(struct vmw_resource *res) +{ + struct vmw_surface *srf = container_of(res, struct vmw_surface, res); + + list_del_init(&srf->lru_head); +} + +int vmw_surface_init(struct vmw_private *dev_priv, + struct vmw_surface *srf, + void (*res_free) (struct vmw_resource *res)) +{ + int ret; + struct vmw_resource *res = &srf->res; + + BUG_ON(res_free == NULL); + INIT_LIST_HEAD(&srf->lru_head); + ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, + VMW_RES_SURFACE, true, res_free, + vmw_surface_remove_from_lists); + + if (unlikely(ret != 0)) + res_free(res); + + /* + * The surface won't be visible to hardware until a + * surface validate. + */ + + (void) vmw_3d_resource_inc(dev_priv, false); + vmw_resource_activate(res, vmw_hw_surface_destroy); + return ret; +} + +static void vmw_user_surface_free(struct vmw_resource *res) +{ + struct vmw_surface *srf = container_of(res, struct vmw_surface, res); + struct vmw_user_surface *user_srf = + container_of(srf, struct vmw_user_surface, srf); + struct vmw_private *dev_priv = srf->res.dev_priv; + uint32_t size = user_srf->size; + + if (srf->backup) + ttm_bo_unref(&srf->backup); + kfree(srf->offsets); + kfree(srf->sizes); + kfree(srf->snooper.image); + kfree(user_srf); + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); +} + +/** + * vmw_resource_unreserve - unreserve resources previously reserved for + * command submission. + * + * @list_head: list of resources to unreserve. + * + * Currently only surfaces are considered, and unreserving a surface + * means putting it back on the device's surface lru list, + * so that it can be evicted if necessary. + * This function traverses the resource list and + * checks whether resources are surfaces, and in that case puts them back + * on the device's surface LRU list. + */ +void vmw_resource_unreserve(struct list_head *list) +{ + struct vmw_resource *res; + struct vmw_surface *srf; + rwlock_t *lock = NULL; + + list_for_each_entry(res, list, validate_head) { + + if (res->res_free != &vmw_surface_res_free && + res->res_free != &vmw_user_surface_free) + continue; + + if (unlikely(lock == NULL)) { + lock = &res->dev_priv->resource_lock; + write_lock(lock); + } + + srf = container_of(res, struct vmw_surface, res); + list_del_init(&srf->lru_head); + list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru); + } + + if (lock != NULL) + write_unlock(lock); +} + +/** + * Helper function that looks either a surface or dmabuf. + * + * The pointer this pointed at by out_surf and out_buf needs to be null. 
+ */ +int vmw_user_lookup_handle(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, + struct vmw_surface **out_surf, + struct vmw_dma_buffer **out_buf) +{ + int ret; + + BUG_ON(*out_surf || *out_buf); + + ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf); + if (!ret) + return 0; + + ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); + return ret; +} + + +int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, struct vmw_surface **out) +{ + struct vmw_resource *res; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + struct ttm_base_object *base; + int ret = -EINVAL; + + base = ttm_base_object_lookup(tfile, handle); + if (unlikely(base == NULL)) + return -EINVAL; + + if (unlikely(base->object_type != VMW_RES_SURFACE)) + goto out_bad_resource; + + user_srf = container_of(base, struct vmw_user_surface, base); + srf = &user_srf->srf; + res = &srf->res; + + read_lock(&dev_priv->resource_lock); + + if (!res->avail || res->res_free != &vmw_user_surface_free) { + read_unlock(&dev_priv->resource_lock); + goto out_bad_resource; + } + + kref_get(&res->kref); + read_unlock(&dev_priv->resource_lock); + + *out = srf; + ret = 0; + +out_bad_resource: + ttm_base_object_unref(&base); + + return ret; +} + +static void vmw_user_surface_base_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct vmw_user_surface *user_srf = + container_of(base, struct vmw_user_surface, base); + struct vmw_resource *res = &user_srf->srf.res; + + *p_base = NULL; + vmw_resource_unreference(&res); +} + +int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + + return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); +} + +int vmw_surface_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_surface *user_srf; + struct vmw_surface *srf; + struct vmw_resource *res; + struct vmw_resource *tmp; + union drm_vmw_surface_create_arg *arg = + (union drm_vmw_surface_create_arg *)data; + struct drm_vmw_surface_create_req *req = &arg->req; + struct drm_vmw_surface_arg *rep = &arg->rep; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct drm_vmw_size __user *user_sizes; + int ret; + int i, j; + uint32_t cur_bo_offset; + struct drm_vmw_size *cur_size; + struct vmw_surface_offset *cur_offset; + uint32_t stride_bpp; + uint32_t bpp; + uint32_t num_sizes; + uint32_t size; + struct vmw_master *vmaster = vmw_master(file_priv->master); + + if (unlikely(vmw_user_surface_size == 0)) + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + + 128; + + num_sizes = 0; + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) + num_sizes += req->mip_levels[i]; + + if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * + DRM_VMW_MAX_MIP_LEVELS) + return -EINVAL; + + size = vmw_user_surface_size + 128 + + ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + + ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); + + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + size, false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for surface" + " creation.\n"); + 
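/*
 * Rough worked example of the "size" charge accounted just above; the struct
 * sizes and rounding behaviour are illustrative assumptions, not taken from
 * this patch. A single-face surface with a 10-level mip chain gives
 * num_sizes = 10. With three 32-bit members each, sizeof(struct drm_vmw_size)
 * and sizeof(struct vmw_surface_offset) are both 12 bytes, and assuming
 * ttm_round_pot() rounds up to the next power of two, each
 * ttm_round_pot(10 * 12) term becomes 128 bytes, so the charge against the
 * TTM global memory zone is approximately
 *
 *	size = vmw_user_surface_size	(ttm_round_pot(sizeof(*user_srf)) + 128)
 *	     + 128			(slack, as for the context estimate above)
 *	     + 128			(sizes array)
 *	     + 128;			(offsets array)
 *
 * The computed value is stored in user_srf->size so that
 * vmw_user_surface_free() can hand exactly the same amount back via
 * ttm_mem_global_free().
 */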
goto out_unlock; + } + + user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL); + if (unlikely(user_srf == NULL)) { + ret = -ENOMEM; + goto out_no_user_srf; + } + + srf = &user_srf->srf; + res = &srf->res; + + srf->flags = req->flags; + srf->format = req->format; + srf->scanout = req->scanout; + srf->backup = NULL; + + memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); + srf->num_sizes = num_sizes; + user_srf->size = size; + + srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); + if (unlikely(srf->sizes == NULL)) { + ret = -ENOMEM; + goto out_no_sizes; + } + srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), + GFP_KERNEL); + if (unlikely(srf->sizes == NULL)) { + ret = -ENOMEM; + goto out_no_offsets; + } + + user_sizes = (struct drm_vmw_size __user *)(unsigned long) + req->size_addr; + + ret = copy_from_user(srf->sizes, user_sizes, + srf->num_sizes * sizeof(*srf->sizes)); + if (unlikely(ret != 0)) { + ret = -EFAULT; + goto out_no_copy; + } + + cur_bo_offset = 0; + cur_offset = srf->offsets; + cur_size = srf->sizes; + + bpp = vmw_sf_bpp[srf->format].bpp; + stride_bpp = vmw_sf_bpp[srf->format].s_bpp; + + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { + for (j = 0; j < srf->mip_levels[i]; ++j) { + uint32_t stride = + (cur_size->width * stride_bpp + 7) >> 3; + + cur_offset->face = i; + cur_offset->mip = j; + cur_offset->bo_offset = cur_bo_offset; + cur_bo_offset += stride * cur_size->height * + cur_size->depth * bpp / stride_bpp; + ++cur_offset; + ++cur_size; + } + } + srf->backup_size = cur_bo_offset; + + if (srf->scanout && + srf->num_sizes == 1 && + srf->sizes[0].width == 64 && + srf->sizes[0].height == 64 && + srf->format == SVGA3D_A8R8G8B8) { + + /* allocate image area and clear it */ + srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL); + if (!srf->snooper.image) { + DRM_ERROR("Failed to allocate cursor_image\n"); + ret = -ENOMEM; + goto out_no_copy; + } + } else { + srf->snooper.image = NULL; + } + srf->snooper.crtc = NULL; + + user_srf->base.shareable = false; + user_srf->base.tfile = NULL; + + /** + * From this point, the generic resource management functions + * destroy the object on failure. 
+ */ + + ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); + if (unlikely(ret != 0)) + goto out_unlock; + + tmp = vmw_resource_reference(&srf->res); + ret = ttm_base_object_init(tfile, &user_srf->base, + req->shareable, VMW_RES_SURFACE, + &vmw_user_surface_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + vmw_resource_unreference(&res); + goto out_unlock; + } + + rep->sid = user_srf->base.hash.key; + if (rep->sid == SVGA3D_INVALID_ID) + DRM_ERROR("Created bad Surface ID.\n"); + + vmw_resource_unreference(&res); + + ttm_read_unlock(&vmaster->lock); + return 0; +out_no_copy: + kfree(srf->offsets); +out_no_offsets: + kfree(srf->sizes); +out_no_sizes: + kfree(user_srf); +out_no_user_srf: + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); +out_unlock: + ttm_read_unlock(&vmaster->lock); + return ret; +} + +int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + union drm_vmw_surface_reference_arg *arg = + (union drm_vmw_surface_reference_arg *)data; + struct drm_vmw_surface_arg *req = &arg->req; + struct drm_vmw_surface_create_req *rep = &arg->rep; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + struct drm_vmw_size __user *user_sizes; + struct ttm_base_object *base; + int ret = -EINVAL; + + base = ttm_base_object_lookup(tfile, req->sid); + if (unlikely(base == NULL)) { + DRM_ERROR("Could not find surface to reference.\n"); + return -EINVAL; + } + + if (unlikely(base->object_type != VMW_RES_SURFACE)) + goto out_bad_resource; + + user_srf = container_of(base, struct vmw_user_surface, base); + srf = &user_srf->srf; + + ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not add a reference to a surface.\n"); + goto out_no_reference; + } + + rep->flags = srf->flags; + rep->format = srf->format; + memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); + user_sizes = (struct drm_vmw_size __user *)(unsigned long) + rep->size_addr; + + if (user_sizes) + ret = copy_to_user(user_sizes, srf->sizes, + srf->num_sizes * sizeof(*srf->sizes)); + if (unlikely(ret != 0)) { + DRM_ERROR("copy_to_user failed %p %u\n", + user_sizes, srf->num_sizes); + ret = -EFAULT; + } +out_bad_resource: +out_no_reference: + ttm_base_object_unref(&base); + + return ret; +} + +int vmw_surface_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, int *id) +{ + struct ttm_base_object *base; + struct vmw_user_surface *user_srf; + + int ret = -EPERM; + + base = ttm_base_object_lookup(tfile, handle); + if (unlikely(base == NULL)) + return -EINVAL; + + if (unlikely(base->object_type != VMW_RES_SURFACE)) + goto out_bad_surface; + + user_srf = container_of(base, struct vmw_user_surface, base); + *id = user_srf->srf.res.id; + ret = 0; + +out_bad_surface: + /** + * FIXME: May deadlock here when called from the + * command parsing code. + */ + + ttm_base_object_unref(&base); + return ret; +} + +/** + * Buffer management. 
+ */ +void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + + kfree(vmw_bo); +} + +int vmw_dmabuf_init(struct vmw_private *dev_priv, + struct vmw_dma_buffer *vmw_bo, + size_t size, struct ttm_placement *placement, + bool interruptible, + void (*bo_free) (struct ttm_buffer_object *bo)) +{ + struct ttm_bo_device *bdev = &dev_priv->bdev; + size_t acc_size; + int ret; + + BUG_ON(!bo_free); + + acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); + memset(vmw_bo, 0, sizeof(*vmw_bo)); + + INIT_LIST_HEAD(&vmw_bo->validate_list); + + ret = ttm_bo_init(bdev, &vmw_bo->base, size, + ttm_bo_type_device, placement, + 0, 0, interruptible, + NULL, acc_size, NULL, bo_free); + return ret; +} + +static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) +{ + struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); + + kfree(vmw_user_bo); +} + +static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) +{ + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_base_object *base = *p_base; + struct ttm_buffer_object *bo; + + *p_base = NULL; + + if (unlikely(base == NULL)) + return; + + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); + bo = &vmw_user_bo->dma.base; + ttm_bo_unref(&bo); +} + +int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + union drm_vmw_alloc_dmabuf_arg *arg = + (union drm_vmw_alloc_dmabuf_arg *)data; + struct drm_vmw_alloc_dmabuf_req *req = &arg->req; + struct drm_vmw_dmabuf_rep *rep = &arg->rep; + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_buffer_object *tmp; + struct vmw_master *vmaster = vmw_master(file_priv->master); + int ret; + + vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); + if (unlikely(vmw_user_bo == NULL)) + return -ENOMEM; + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) { + kfree(vmw_user_bo); + return ret; + } + + ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, + &vmw_vram_sys_placement, true, + &vmw_user_dmabuf_destroy); + if (unlikely(ret != 0)) + goto out_no_dmabuf; + + tmp = ttm_bo_reference(&vmw_user_bo->dma.base); + ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, + &vmw_user_bo->base, + false, + ttm_buffer_type, + &vmw_user_dmabuf_release, NULL); + if (unlikely(ret != 0)) + goto out_no_base_object; + else { + rep->handle = vmw_user_bo->base.hash.key; + rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; + rep->cur_gmr_id = vmw_user_bo->base.hash.key; + rep->cur_gmr_offset = 0; + } + +out_no_base_object: + ttm_bo_unref(&tmp); out_no_dmabuf: ttm_read_unlock(&vmaster->lock); @@ -524,6 +1657,27 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, TTM_REF_USAGE); } +uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, + uint32_t cur_validate_node) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + + if (likely(vmw_bo->on_validate_list)) + return vmw_bo->cur_validate_node; + + vmw_bo->cur_validate_node = cur_validate_node; + vmw_bo->on_validate_list = true; + + return cur_validate_node; +} + +void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + + vmw_bo->on_validate_list = false; +} + int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, uint32_t handle, struct vmw_dma_buffer **out) { @@ -552,18 +1706,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, return 0; } -int 
vmw_user_dmabuf_reference(struct ttm_object_file *tfile, - struct vmw_dma_buffer *dma_buf) -{ - struct vmw_user_dma_buffer *user_bo; - - if (dma_buf->base.destroy != vmw_user_dmabuf_destroy) - return -EINVAL; - - user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); - return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); -} - /* * Stream management */ @@ -588,8 +1730,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv, struct vmw_resource *res = &stream->res; int ret; - ret = vmw_resource_init(dev_priv, res, false, res_free, - &vmw_stream_func); + ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, + VMW_RES_STREAM, false, res_free, NULL); if (unlikely(ret != 0)) { if (res_free == NULL) @@ -611,13 +1753,17 @@ static int vmw_stream_init(struct vmw_private *dev_priv, return 0; } +/** + * User-space context management: + */ + static void vmw_user_stream_free(struct vmw_resource *res) { struct vmw_user_stream *stream = container_of(res, struct vmw_user_stream, stream.res); struct vmw_private *dev_priv = res->dev_priv; - ttm_base_object_kfree(stream, base); + kfree(stream); ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_stream_size); } @@ -646,11 +1792,9 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, struct vmw_user_stream *stream; struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct idr *idr = &dev_priv->res_idr[vmw_res_stream]; int ret = 0; - - res = vmw_resource_lookup(dev_priv, idr, arg->stream_id); + res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id); if (unlikely(res == NULL)) return -EINVAL; @@ -751,8 +1895,7 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv, struct vmw_resource *res; int ret; - res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream], - *inout_id); + res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id); if (unlikely(res == NULL)) return -EINVAL; @@ -847,453 +1990,3 @@ int vmw_dumb_destroy(struct drm_file *file_priv, return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, handle, TTM_REF_USAGE); } - -/** - * vmw_resource_buf_alloc - Allocate a backup buffer for a resource. - * - * @res: The resource for which to allocate a backup buffer. - * @interruptible: Whether any sleeps during allocation should be - * performed while interruptible. - */ -static int vmw_resource_buf_alloc(struct vmw_resource *res, - bool interruptible) -{ - unsigned long size = - (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK; - struct vmw_dma_buffer *backup; - int ret; - - if (likely(res->backup)) { - BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size); - return 0; - } - - backup = kzalloc(sizeof(*backup), GFP_KERNEL); - if (unlikely(backup == NULL)) - return -ENOMEM; - - ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, - res->func->backup_placement, - interruptible, - &vmw_dmabuf_bo_free); - if (unlikely(ret != 0)) - goto out_no_dmabuf; - - res->backup = backup; - -out_no_dmabuf: - return ret; -} - -/** - * vmw_resource_do_validate - Make a resource up-to-date and visible - * to the device. - * - * @res: The resource to make visible to the device. - * @val_buf: Information about a buffer possibly - * containing backup data if a bind operation is needed. - * - * On hardware resource shortage, this function returns -EBUSY and - * should be retried once resources have been freed up. 
- */ -static int vmw_resource_do_validate(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf) -{ - int ret = 0; - const struct vmw_res_func *func = res->func; - - if (unlikely(res->id == -1)) { - ret = func->create(res); - if (unlikely(ret != 0)) - return ret; - } - - if (func->bind && - ((func->needs_backup && list_empty(&res->mob_head) && - val_buf->bo != NULL) || - (!func->needs_backup && val_buf->bo != NULL))) { - ret = func->bind(res, val_buf); - if (unlikely(ret != 0)) - goto out_bind_failed; - if (func->needs_backup) - list_add_tail(&res->mob_head, &res->backup->res_list); - } - - /* - * Only do this on write operations, and move to - * vmw_resource_unreserve if it can be called after - * backup buffers have been unreserved. Otherwise - * sort out locking. - */ - res->res_dirty = true; - - return 0; - -out_bind_failed: - func->destroy(res); - - return ret; -} - -/** - * vmw_resource_unreserve - Unreserve a resource previously reserved for - * command submission. - * - * @res: Pointer to the struct vmw_resource to unreserve. - * @new_backup: Pointer to new backup buffer if command submission - * switched. - * @new_backup_offset: New backup offset if @new_backup is !NULL. - * - * Currently unreserving a resource means putting it back on the device's - * resource lru list, so that it can be evicted if necessary. - */ -void vmw_resource_unreserve(struct vmw_resource *res, - struct vmw_dma_buffer *new_backup, - unsigned long new_backup_offset) -{ - struct vmw_private *dev_priv = res->dev_priv; - - if (!list_empty(&res->lru_head)) - return; - - if (new_backup && new_backup != res->backup) { - - if (res->backup) { - BUG_ON(atomic_read(&res->backup->base.reserved) == 0); - list_del_init(&res->mob_head); - vmw_dmabuf_unreference(&res->backup); - } - - res->backup = vmw_dmabuf_reference(new_backup); - BUG_ON(atomic_read(&new_backup->base.reserved) == 0); - list_add_tail(&res->mob_head, &new_backup->res_list); - } - if (new_backup) - res->backup_offset = new_backup_offset; - - if (!res->func->may_evict) - return; - - write_lock(&dev_priv->resource_lock); - list_add_tail(&res->lru_head, - &res->dev_priv->res_lru[res->func->res_type]); - write_unlock(&dev_priv->resource_lock); -} - -/** - * vmw_resource_check_buffer - Check whether a backup buffer is needed - * for a resource and in that case, allocate - * one, reserve and validate it. - * - * @res: The resource for which to allocate a backup buffer. - * @interruptible: Whether any sleeps during allocation should be - * performed while interruptible. - * @val_buf: On successful return contains data about the - * reserved and validated backup buffer. 
- */ -int vmw_resource_check_buffer(struct vmw_resource *res, - bool interruptible, - struct ttm_validate_buffer *val_buf) -{ - struct list_head val_list; - bool backup_dirty = false; - int ret; - - if (unlikely(res->backup == NULL)) { - ret = vmw_resource_buf_alloc(res, interruptible); - if (unlikely(ret != 0)) - return ret; - } - - INIT_LIST_HEAD(&val_list); - val_buf->bo = ttm_bo_reference(&res->backup->base); - list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(&val_list); - if (unlikely(ret != 0)) - goto out_no_reserve; - - if (res->func->needs_backup && list_empty(&res->mob_head)) - return 0; - - backup_dirty = res->backup_dirty; - ret = ttm_bo_validate(&res->backup->base, - res->func->backup_placement, - true, false, false); - - if (unlikely(ret != 0)) - goto out_no_validate; - - return 0; - -out_no_validate: - ttm_eu_backoff_reservation(&val_list); -out_no_reserve: - ttm_bo_unref(&val_buf->bo); - if (backup_dirty) - vmw_dmabuf_unreference(&res->backup); - - return ret; -} - -/** - * vmw_resource_reserve - Reserve a resource for command submission - * - * @res: The resource to reserve. - * - * This function takes the resource off the LRU list and make sure - * a backup buffer is present for guest-backed resources. However, - * the buffer may not be bound to the resource at this point. - * - */ -int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) -{ - struct vmw_private *dev_priv = res->dev_priv; - int ret; - - write_lock(&dev_priv->resource_lock); - list_del_init(&res->lru_head); - write_unlock(&dev_priv->resource_lock); - - if (res->func->needs_backup && res->backup == NULL && - !no_backup) { - ret = vmw_resource_buf_alloc(res, true); - if (unlikely(ret != 0)) - return ret; - } - - return 0; -} - -/** - * vmw_resource_backoff_reservation - Unreserve and unreference a - * backup buffer - *. - * @val_buf: Backup buffer information. - */ -void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) -{ - struct list_head val_list; - - if (likely(val_buf->bo == NULL)) - return; - - INIT_LIST_HEAD(&val_list); - list_add_tail(&val_buf->head, &val_list); - ttm_eu_backoff_reservation(&val_list); - ttm_bo_unref(&val_buf->bo); -} - -/** - * vmw_resource_do_evict - Evict a resource, and transfer its data - * to a backup buffer. - * - * @res: The resource to evict. - */ -int vmw_resource_do_evict(struct vmw_resource *res) -{ - struct ttm_validate_buffer val_buf; - const struct vmw_res_func *func = res->func; - int ret; - - BUG_ON(!func->may_evict); - - val_buf.bo = NULL; - ret = vmw_resource_check_buffer(res, true, &val_buf); - if (unlikely(ret != 0)) - return ret; - - if (unlikely(func->unbind != NULL && - (!func->needs_backup || !list_empty(&res->mob_head)))) { - ret = func->unbind(res, res->res_dirty, &val_buf); - if (unlikely(ret != 0)) - goto out_no_unbind; - list_del_init(&res->mob_head); - } - ret = func->destroy(res); - res->backup_dirty = true; - res->res_dirty = false; -out_no_unbind: - vmw_resource_backoff_reservation(&val_buf); - - return ret; -} - - -/** - * vmw_resource_validate - Make a resource up-to-date and visible - * to the device. - * - * @res: The resource to make visible to the device. - * - * On succesful return, any backup DMA buffer pointed to by @res->backup will - * be reserved and validated. - * On hardware resource shortage, this function will repeatedly evict - * resources of the same type until the validation succeeds. 
- */ -int vmw_resource_validate(struct vmw_resource *res) -{ - int ret; - struct vmw_resource *evict_res; - struct vmw_private *dev_priv = res->dev_priv; - struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; - struct ttm_validate_buffer val_buf; - - if (likely(!res->func->may_evict)) - return 0; - - val_buf.bo = NULL; - if (res->backup) - val_buf.bo = &res->backup->base; - do { - ret = vmw_resource_do_validate(res, &val_buf); - if (likely(ret != -EBUSY)) - break; - - write_lock(&dev_priv->resource_lock); - if (list_empty(lru_list) || !res->func->may_evict) { - DRM_ERROR("Out of device device id entries " - "for %s.\n", res->func->type_name); - ret = -EBUSY; - write_unlock(&dev_priv->resource_lock); - break; - } - - evict_res = vmw_resource_reference - (list_first_entry(lru_list, struct vmw_resource, - lru_head)); - list_del_init(&evict_res->lru_head); - - write_unlock(&dev_priv->resource_lock); - vmw_resource_do_evict(evict_res); - vmw_resource_unreference(&evict_res); - } while (1); - - if (unlikely(ret != 0)) - goto out_no_validate; - else if (!res->func->needs_backup && res->backup) { - list_del_init(&res->mob_head); - vmw_dmabuf_unreference(&res->backup); - } - - return 0; - -out_no_validate: - return ret; -} - -/** - * vmw_fence_single_bo - Utility function to fence a single TTM buffer - * object without unreserving it. - * - * @bo: Pointer to the struct ttm_buffer_object to fence. - * @fence: Pointer to the fence. If NULL, this function will - * insert a fence into the command stream.. - * - * Contrary to the ttm_eu version of this function, it takes only - * a single buffer object instead of a list, and it also doesn't - * unreserve the buffer object, which needs to be done separately. - */ -void vmw_fence_single_bo(struct ttm_buffer_object *bo, - struct vmw_fence_obj *fence) -{ - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_driver *driver = bdev->driver; - struct vmw_fence_obj *old_fence_obj; - struct vmw_private *dev_priv = - container_of(bdev, struct vmw_private, bdev); - - if (fence == NULL) - vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - else - driver->sync_obj_ref(fence); - - spin_lock(&bdev->fence_lock); - - old_fence_obj = bo->sync_obj; - bo->sync_obj = fence; - - spin_unlock(&bdev->fence_lock); - - if (old_fence_obj) - vmw_fence_obj_unreference(&old_fence_obj); -} - -/** - * vmw_resource_move_notify - TTM move_notify_callback - * - * @bo: The TTM buffer object about to move. - * @mem: The truct ttm_mem_reg indicating to what memory - * region the move is taking place. - * - * For now does nothing. - */ -void vmw_resource_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) -{ -} - -/** - * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. - * - * @res: The resource being queried. - */ -bool vmw_resource_needs_backup(const struct vmw_resource *res) -{ - return res->func->needs_backup; -} - -/** - * vmw_resource_evict_type - Evict all resources of a specific type - * - * @dev_priv: Pointer to a device private struct - * @type: The resource type to evict - * - * To avoid thrashing starvation or as part of the hibernation sequence, - * evict all evictable resources of a specific type. 
- */ -static void vmw_resource_evict_type(struct vmw_private *dev_priv, - enum vmw_res_type type) -{ - struct list_head *lru_list = &dev_priv->res_lru[type]; - struct vmw_resource *evict_res; - - do { - write_lock(&dev_priv->resource_lock); - - if (list_empty(lru_list)) - goto out_unlock; - - evict_res = vmw_resource_reference( - list_first_entry(lru_list, struct vmw_resource, - lru_head)); - list_del_init(&evict_res->lru_head); - write_unlock(&dev_priv->resource_lock); - vmw_resource_do_evict(evict_res); - vmw_resource_unreference(&evict_res); - } while (1); - -out_unlock: - write_unlock(&dev_priv->resource_lock); -} - -/** - * vmw_resource_evict_all - Evict all evictable resources - * - * @dev_priv: Pointer to a device private struct - * - * To avoid thrashing starvation or as part of the hibernation sequence, - * evict all evictable resources. In particular this means that all - * guest-backed resources that are registered with the device are - * evicted and the OTable becomes clean. - */ -void vmw_resource_evict_all(struct vmw_private *dev_priv) -{ - enum vmw_res_type type; - - mutex_lock(&dev_priv->cmdbuf_mutex); - - for (type = 0; type < vmw_res_max; ++type) - vmw_resource_evict_type(dev_priv, type); - - mutex_unlock(&dev_priv->cmdbuf_mutex); -} diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h deleted file mode 100644 index f3adeed2854c..000000000000 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h +++ /dev/null @@ -1,84 +0,0 @@ -/************************************************************************** - * - * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ - -#ifndef _VMWGFX_RESOURCE_PRIV_H_ -#define _VMWGFX_RESOURCE_PRIV_H_ - -#include "vmwgfx_drv.h" - -/** - * struct vmw_user_resource_conv - Identify a derived user-exported resource - * type and provide a function to convert its ttm_base_object pointer to - * a struct vmw_resource - */ -struct vmw_user_resource_conv { - enum ttm_object_type object_type; - struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base); - void (*res_free) (struct vmw_resource *res); -}; - -/** - * struct vmw_res_func - members and functions common for a resource type - * - * @res_type: Enum that identifies the lru list to use for eviction. - * @needs_backup: Whether the resource is guest-backed and needs - * persistent buffer storage. - * @type_name: String that identifies the resource type. - * @backup_placement: TTM placement for backup buffers. - * @may_evict Whether the resource may be evicted. - * @create: Create a hardware resource. - * @destroy: Destroy a hardware resource. - * @bind: Bind a hardware resource to persistent buffer storage. - * @unbind: Unbind a hardware resource from persistent - * buffer storage. - */ - -struct vmw_res_func { - enum vmw_res_type res_type; - bool needs_backup; - const char *type_name; - struct ttm_placement *backup_placement; - bool may_evict; - - int (*create) (struct vmw_resource *res); - int (*destroy) (struct vmw_resource *res); - int (*bind) (struct vmw_resource *res, - struct ttm_validate_buffer *val_buf); - int (*unbind) (struct vmw_resource *res, - bool readback, - struct ttm_validate_buffer *val_buf); -}; - -int vmw_resource_alloc_id(struct vmw_resource *res); -void vmw_resource_release_id(struct vmw_resource *res); -int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, - bool delay_id, - void (*res_free) (struct vmw_resource *res), - const struct vmw_res_func *func); -void vmw_resource_activate(struct vmw_resource *res, - void (*hw_destroy) (struct vmw_resource *)); -#endif diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 26387c3d5a21..6deaf2f8bab1 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) drm_mode_crtc_set_gamma_size(crtc, 256); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.dirty_info_property, 1); @@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) return -EINVAL; } - if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) { + if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) { DRM_INFO("Not using screen objects," " missing cap SCREEN_OBJECT_2\n"); return -ENOSYS; diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c deleted file mode 100644 index 582814339748..000000000000 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ /dev/null @@ -1,893 +0,0 @@ -/************************************************************************** - * - * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#include "vmwgfx_drv.h" -#include "vmwgfx_resource_priv.h" -#include -#include "svga3d_surfacedefs.h" - -/** - * struct vmw_user_surface - User-space visible surface resource - * - * @base: The TTM base object handling user-space visibility. - * @srf: The surface metadata. - * @size: TTM accounting size for the surface. - */ -struct vmw_user_surface { - struct ttm_base_object base; - struct vmw_surface srf; - uint32_t size; - uint32_t backup_handle; -}; - -/** - * struct vmw_surface_offset - Backing store mip level offset info - * - * @face: Surface face. - * @mip: Mip level. - * @bo_offset: Offset into backing store of this mip level. 
- * - */ -struct vmw_surface_offset { - uint32_t face; - uint32_t mip; - uint32_t bo_offset; -}; - -static void vmw_user_surface_free(struct vmw_resource *res); -static struct vmw_resource * -vmw_user_surface_base_to_res(struct ttm_base_object *base); -static int vmw_legacy_srf_bind(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf); -static int vmw_legacy_srf_unbind(struct vmw_resource *res, - bool readback, - struct ttm_validate_buffer *val_buf); -static int vmw_legacy_srf_create(struct vmw_resource *res); -static int vmw_legacy_srf_destroy(struct vmw_resource *res); - -static const struct vmw_user_resource_conv user_surface_conv = { - .object_type = VMW_RES_SURFACE, - .base_obj_to_res = vmw_user_surface_base_to_res, - .res_free = vmw_user_surface_free -}; - -const struct vmw_user_resource_conv *user_surface_converter = - &user_surface_conv; - - -static uint64_t vmw_user_surface_size; - -static const struct vmw_res_func vmw_legacy_surface_func = { - .res_type = vmw_res_surface, - .needs_backup = false, - .may_evict = true, - .type_name = "legacy surfaces", - .backup_placement = &vmw_srf_placement, - .create = &vmw_legacy_srf_create, - .destroy = &vmw_legacy_srf_destroy, - .bind = &vmw_legacy_srf_bind, - .unbind = &vmw_legacy_srf_unbind -}; - -/** - * struct vmw_surface_dma - SVGA3D DMA command - */ -struct vmw_surface_dma { - SVGA3dCmdHeader header; - SVGA3dCmdSurfaceDMA body; - SVGA3dCopyBox cb; - SVGA3dCmdSurfaceDMASuffix suffix; -}; - -/** - * struct vmw_surface_define - SVGA3D Surface Define command - */ -struct vmw_surface_define { - SVGA3dCmdHeader header; - SVGA3dCmdDefineSurface body; -}; - -/** - * struct vmw_surface_destroy - SVGA3D Surface Destroy command - */ -struct vmw_surface_destroy { - SVGA3dCmdHeader header; - SVGA3dCmdDestroySurface body; -}; - - -/** - * vmw_surface_dma_size - Compute fifo size for a dma command. - * - * @srf: Pointer to a struct vmw_surface - * - * Computes the required size for a surface dma command for backup or - * restoration of the surface represented by @srf. - */ -static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) -{ - return srf->num_sizes * sizeof(struct vmw_surface_dma); -} - - -/** - * vmw_surface_define_size - Compute fifo size for a surface define command. - * - * @srf: Pointer to a struct vmw_surface - * - * Computes the required size for a surface define command for the definition - * of the surface represented by @srf. - */ -static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) -{ - return sizeof(struct vmw_surface_define) + srf->num_sizes * - sizeof(SVGA3dSize); -} - - -/** - * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. - * - * Computes the required size for a surface destroy command for the destruction - * of a hw surface. - */ -static inline uint32_t vmw_surface_destroy_size(void) -{ - return sizeof(struct vmw_surface_destroy); -} - -/** - * vmw_surface_destroy_encode - Encode a surface_destroy command. - * - * @id: The surface id - * @cmd_space: Pointer to memory area in which the commands should be encoded. - */ -static void vmw_surface_destroy_encode(uint32_t id, - void *cmd_space) -{ - struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) - cmd_space; - - cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; - cmd->header.size = sizeof(cmd->body); - cmd->body.sid = id; -} - -/** - * vmw_surface_define_encode - Encode a surface_define command. - * - * @srf: Pointer to a struct vmw_surface object. 
- * @cmd_space: Pointer to memory area in which the commands should be encoded. - */ -static void vmw_surface_define_encode(const struct vmw_surface *srf, - void *cmd_space) -{ - struct vmw_surface_define *cmd = (struct vmw_surface_define *) - cmd_space; - struct drm_vmw_size *src_size; - SVGA3dSize *cmd_size; - uint32_t cmd_len; - int i; - - cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); - - cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; - cmd->header.size = cmd_len; - cmd->body.sid = srf->res.id; - cmd->body.surfaceFlags = srf->flags; - cmd->body.format = cpu_to_le32(srf->format); - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) - cmd->body.face[i].numMipLevels = srf->mip_levels[i]; - - cmd += 1; - cmd_size = (SVGA3dSize *) cmd; - src_size = srf->sizes; - - for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { - cmd_size->width = src_size->width; - cmd_size->height = src_size->height; - cmd_size->depth = src_size->depth; - } -} - -/** - * vmw_surface_dma_encode - Encode a surface_dma command. - * - * @srf: Pointer to a struct vmw_surface object. - * @cmd_space: Pointer to memory area in which the commands should be encoded. - * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents - * should be placed or read from. - * @to_surface: Boolean whether to DMA to the surface or from the surface. - */ -static void vmw_surface_dma_encode(struct vmw_surface *srf, - void *cmd_space, - const SVGAGuestPtr *ptr, - bool to_surface) -{ - uint32_t i; - struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; - const struct svga3d_surface_desc *desc = - svga3dsurface_get_desc(srf->format); - - for (i = 0; i < srf->num_sizes; ++i) { - SVGA3dCmdHeader *header = &cmd->header; - SVGA3dCmdSurfaceDMA *body = &cmd->body; - SVGA3dCopyBox *cb = &cmd->cb; - SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; - const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; - const struct drm_vmw_size *cur_size = &srf->sizes[i]; - - header->id = SVGA_3D_CMD_SURFACE_DMA; - header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); - - body->guest.ptr = *ptr; - body->guest.ptr.offset += cur_offset->bo_offset; - body->guest.pitch = svga3dsurface_calculate_pitch(desc, - cur_size); - body->host.sid = srf->res.id; - body->host.face = cur_offset->face; - body->host.mipmap = cur_offset->mip; - body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : - SVGA3D_READ_HOST_VRAM); - cb->x = 0; - cb->y = 0; - cb->z = 0; - cb->srcx = 0; - cb->srcy = 0; - cb->srcz = 0; - cb->w = cur_size->width; - cb->h = cur_size->height; - cb->d = cur_size->depth; - - suffix->suffixSize = sizeof(*suffix); - suffix->maximumOffset = - svga3dsurface_get_image_buffer_size(desc, cur_size, - body->guest.pitch); - suffix->flags.discard = 0; - suffix->flags.unsynchronized = 0; - suffix->flags.reserved = 0; - ++cmd; - } -}; - - -/** - * vmw_hw_surface_destroy - destroy a Device surface - * - * @res: Pointer to a struct vmw_resource embedded in a struct - * vmw_surface. - * - * Destroys a the device surface associated with a struct vmw_surface if - * any, and adjusts accounting and resource count accordingly. 
- */ -static void vmw_hw_surface_destroy(struct vmw_resource *res) -{ - - struct vmw_private *dev_priv = res->dev_priv; - struct vmw_surface *srf; - void *cmd; - - if (res->id != -1) { - - cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "destruction.\n"); - return; - } - - vmw_surface_destroy_encode(res->id, cmd); - vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); - - /* - * used_memory_size_atomic, or separate lock - * to avoid taking dev_priv::cmdbuf_mutex in - * the destroy path. - */ - - mutex_lock(&dev_priv->cmdbuf_mutex); - srf = vmw_res_to_srf(res); - dev_priv->used_memory_size -= res->backup_size; - mutex_unlock(&dev_priv->cmdbuf_mutex); - } - vmw_3d_resource_dec(dev_priv, false); -} - -/** - * vmw_legacy_srf_create - Create a device surface as part of the - * resource validation process. - * - * @res: Pointer to a struct vmw_surface. - * - * If the surface doesn't have a hw id. - * - * Returns -EBUSY if there wasn't sufficient device resources to - * complete the validation. Retry after freeing up resources. - * - * May return other errors if the kernel is out of guest resources. - */ -static int vmw_legacy_srf_create(struct vmw_resource *res) -{ - struct vmw_private *dev_priv = res->dev_priv; - struct vmw_surface *srf; - uint32_t submit_size; - uint8_t *cmd; - int ret; - - if (likely(res->id != -1)) - return 0; - - srf = vmw_res_to_srf(res); - if (unlikely(dev_priv->used_memory_size + res->backup_size >= - dev_priv->memory_size)) - return -EBUSY; - - /* - * Alloc id for the resource. - */ - - ret = vmw_resource_alloc_id(res); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to allocate a surface id.\n"); - goto out_no_id; - } - - if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { - ret = -EBUSY; - goto out_no_fifo; - } - - /* - * Encode surface define- commands. - */ - - submit_size = vmw_surface_define_size(srf); - cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "creation.\n"); - ret = -ENOMEM; - goto out_no_fifo; - } - - vmw_surface_define_encode(srf, cmd); - vmw_fifo_commit(dev_priv, submit_size); - /* - * Surface memory usage accounting. - */ - - dev_priv->used_memory_size += res->backup_size; - return 0; - -out_no_fifo: - vmw_resource_release_id(res); -out_no_id: - return ret; -} - -/** - * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface. - * - * @res: Pointer to a struct vmw_res embedded in a struct - * vmw_surface. - * @val_buf: Pointer to a struct ttm_validate_buffer containing - * information about the backup buffer. - * @bind: Boolean wether to DMA to the surface. - * - * Transfer backup data to or from a legacy surface as part of the - * validation process. - * May return other errors if the kernel is out of guest resources. - * The backup buffer will be fenced or idle upon successful completion, - * and if the surface needs persistent backup storage, the backup buffer - * will also be returned reserved iff @bind is true. 
- */ -static int vmw_legacy_srf_dma(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf, - bool bind) -{ - SVGAGuestPtr ptr; - struct vmw_fence_obj *fence; - uint32_t submit_size; - struct vmw_surface *srf = vmw_res_to_srf(res); - uint8_t *cmd; - struct vmw_private *dev_priv = res->dev_priv; - - BUG_ON(val_buf->bo == NULL); - - submit_size = vmw_surface_dma_size(srf); - cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "DMA.\n"); - return -ENOMEM; - } - vmw_bo_get_guest_ptr(val_buf->bo, &ptr); - vmw_surface_dma_encode(srf, cmd, &ptr, bind); - - vmw_fifo_commit(dev_priv, submit_size); - - /* - * Create a fence object and fence the backup buffer. - */ - - (void) vmw_execbuf_fence_commands(NULL, dev_priv, - &fence, NULL); - - vmw_fence_single_bo(val_buf->bo, fence); - - if (likely(fence != NULL)) - vmw_fence_obj_unreference(&fence); - - return 0; -} - -/** - * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the - * surface validation process. - * - * @res: Pointer to a struct vmw_res embedded in a struct - * vmw_surface. - * @val_buf: Pointer to a struct ttm_validate_buffer containing - * information about the backup buffer. - * - * This function will copy backup data to the surface if the - * backup buffer is dirty. - */ -static int vmw_legacy_srf_bind(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf) -{ - if (!res->backup_dirty) - return 0; - - return vmw_legacy_srf_dma(res, val_buf, true); -} - - -/** - * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the - * surface eviction process. - * - * @res: Pointer to a struct vmw_res embedded in a struct - * vmw_surface. - * @val_buf: Pointer to a struct ttm_validate_buffer containing - * information about the backup buffer. - * - * This function will copy backup data from the surface. - */ -static int vmw_legacy_srf_unbind(struct vmw_resource *res, - bool readback, - struct ttm_validate_buffer *val_buf) -{ - if (unlikely(readback)) - return vmw_legacy_srf_dma(res, val_buf, false); - return 0; -} - -/** - * vmw_legacy_srf_destroy - Destroy a device surface as part of a - * resource eviction process. - * - * @res: Pointer to a struct vmw_res embedded in a struct - * vmw_surface. - */ -static int vmw_legacy_srf_destroy(struct vmw_resource *res) -{ - struct vmw_private *dev_priv = res->dev_priv; - uint32_t submit_size; - uint8_t *cmd; - - BUG_ON(res->id == -1); - - /* - * Encode the dma- and surface destroy commands. - */ - - submit_size = vmw_surface_destroy_size(); - cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "eviction.\n"); - return -ENOMEM; - } - - vmw_surface_destroy_encode(res->id, cmd); - vmw_fifo_commit(dev_priv, submit_size); - - /* - * Surface memory usage accounting. - */ - - dev_priv->used_memory_size -= res->backup_size; - - /* - * Release the surface ID. - */ - - vmw_resource_release_id(res); - - return 0; -} - - -/** - * vmw_surface_init - initialize a struct vmw_surface - * - * @dev_priv: Pointer to a device private struct. - * @srf: Pointer to the struct vmw_surface to initialize. - * @res_free: Pointer to a resource destructor used to free - * the object. 
- */ -static int vmw_surface_init(struct vmw_private *dev_priv, - struct vmw_surface *srf, - void (*res_free) (struct vmw_resource *res)) -{ - int ret; - struct vmw_resource *res = &srf->res; - - BUG_ON(res_free == NULL); - (void) vmw_3d_resource_inc(dev_priv, false); - ret = vmw_resource_init(dev_priv, res, true, res_free, - &vmw_legacy_surface_func); - - if (unlikely(ret != 0)) { - vmw_3d_resource_dec(dev_priv, false); - res_free(res); - return ret; - } - - /* - * The surface won't be visible to hardware until a - * surface validate. - */ - - vmw_resource_activate(res, vmw_hw_surface_destroy); - return ret; -} - -/** - * vmw_user_surface_base_to_res - TTM base object to resource converter for - * user visible surfaces - * - * @base: Pointer to a TTM base object - * - * Returns the struct vmw_resource embedded in a struct vmw_surface - * for the user-visible object identified by the TTM base object @base. - */ -static struct vmw_resource * -vmw_user_surface_base_to_res(struct ttm_base_object *base) -{ - return &(container_of(base, struct vmw_user_surface, base)->srf.res); -} - -/** - * vmw_user_surface_free - User visible surface resource destructor - * - * @res: A struct vmw_resource embedded in a struct vmw_surface. - */ -static void vmw_user_surface_free(struct vmw_resource *res) -{ - struct vmw_surface *srf = vmw_res_to_srf(res); - struct vmw_user_surface *user_srf = - container_of(srf, struct vmw_user_surface, srf); - struct vmw_private *dev_priv = srf->res.dev_priv; - uint32_t size = user_srf->size; - - kfree(srf->offsets); - kfree(srf->sizes); - kfree(srf->snooper.image); - ttm_base_object_kfree(user_srf, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); -} - -/** - * vmw_user_surface_free - User visible surface TTM base object destructor - * - * @p_base: Pointer to a pointer to a TTM base object - * embedded in a struct vmw_user_surface. - * - * Drops the base object's reference on its resource, and the - * pointer pointed to by *p_base is set to NULL. - */ -static void vmw_user_surface_base_release(struct ttm_base_object **p_base) -{ - struct ttm_base_object *base = *p_base; - struct vmw_user_surface *user_srf = - container_of(base, struct vmw_user_surface, base); - struct vmw_resource *res = &user_srf->srf.res; - - *p_base = NULL; - vmw_resource_unreference(&res); -} - -/** - * vmw_user_surface_destroy_ioctl - Ioctl function implementing - * the user surface destroy functionality. - * - * @dev: Pointer to a struct drm_device. - * @data: Pointer to data copied from / to user-space. - * @file_priv: Pointer to a drm file private structure. - */ -int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - - return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); -} - -/** - * vmw_user_surface_define_ioctl - Ioctl function implementing - * the user surface define functionality. - * - * @dev: Pointer to a struct drm_device. - * @data: Pointer to data copied from / to user-space. - * @file_priv: Pointer to a drm file private structure. 
- */ -int vmw_surface_define_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_user_surface *user_srf; - struct vmw_surface *srf; - struct vmw_resource *res; - struct vmw_resource *tmp; - union drm_vmw_surface_create_arg *arg = - (union drm_vmw_surface_create_arg *)data; - struct drm_vmw_surface_create_req *req = &arg->req; - struct drm_vmw_surface_arg *rep = &arg->rep; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct drm_vmw_size __user *user_sizes; - int ret; - int i, j; - uint32_t cur_bo_offset; - struct drm_vmw_size *cur_size; - struct vmw_surface_offset *cur_offset; - uint32_t num_sizes; - uint32_t size; - struct vmw_master *vmaster = vmw_master(file_priv->master); - const struct svga3d_surface_desc *desc; - - if (unlikely(vmw_user_surface_size == 0)) - vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + - 128; - - num_sizes = 0; - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) - num_sizes += req->mip_levels[i]; - - if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * - DRM_VMW_MAX_MIP_LEVELS) - return -EINVAL; - - size = vmw_user_surface_size + 128 + - ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + - ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); - - - desc = svga3dsurface_get_desc(req->format); - if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { - DRM_ERROR("Invalid surface format for surface creation.\n"); - return -EINVAL; - } - - ret = ttm_read_lock(&vmaster->lock, true); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - size, false, true); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for surface" - " creation.\n"); - goto out_unlock; - } - - user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); - if (unlikely(user_srf == NULL)) { - ret = -ENOMEM; - goto out_no_user_srf; - } - - srf = &user_srf->srf; - res = &srf->res; - - srf->flags = req->flags; - srf->format = req->format; - srf->scanout = req->scanout; - - memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); - srf->num_sizes = num_sizes; - user_srf->size = size; - - srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); - if (unlikely(srf->sizes == NULL)) { - ret = -ENOMEM; - goto out_no_sizes; - } - srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), - GFP_KERNEL); - if (unlikely(srf->sizes == NULL)) { - ret = -ENOMEM; - goto out_no_offsets; - } - - user_sizes = (struct drm_vmw_size __user *)(unsigned long) - req->size_addr; - - ret = copy_from_user(srf->sizes, user_sizes, - srf->num_sizes * sizeof(*srf->sizes)); - if (unlikely(ret != 0)) { - ret = -EFAULT; - goto out_no_copy; - } - - srf->base_size = *srf->sizes; - srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; - srf->multisample_count = 1; - - cur_bo_offset = 0; - cur_offset = srf->offsets; - cur_size = srf->sizes; - - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { - for (j = 0; j < srf->mip_levels[i]; ++j) { - uint32_t stride = svga3dsurface_calculate_pitch - (desc, cur_size); - - cur_offset->face = i; - cur_offset->mip = j; - cur_offset->bo_offset = cur_bo_offset; - cur_bo_offset += svga3dsurface_get_image_buffer_size - (desc, cur_size, stride); - ++cur_offset; - ++cur_size; - } - } - res->backup_size = cur_bo_offset; - if (srf->scanout && - srf->num_sizes == 1 && - srf->sizes[0].width == 64 && - srf->sizes[0].height == 64 && - srf->format == SVGA3D_A8R8G8B8) { - - srf->snooper.image = kmalloc(64 * 64 
* 4, GFP_KERNEL); - /* clear the image */ - if (srf->snooper.image) { - memset(srf->snooper.image, 0x00, 64 * 64 * 4); - } else { - DRM_ERROR("Failed to allocate cursor_image\n"); - ret = -ENOMEM; - goto out_no_copy; - } - } else { - srf->snooper.image = NULL; - } - srf->snooper.crtc = NULL; - - user_srf->base.shareable = false; - user_srf->base.tfile = NULL; - - /** - * From this point, the generic resource management functions - * destroy the object on failure. - */ - - ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); - if (unlikely(ret != 0)) - goto out_unlock; - - tmp = vmw_resource_reference(&srf->res); - ret = ttm_base_object_init(tfile, &user_srf->base, - req->shareable, VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); - - if (unlikely(ret != 0)) { - vmw_resource_unreference(&tmp); - vmw_resource_unreference(&res); - goto out_unlock; - } - - rep->sid = user_srf->base.hash.key; - vmw_resource_unreference(&res); - - ttm_read_unlock(&vmaster->lock); - return 0; -out_no_copy: - kfree(srf->offsets); -out_no_offsets: - kfree(srf->sizes); -out_no_sizes: - ttm_base_object_kfree(user_srf, base); -out_no_user_srf: - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); -out_unlock: - ttm_read_unlock(&vmaster->lock); - return ret; -} - -/** - * vmw_user_surface_define_ioctl - Ioctl function implementing - * the user surface reference functionality. - * - * @dev: Pointer to a struct drm_device. - * @data: Pointer to data copied from / to user-space. - * @file_priv: Pointer to a drm file private structure. - */ -int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - union drm_vmw_surface_reference_arg *arg = - (union drm_vmw_surface_reference_arg *)data; - struct drm_vmw_surface_arg *req = &arg->req; - struct drm_vmw_surface_create_req *rep = &arg->rep; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_surface *srf; - struct vmw_user_surface *user_srf; - struct drm_vmw_size __user *user_sizes; - struct ttm_base_object *base; - int ret = -EINVAL; - - base = ttm_base_object_lookup(tfile, req->sid); - if (unlikely(base == NULL)) { - DRM_ERROR("Could not find surface to reference.\n"); - return -EINVAL; - } - - if (unlikely(base->object_type != VMW_RES_SURFACE)) - goto out_bad_resource; - - user_srf = container_of(base, struct vmw_user_surface, base); - srf = &user_srf->srf; - - ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); - if (unlikely(ret != 0)) { - DRM_ERROR("Could not add a reference to a surface.\n"); - goto out_no_reference; - } - - rep->flags = srf->flags; - rep->format = srf->format; - memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); - user_sizes = (struct drm_vmw_size __user *)(unsigned long) - rep->size_addr; - - if (user_sizes) - ret = copy_to_user(user_sizes, srf->sizes, - srf->num_sizes * sizeof(*srf->sizes)); - if (unlikely(ret != 0)) { - DRM_ERROR("copy_to_user failed %p %u\n", - user_sizes, srf->num_sizes); - ret = -EFAULT; - } -out_bad_resource: -out_no_reference: - ttm_base_object_unref(&base); - - return ret; -} diff --git a/trunk/drivers/gpu/vga/vga_switcheroo.c b/trunk/drivers/gpu/vga/vga_switcheroo.c index fa60add0ff63..e25cf31faab2 100644 --- a/trunk/drivers/gpu/vga/vga_switcheroo.c +++ b/trunk/drivers/gpu/vga/vga_switcheroo.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -375,6 +376,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char usercmd[64]; + const char 
*pdev_name; int ret; bool delay = false, can_switch; bool just_mux = false; @@ -466,6 +468,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, goto out; if (can_switch) { + pdev_name = pci_name(client->pdev); ret = vga_switchto_stage1(client); if (ret) printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret); @@ -537,6 +540,7 @@ static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv) int vga_switcheroo_process_delayed_switch(void) { struct vga_switcheroo_client *client; + const char *pdev_name; int ret; int err = -EINVAL; @@ -551,6 +555,7 @@ int vga_switcheroo_process_delayed_switch(void) if (!client || !check_can_switch()) goto err; + pdev_name = pci_name(client->pdev); ret = vga_switchto_stage2(client); if (ret) printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret); @@ -562,3 +567,4 @@ int vga_switcheroo_process_delayed_switch(void) return err; } EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); + diff --git a/trunk/drivers/hid/hid-microsoft.c b/trunk/drivers/hid/hid-microsoft.c index f676c01bb471..6fcd466d0825 100644 --- a/trunk/drivers/hid/hid-microsoft.c +++ b/trunk/drivers/hid/hid-microsoft.c @@ -46,9 +46,9 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, rdesc[559] = 0x45; } /* the same as above (s/usage/physical/) */ - if ((quirks & MS_RDESC_3K) && *rsize == 106 && - !memcmp((char []){ 0x19, 0x00, 0x29, 0xff }, - &rdesc[94], 4)) { + if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 && + rdesc[95] == 0x00 && rdesc[96] == 0x29 && + rdesc[97] == 0xff) { rdesc[94] = 0x35; rdesc[96] = 0x45; } diff --git a/trunk/drivers/hid/hidraw.c b/trunk/drivers/hid/hidraw.c index 17d15bb610d1..7c47fc3f7b2b 100644 --- a/trunk/drivers/hid/hidraw.c +++ b/trunk/drivers/hid/hidraw.c @@ -42,7 +42,6 @@ static struct cdev hidraw_cdev; static struct class *hidraw_class; static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; static DEFINE_MUTEX(minors_lock); -static void drop_ref(struct hidraw *hid, int exists_bit); static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { @@ -114,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, __u8 *buf; int ret = 0; - if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { + if (!hidraw_table[minor]) { ret = -ENODEV; goto out; } @@ -262,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file) } mutex_lock(&minors_lock); - if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { + if (!hidraw_table[minor]) { err = -ENODEV; goto out_unlock; } @@ -299,12 +298,36 @@ static int hidraw_open(struct inode *inode, struct file *file) static int hidraw_release(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); + struct hidraw *dev; struct hidraw_list *list = file->private_data; + int ret; + int i; + + mutex_lock(&minors_lock); + if (!hidraw_table[minor]) { + ret = -ENODEV; + goto unlock; + } - drop_ref(hidraw_table[minor], 0); list_del(&list->node); + dev = hidraw_table[minor]; + if (!--dev->open) { + if (list->hidraw->exist) { + hid_hw_power(dev->hid, PM_HINT_NORMAL); + hid_hw_close(dev->hid); + } else { + kfree(list->hidraw); + } + } + + for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i) + kfree(list->buffer[i].value); kfree(list); - return 0; + ret = 0; +unlock: + mutex_unlock(&minors_lock); + + return ret; } static long hidraw_ioctl(struct file *file, unsigned int cmd, @@ -506,7 +529,21 @@ EXPORT_SYMBOL_GPL(hidraw_connect); void 
hidraw_disconnect(struct hid_device *hid) { struct hidraw *hidraw = hid->hidraw; - drop_ref(hidraw, 1); + + mutex_lock(&minors_lock); + hidraw->exist = 0; + + device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); + + hidraw_table[hidraw->minor] = NULL; + + if (hidraw->open) { + hid_hw_close(hid); + wake_up_interruptible(&hidraw->wait); + } else { + kfree(hidraw); + } + mutex_unlock(&minors_lock); } EXPORT_SYMBOL_GPL(hidraw_disconnect); @@ -555,23 +592,3 @@ void hidraw_exit(void) unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); } - -static void drop_ref(struct hidraw *hidraw, int exists_bit) -{ - mutex_lock(&minors_lock); - if (exists_bit) { - hid_hw_close(hidraw->hid); - hidraw->exist = 0; - if (hidraw->open) - wake_up_interruptible(&hidraw->wait); - } else { - --hidraw->open; - } - - if (!hidraw->open && !hidraw->exist) { - device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); - hidraw_table[hidraw->minor] = NULL; - kfree(hidraw); - } - mutex_unlock(&minors_lock); -} diff --git a/trunk/drivers/hwmon/asb100.c b/trunk/drivers/hwmon/asb100.c index a227be47149f..520e5bf4f76d 100644 --- a/trunk/drivers/hwmon/asb100.c +++ b/trunk/drivers/hwmon/asb100.c @@ -32,7 +32,7 @@ * ASB100-A supports pwm1, while plain ASB100 does not. There is no known * way for the driver to tell which one is there. * - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * asb100 7 3 1 4 0x31 0x0694 yes no */ diff --git a/trunk/drivers/hwmon/w83627ehf.c b/trunk/drivers/hwmon/w83627ehf.c index 1821b7423d5b..de3c7e04c3b5 100644 --- a/trunk/drivers/hwmon/w83627ehf.c +++ b/trunk/drivers/hwmon/w83627ehf.c @@ -2083,6 +2083,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) mutex_init(&data->lock); mutex_init(&data->update_lock); data->name = w83627ehf_device_names[sio_data->kind]; + data->bank = 0xff; /* Force initial bank selection */ platform_set_drvdata(pdev, data); /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ diff --git a/trunk/drivers/hwmon/w83627hf.c b/trunk/drivers/hwmon/w83627hf.c index 5b1a6a666441..af1589908709 100644 --- a/trunk/drivers/hwmon/w83627hf.c +++ b/trunk/drivers/hwmon/w83627hf.c @@ -25,7 +25,7 @@ /* * Supports following chips: * - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * w83627hf 9 3 2 3 0x20 0x5ca3 no yes(LPC) * w83627thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) * w83637hf 7 3 3 3 0x80 0x5ca3 no yes(LPC) diff --git a/trunk/drivers/hwmon/w83781d.c b/trunk/drivers/hwmon/w83781d.c index 5a5046d94c3e..20f11d31da40 100644 --- a/trunk/drivers/hwmon/w83781d.c +++ b/trunk/drivers/hwmon/w83781d.c @@ -24,7 +24,7 @@ /* * Supports following chips: * - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * as99127f 7 3 0 3 0x31 0x12c3 yes no * as99127f rev.2 (type_name = as99127f) 0x31 0x5ca3 yes no * w83781d 7 3 0 3 0x10-1 0x5ca3 yes yes diff --git a/trunk/drivers/hwmon/w83791d.c b/trunk/drivers/hwmon/w83791d.c index 39ab7bcc616e..ed397c645198 100644 --- a/trunk/drivers/hwmon/w83791d.c +++ b/trunk/drivers/hwmon/w83791d.c @@ -22,7 +22,7 @@ /* * Supports following chips: * - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * w83791d 10 5 5 3 0x71 0x5ca3 yes no * * The w83791d chip appears to be part way between the 83781d and the diff --git a/trunk/drivers/hwmon/w83792d.c b/trunk/drivers/hwmon/w83792d.c index 
053645279f38..301942d08453 100644 --- a/trunk/drivers/hwmon/w83792d.c +++ b/trunk/drivers/hwmon/w83792d.c @@ -31,7 +31,7 @@ /* * Supports following chips: * - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * w83792d 9 7 7 3 0x7a 0x5ca3 yes no */ diff --git a/trunk/drivers/hwmon/w83l786ng.c b/trunk/drivers/hwmon/w83l786ng.c index f0e8286c3c70..79710bcac2f7 100644 --- a/trunk/drivers/hwmon/w83l786ng.c +++ b/trunk/drivers/hwmon/w83l786ng.c @@ -20,7 +20,7 @@ /* * Supports following chips: * - * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA + * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * w83l786ng 3 2 2 2 0x7b 0x5ca3 yes no */ diff --git a/trunk/drivers/i2c/busses/i2c-at91.c b/trunk/drivers/i2c/busses/i2c-at91.c index aa59a254be2c..c02bf208084f 100644 --- a/trunk/drivers/i2c/busses/i2c-at91.c +++ b/trunk/drivers/i2c/busses/i2c-at91.c @@ -39,6 +39,7 @@ #define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */ #define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */ #define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */ +#define AT91_TWI_QUICK 0x0040 /* SMBus quick command */ #define AT91_TWI_SWRST 0x0080 /* Software Reset */ #define AT91_TWI_MMR 0x0004 /* Master Mode Register */ @@ -212,7 +213,11 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) INIT_COMPLETION(dev->cmd_complete); dev->transfer_status = 0; - if (dev->msg->flags & I2C_M_RD) { + + if (!dev->buf_len) { + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); + } else if (dev->msg->flags & I2C_M_RD) { unsigned start_flags = AT91_TWI_START; if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) { diff --git a/trunk/drivers/i2c/busses/i2c-mxs.c b/trunk/drivers/i2c/busses/i2c-mxs.c index 286ca1917820..0670da79ee5e 100644 --- a/trunk/drivers/i2c/busses/i2c-mxs.c +++ b/trunk/drivers/i2c/busses/i2c-mxs.c @@ -287,12 +287,14 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, select_init_dma_fail: dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); select_init_pio_fail: + dmaengine_terminate_all(i2c->dmach); return -EINVAL; /* Write failpath. 
*/ write_init_dma_fail: dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); write_init_pio_fail: + dmaengine_terminate_all(i2c->dmach); return -EINVAL; } diff --git a/trunk/drivers/i2c/busses/i2c-omap.c b/trunk/drivers/i2c/busses/i2c-omap.c index db31eaed6ea5..3525c9e62cb0 100644 --- a/trunk/drivers/i2c/busses/i2c-omap.c +++ b/trunk/drivers/i2c/busses/i2c-omap.c @@ -43,7 +43,6 @@ #include #include #include -#include /* I2C controller revisions */ #define OMAP_I2C_OMAP1_REV_2 0x20 @@ -187,8 +186,9 @@ struct omap_i2c_dev { int reg_shift; /* bit shift for I2C register addresses */ struct completion cmd_complete; struct resource *ioarea; - u32 latency; /* maximum MPU wkup latency */ - struct pm_qos_request pm_qos_request; + u32 latency; /* maximum mpu wkup latency */ + void (*set_mpu_wkup_lat)(struct device *dev, + long latency); u32 speed; /* Speed of bus in kHz */ u32 dtrev; /* extra revision from DT */ u32 flags; @@ -494,7 +494,9 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx) dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ - dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8); + if (dev->set_mpu_wkup_lat != NULL) + dev->latency = (1000000 * dev->threshold) / + (1000 * dev->speed / 8); } /* @@ -522,6 +524,9 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, dev->buf = msg->buf; dev->buf_len = msg->len; + /* make sure writes to dev->buf_len are ordered */ + barrier(); + omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); /* Clear the FIFO Buffers */ @@ -579,7 +584,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, */ timeout = wait_for_completion_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT); - dev->buf_len = 0; if (timeout == 0) { dev_err(dev->dev, "controller timed out\n"); omap_i2c_init(dev); @@ -629,16 +633,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) if (r < 0) goto out; - /* - * When waiting for completion of a i2c transfer, we need to - * set a wake up latency constraint for the MPU. This is to - * ensure quick enough wakeup from idle, when transfer - * completes. 
- */ - if (dev->latency) - pm_qos_add_request(&dev->pm_qos_request, - PM_QOS_CPU_DMA_LATENCY, - dev->latency); + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, dev->latency); for (i = 0; i < num; i++) { r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); @@ -646,8 +642,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) break; } - if (dev->latency) - pm_qos_remove_request(&dev->pm_qos_request); + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, -1); if (r == 0) r = num; @@ -1104,6 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev) } else if (pdata != NULL) { dev->speed = pdata->clkrate; dev->flags = pdata->flags; + dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; dev->dtrev = pdata->rev; } @@ -1159,8 +1156,9 @@ omap_i2c_probe(struct platform_device *pdev) dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ - dev->latency = (1000000 * dev->fifo_size) / - (1000 * dev->speed / 8); + if (dev->set_mpu_wkup_lat != NULL) + dev->latency = (1000000 * dev->fifo_size) / + (1000 * dev->speed / 8); } /* reset ASAP, clearing any IRQs */ diff --git a/trunk/drivers/i2c/busses/i2c-s3c2410.c b/trunk/drivers/i2c/busses/i2c-s3c2410.c index 3e0335f1fc60..9d902725bac9 100644 --- a/trunk/drivers/i2c/busses/i2c-s3c2410.c +++ b/trunk/drivers/i2c/busses/i2c-s3c2410.c @@ -806,6 +806,7 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c) dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio); goto free_gpio; } + i2c->gpios[idx] = gpio; ret = gpio_request(gpio, "i2c-bus"); if (ret) { diff --git a/trunk/drivers/i2c/muxes/i2c-mux-pinctrl.c b/trunk/drivers/i2c/muxes/i2c-mux-pinctrl.c index 5f097f309b9f..7fa5b24b16db 100644 --- a/trunk/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/trunk/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -169,7 +169,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) mux->busses = devm_kzalloc(&pdev->dev, sizeof(mux->busses) * mux->pdata->bus_count, GFP_KERNEL); - if (!mux->states) { + if (!mux->busses) { dev_err(&pdev->dev, "Cannot allocate busses\n"); ret = -ENOMEM; goto err; diff --git a/trunk/drivers/input/input-mt.c b/trunk/drivers/input/input-mt.c index c0ec7d42c3be..1abbc170d8b7 100644 --- a/trunk/drivers/input/input-mt.c +++ b/trunk/drivers/input/input-mt.c @@ -26,10 +26,14 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src) * input_mt_init_slots() - initialize MT input slots * @dev: input device supporting MT events and finger tracking * @num_slots: number of slots used by the device + * @flags: mt tasks to handle in core * * This function allocates all necessary memory for MT slot handling * in the input device, prepares the ABS_MT_SLOT and * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers. + * Depending on the flags set, it also performs pointer emulation and + * frame synchronization. + * * May be called repeatedly. Returns -EINVAL if attempting to * reinitialize with a different number of slots. 
*/ diff --git a/trunk/drivers/input/mousedev.c b/trunk/drivers/input/mousedev.c index 8f02e3d0e712..4c842c320c2e 100644 --- a/trunk/drivers/input/mousedev.c +++ b/trunk/drivers/input/mousedev.c @@ -12,8 +12,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MOUSEDEV_MINOR_BASE 32 -#define MOUSEDEV_MINORS 32 -#define MOUSEDEV_MIX 31 +#define MOUSEDEV_MINORS 31 +#define MOUSEDEV_MIX 63 #include #include diff --git a/trunk/drivers/input/touchscreen/ads7846.c b/trunk/drivers/input/touchscreen/ads7846.c index f02028ec3db6..78e5d9ab0ba7 100644 --- a/trunk/drivers/input/touchscreen/ads7846.c +++ b/trunk/drivers/input/touchscreen/ads7846.c @@ -955,7 +955,8 @@ static int ads7846_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume); -static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts) +static int __devinit ads7846_setup_pendown(struct spi_device *spi, + struct ads7846 *ts) { struct ads7846_platform_data *pdata = spi->dev.platform_data; int err; @@ -981,6 +982,9 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784 ts->gpio_pendown = pdata->gpio_pendown; + if (pdata->gpio_pendown_debounce) + gpio_set_debounce(pdata->gpio_pendown, + pdata->gpio_pendown_debounce); } else { dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); return -EINVAL; diff --git a/trunk/drivers/iommu/intel-iommu.c b/trunk/drivers/iommu/intel-iommu.c index d4a4cd445cab..0badfa48b32b 100644 --- a/trunk/drivers/iommu/intel-iommu.c +++ b/trunk/drivers/iommu/intel-iommu.c @@ -4108,7 +4108,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) static int intel_iommu_add_device(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct pci_dev *bridge, *dma_pdev; + struct pci_dev *bridge, *dma_pdev = NULL; struct iommu_group *group; int ret; @@ -4122,7 +4122,7 @@ static int intel_iommu_add_device(struct device *dev) dma_pdev = pci_get_domain_bus_and_slot( pci_domain_nr(pdev->bus), bridge->subordinate->number, 0); - else + if (!dma_pdev) dma_pdev = pci_dev_get(bridge); } else dma_pdev = pci_dev_get(pdev); diff --git a/trunk/drivers/iommu/tegra-smmu.c b/trunk/drivers/iommu/tegra-smmu.c index a649f146d17b..c0f7a4266263 100644 --- a/trunk/drivers/iommu/tegra-smmu.c +++ b/trunk/drivers/iommu/tegra-smmu.c @@ -1054,6 +1054,7 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v) stats[i], val, offs); } seq_printf(s, "\n"); + dput(dent); return 0; } diff --git a/trunk/drivers/irqchip/irq-bcm2835.c b/trunk/drivers/irqchip/irq-bcm2835.c index dc670ccc6978..16c78f1c5ef2 100644 --- a/trunk/drivers/irqchip/irq-bcm2835.c +++ b/trunk/drivers/irqchip/irq-bcm2835.c @@ -168,7 +168,8 @@ static int __init armctrl_of_init(struct device_node *node, } static struct of_device_id irq_of_match[] __initconst = { - { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init } + { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init }, + { } }; void __init bcm2835_init_irq(void) diff --git a/trunk/drivers/isdn/Kconfig b/trunk/drivers/isdn/Kconfig index a233ed53913a..86cd75a0e84d 100644 --- a/trunk/drivers/isdn/Kconfig +++ b/trunk/drivers/isdn/Kconfig @@ -4,7 +4,7 @@ menuconfig ISDN bool "ISDN support" - depends on NET + depends on NET && NETDEVICES depends on !S390 && !UML ---help--- ISDN ("Integrated Services Digital Network", called RNIS in France) diff --git a/trunk/drivers/isdn/i4l/Kconfig b/trunk/drivers/isdn/i4l/Kconfig index 2302fbe70ac6..9c6650ea848e 100644 --- 
a/trunk/drivers/isdn/i4l/Kconfig +++ b/trunk/drivers/isdn/i4l/Kconfig @@ -6,7 +6,7 @@ if ISDN_I4L config ISDN_PPP bool "Support synchronous PPP" - depends on INET && NETDEVICES + depends on INET select SLHC help Over digital connections such as ISDN, there is no need to diff --git a/trunk/drivers/isdn/i4l/isdn_common.c b/trunk/drivers/isdn/i4l/isdn_common.c index 8c610fa6782b..e2a945ee9f05 100644 --- a/trunk/drivers/isdn/i4l/isdn_common.c +++ b/trunk/drivers/isdn/i4l/isdn_common.c @@ -1312,7 +1312,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) } else return -EINVAL; break; -#ifdef CONFIG_NETDEVICES case IIOCNETGPN: /* Get peer phone number of a connected * isdn network interface */ @@ -1322,7 +1321,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) return isdn_net_getpeer(&phone, argp); } else return -EINVAL; -#endif default: return -EINVAL; } @@ -1352,7 +1350,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) case IIOCNETLCR: printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); return -ENODEV; -#ifdef CONFIG_NETDEVICES case IIOCNETAIF: /* Add a network-interface */ if (arg) { @@ -1491,7 +1488,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) return -EFAULT; return isdn_net_force_hangup(name); break; -#endif /* CONFIG_NETDEVICES */ case IIOCSETVER: dev->net_verbose = arg; printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose); diff --git a/trunk/drivers/leds/ledtrig-cpu.c b/trunk/drivers/leds/ledtrig-cpu.c index b312056da14d..4239b3955ff0 100644 --- a/trunk/drivers/leds/ledtrig-cpu.c +++ b/trunk/drivers/leds/ledtrig-cpu.c @@ -33,8 +33,6 @@ struct led_trigger_cpu { char name[MAX_NAME_LEN]; struct led_trigger *_trig; - struct mutex lock; - int lock_is_inited; }; static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig); @@ -50,12 +48,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt) { struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig); - /* mutex lock should be initialized before calling mutex_call() */ - if (!trig->lock_is_inited) - return; - - mutex_lock(&trig->lock); - /* Locate the correct CPU LED */ switch (ledevt) { case CPU_LED_IDLE_END: @@ -75,8 +67,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt) /* Will leave the LED as it is */ break; } - - mutex_unlock(&trig->lock); } EXPORT_SYMBOL(ledtrig_cpu); @@ -117,14 +107,9 @@ static int __init ledtrig_cpu_init(void) for_each_possible_cpu(cpu) { struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); - mutex_init(&trig->lock); - snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu); - mutex_lock(&trig->lock); led_trigger_register_simple(trig->name, &trig->_trig); - trig->lock_is_inited = 1; - mutex_unlock(&trig->lock); } register_syscore_ops(&ledtrig_cpu_syscore_ops); @@ -142,15 +127,9 @@ static void __exit ledtrig_cpu_exit(void) for_each_possible_cpu(cpu) { struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); - mutex_lock(&trig->lock); - led_trigger_unregister_simple(trig->_trig); trig->_trig = NULL; memset(trig->name, 0, MAX_NAME_LEN); - trig->lock_is_inited = 0; - - mutex_unlock(&trig->lock); - mutex_destroy(&trig->lock); } unregister_syscore_ops(&ledtrig_cpu_syscore_ops); diff --git a/trunk/drivers/md/dm.c b/trunk/drivers/md/dm.c index 02db9183ca01..77e6eff41cae 100644 --- a/trunk/drivers/md/dm.c +++ b/trunk/drivers/md/dm.c @@ -740,8 +740,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue) if (!md_in_flight(md)) wake_up(&md->wait); + /* + * Run this off this callpath, as drivers could invoke end_io while + * inside their request_fn (and holding the queue lock). 
Calling + * back into ->request_fn() could deadlock attempting to grab the + * queue lock again. + */ if (run_queue) - blk_run_queue(md->queue); + blk_run_queue_async(md->queue); /* * dm_put() must be at the end of this function. See the comment above diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index 9ab768acfb62..61200717687b 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -1817,10 +1817,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) memset(bbp, 0xff, PAGE_SIZE); for (i = 0 ; i < bb->count ; i++) { - u64 internal_bb = *p++; + u64 internal_bb = p[i]; u64 store_bb = ((BB_OFFSET(internal_bb) << 10) | BB_LEN(internal_bb)); - *bbp++ = cpu_to_le64(store_bb); + bbp[i] = cpu_to_le64(store_bb); } bb->changed = 0; if (read_seqretry(&bb->lock, seq)) @@ -5294,7 +5294,7 @@ void md_stop_writes(struct mddev *mddev) } EXPORT_SYMBOL_GPL(md_stop_writes); -void md_stop(struct mddev *mddev) +static void __md_stop(struct mddev *mddev) { mddev->ready = 0; mddev->pers->stop(mddev); @@ -5304,6 +5304,18 @@ void md_stop(struct mddev *mddev) mddev->pers = NULL; clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } + +void md_stop(struct mddev *mddev) +{ + /* stop the array and free an attached data structures. + * This is called from dm-raid + */ + __md_stop(mddev); + bitmap_destroy(mddev); + if (mddev->bio_set) + bioset_free(mddev->bio_set); +} + EXPORT_SYMBOL_GPL(md_stop); static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) @@ -5364,7 +5376,7 @@ static int do_md_stop(struct mddev * mddev, int mode, set_disk_ro(disk, 0); __md_stop_writes(mddev); - md_stop(mddev); + __md_stop(mddev); mddev->queue->merge_bvec_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; @@ -7936,9 +7948,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, sector_t *first_bad, int *bad_sectors) { int hi; - int lo = 0; + int lo; u64 *p = bb->page; - int rv = 0; + int rv; sector_t target = s + sectors; unsigned seq; @@ -7953,7 +7965,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, retry: seq = read_seqbegin(&bb->lock); - + lo = 0; + rv = 0; hi = bb->count; /* Binary search between lo and hi for 'target' diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c index d1295aff4173..0d5d0ff2c0f7 100644 --- a/trunk/drivers/md/raid10.c +++ b/trunk/drivers/md/raid10.c @@ -499,7 +499,7 @@ static void raid10_end_write_request(struct bio *bio, int error) */ one_write_done(r10_bio); if (dec_rdev) - rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); + rdev_dec_pending(rdev, conf->mddev); } /* @@ -1334,18 +1334,21 @@ static void make_request(struct mddev *mddev, struct bio * bio) blocked_rdev = rrdev; break; } + if (rdev && (test_bit(Faulty, &rdev->flags) + || test_bit(Unmerged, &rdev->flags))) + rdev = NULL; if (rrdev && (test_bit(Faulty, &rrdev->flags) || test_bit(Unmerged, &rrdev->flags))) rrdev = NULL; r10_bio->devs[i].bio = NULL; r10_bio->devs[i].repl_bio = NULL; - if (!rdev || test_bit(Faulty, &rdev->flags) || - test_bit(Unmerged, &rdev->flags)) { + + if (!rdev && !rrdev) { set_bit(R10BIO_Degraded, &r10_bio->state); continue; } - if (test_bit(WriteErrorSeen, &rdev->flags)) { + if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { sector_t first_bad; sector_t dev_sector = r10_bio->devs[i].addr; int bad_sectors; @@ -1387,8 +1390,10 @@ static void make_request(struct mddev *mddev, struct bio * bio) max_sectors = good_sectors; } } - r10_bio->devs[i].bio = bio; - atomic_inc(&rdev->nr_pending); + if (rdev) { + 
r10_bio->devs[i].bio = bio; + atomic_inc(&rdev->nr_pending); + } if (rrdev) { r10_bio->devs[i].repl_bio = bio; atomic_inc(&rrdev->nr_pending); @@ -1444,69 +1449,71 @@ static void make_request(struct mddev *mddev, struct bio * bio) for (i = 0; i < conf->copies; i++) { struct bio *mbio; int d = r10_bio->devs[i].devnum; - if (!r10_bio->devs[i].bio) - continue; + if (r10_bio->devs[i].bio) { + struct md_rdev *rdev = conf->mirrors[d].rdev; + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); + r10_bio->devs[i].bio = mbio; + + mbio->bi_sector = (r10_bio->devs[i].addr+ + choose_data_offset(r10_bio, + rdev)); + mbio->bi_bdev = rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; + mbio->bi_private = r10_bio; - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, - max_sectors); - r10_bio->devs[i].bio = mbio; + atomic_inc(&r10_bio->remaining); - mbio->bi_sector = (r10_bio->devs[i].addr+ - choose_data_offset(r10_bio, - conf->mirrors[d].rdev)); - mbio->bi_bdev = conf->mirrors[d].rdev->bdev; - mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; - mbio->bi_private = r10_bio; + cb = blk_check_plugged(raid10_unplug, mddev, + sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid10_plug_cb, + cb); + else + plug = NULL; + spin_lock_irqsave(&conf->device_lock, flags); + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } + spin_unlock_irqrestore(&conf->device_lock, flags); + if (!plug) + md_wakeup_thread(mddev->thread); + } - atomic_inc(&r10_bio->remaining); + if (r10_bio->devs[i].repl_bio) { + struct md_rdev *rdev = conf->mirrors[d].replacement; + if (rdev == NULL) { + /* Replacement just got moved to main 'rdev' */ + smp_mb(); + rdev = conf->mirrors[d].rdev; + } + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); + r10_bio->devs[i].repl_bio = mbio; + + mbio->bi_sector = (r10_bio->devs[i].addr + + choose_data_offset( + r10_bio, rdev)); + mbio->bi_bdev = rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; + mbio->bi_private = r10_bio; - cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); - if (cb) - plug = container_of(cb, struct raid10_plug_cb, cb); - else - plug = NULL; - spin_lock_irqsave(&conf->device_lock, flags); - if (plug) { - bio_list_add(&plug->pending, mbio); - plug->pending_cnt++; - } else { + atomic_inc(&r10_bio->remaining); + spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; + spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); } - spin_unlock_irqrestore(&conf->device_lock, flags); - if (!plug) - md_wakeup_thread(mddev->thread); - - if (!r10_bio->devs[i].repl_bio) - continue; - - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, - max_sectors); - r10_bio->devs[i].repl_bio = mbio; - - /* We are actively writing to the original device - * so it cannot disappear, so the replacement cannot - * become NULL here - */ - mbio->bi_sector = (r10_bio->devs[i].addr + - choose_data_offset( - r10_bio, - conf->mirrors[d].replacement)); - mbio->bi_bdev = 
conf->mirrors[d].replacement->bdev; - mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; - mbio->bi_private = r10_bio; - - atomic_inc(&r10_bio->remaining); - spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; - spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) - md_wakeup_thread(mddev->thread); } /* Don't remove the bias on 'remaining' (one_write_done) until diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index c5439dce0295..a4502686e7a8 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -2774,10 +2774,12 @@ static void handle_stripe_clean_event(struct r5conf *conf, dev = &sh->dev[i]; if (!test_bit(R5_LOCKED, &dev->flags) && (test_bit(R5_UPTODATE, &dev->flags) || - test_and_clear_bit(R5_Discard, &dev->flags))) { + test_bit(R5_Discard, &dev->flags))) { /* We can return any write requests */ struct bio *wbi, *wbi2; pr_debug("Return write for disc %d\n", i); + if (test_and_clear_bit(R5_Discard, &dev->flags)) + clear_bit(R5_UPTODATE, &dev->flags); wbi = dev->written; dev->written = NULL; while (wbi && wbi->bi_sector < @@ -2795,7 +2797,8 @@ static void handle_stripe_clean_event(struct r5conf *conf, !test_bit(STRIPE_DEGRADED, &sh->state), 0); } - } + } else if (test_bit(R5_Discard, &sh->dev[i].flags)) + clear_bit(R5_Discard, &sh->dev[i].flags); if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -3490,40 +3493,6 @@ static void handle_stripe(struct stripe_head *sh) handle_failed_sync(conf, sh, &s); } - /* - * might be able to return some write requests if the parity blocks - * are safe, or on a failed drive - */ - pdev = &sh->dev[sh->pd_idx]; - s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) - || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); - qdev = &sh->dev[sh->qd_idx]; - s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) - || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) - || conf->level < 6; - - if (s.written && - (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) - && !test_bit(R5_LOCKED, &pdev->flags) - && (test_bit(R5_UPTODATE, &pdev->flags) || - test_bit(R5_Discard, &pdev->flags))))) && - (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) - && !test_bit(R5_LOCKED, &qdev->flags) - && (test_bit(R5_UPTODATE, &qdev->flags) || - test_bit(R5_Discard, &qdev->flags)))))) - handle_stripe_clean_event(conf, sh, disks, &s.return_bi); - - /* Now we might consider reading some blocks, either to check/generate - * parity, or to satisfy requests - * or to load a block that is being partially written. 
- */ - if (s.to_read || s.non_overwrite - || (conf->level == 6 && s.to_write && s.failed) - || (s.syncing && (s.uptodate + s.compute < disks)) - || s.replacing - || s.expanding) - handle_stripe_fill(sh, &s, disks); - /* Now we check to see if any write operations have recently * completed */ @@ -3561,6 +3530,40 @@ static void handle_stripe(struct stripe_head *sh) s.dec_preread_active = 1; } + /* + * might be able to return some write requests if the parity blocks + * are safe, or on a failed drive + */ + pdev = &sh->dev[sh->pd_idx]; + s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) + || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); + qdev = &sh->dev[sh->qd_idx]; + s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) + || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) + || conf->level < 6; + + if (s.written && + (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) + && !test_bit(R5_LOCKED, &pdev->flags) + && (test_bit(R5_UPTODATE, &pdev->flags) || + test_bit(R5_Discard, &pdev->flags))))) && + (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) + && !test_bit(R5_LOCKED, &qdev->flags) + && (test_bit(R5_UPTODATE, &qdev->flags) || + test_bit(R5_Discard, &qdev->flags)))))) + handle_stripe_clean_event(conf, sh, disks, &s.return_bi); + + /* Now we might consider reading some blocks, either to check/generate + * parity, or to satisfy requests + * or to load a block that is being partially written. + */ + if (s.to_read || s.non_overwrite + || (conf->level == 6 && s.to_write && s.failed) + || (s.syncing && (s.uptodate + s.compute < disks)) + || s.replacing + || s.expanding) + handle_stripe_fill(sh, &s, disks); + /* Now to consider new write requests and what else, if anything * should be read. We do not handle new writes when: * 1/ A 'write' operation (copy+xor) is already in flight. 
@@ -5529,6 +5532,10 @@ static int run(struct mddev *mddev) * discard data disk but write parity disk */ stripe = stripe * PAGE_SIZE; + /* Round up to power of 2, as discard handling + * currently assumes that */ + while ((stripe-1) & stripe) + stripe = (stripe | (stripe-1)) + 1; mddev->queue->limits.discard_alignment = stripe; mddev->queue->limits.discard_granularity = stripe; /* diff --git a/trunk/drivers/mmc/host/dw_mmc-exynos.c b/trunk/drivers/mmc/host/dw_mmc-exynos.c index 660bbc528862..4d50da618166 100644 --- a/trunk/drivers/mmc/host/dw_mmc-exynos.c +++ b/trunk/drivers/mmc/host/dw_mmc-exynos.c @@ -208,7 +208,7 @@ static unsigned long exynos5250_dwmmc_caps[4] = { MMC_CAP_CMD23, }; -static struct dw_mci_drv_data exynos5250_drv_data = { +static const struct dw_mci_drv_data exynos5250_drv_data = { .caps = exynos5250_dwmmc_caps, .init = dw_mci_exynos_priv_init, .setup_clock = dw_mci_exynos_setup_clock, @@ -220,14 +220,14 @@ static struct dw_mci_drv_data exynos5250_drv_data = { static const struct of_device_id dw_mci_exynos_match[] = { { .compatible = "samsung,exynos5250-dw-mshc", - .data = (void *)&exynos5250_drv_data, }, + .data = &exynos5250_drv_data, }, {}, }; -MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match); +MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); int dw_mci_exynos_probe(struct platform_device *pdev) { - struct dw_mci_drv_data *drv_data; + const struct dw_mci_drv_data *drv_data; const struct of_device_id *match; match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node); diff --git a/trunk/drivers/mmc/host/dw_mmc-pltfm.c b/trunk/drivers/mmc/host/dw_mmc-pltfm.c index c960ca7ffbe6..917936bee5d5 100644 --- a/trunk/drivers/mmc/host/dw_mmc-pltfm.c +++ b/trunk/drivers/mmc/host/dw_mmc-pltfm.c @@ -24,7 +24,7 @@ #include "dw_mmc.h" int dw_mci_pltfm_register(struct platform_device *pdev, - struct dw_mci_drv_data *drv_data) + const struct dw_mci_drv_data *drv_data) { struct dw_mci *host; struct resource *regs; @@ -50,8 +50,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev, if (!host->regs) return -ENOMEM; - if (host->drv_data->init) { - ret = host->drv_data->init(host); + if (drv_data && drv_data->init) { + ret = drv_data->init(host); if (ret) return ret; } diff --git a/trunk/drivers/mmc/host/dw_mmc-pltfm.h b/trunk/drivers/mmc/host/dw_mmc-pltfm.h index 301f24541fc2..2ac37b81de4d 100644 --- a/trunk/drivers/mmc/host/dw_mmc-pltfm.h +++ b/trunk/drivers/mmc/host/dw_mmc-pltfm.h @@ -13,7 +13,7 @@ #define _DW_MMC_PLTFM_H_ extern int dw_mci_pltfm_register(struct platform_device *pdev, - struct dw_mci_drv_data *drv_data); + const struct dw_mci_drv_data *drv_data); extern int __devexit dw_mci_pltfm_remove(struct platform_device *pdev); extern const struct dev_pm_ops dw_mci_pltfm_pmops; diff --git a/trunk/drivers/mmc/host/dw_mmc.c b/trunk/drivers/mmc/host/dw_mmc.c index c2828f35c3b8..c0667c8af2bd 100644 --- a/trunk/drivers/mmc/host/dw_mmc.c +++ b/trunk/drivers/mmc/host/dw_mmc.c @@ -232,6 +232,7 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) { struct mmc_data *data; struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci_drv_data *drv_data = slot->host->drv_data; u32 cmdr; cmd->error = -EINPROGRESS; @@ -261,8 +262,8 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) cmdr |= SDMMC_CMD_DAT_WR; } - if (slot->host->drv_data->prepare_command) - slot->host->drv_data->prepare_command(slot->host, &cmdr); + if (drv_data && drv_data->prepare_command) + drv_data->prepare_command(slot->host, &cmdr); return cmdr; } @@ -434,7 +435,7 @@ 
static int dw_mci_idmac_init(struct dw_mci *host) return 0; } -static struct dw_mci_dma_ops dw_mci_idmac_ops = { +static const struct dw_mci_dma_ops dw_mci_idmac_ops = { .init = dw_mci_idmac_init, .start = dw_mci_idmac_start_dma, .stop = dw_mci_idmac_stop_dma, @@ -772,6 +773,7 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci_drv_data *drv_data = slot->host->drv_data; u32 regs; /* set default 1 bit mode */ @@ -807,8 +809,8 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) slot->clock = ios->clock; } - if (slot->host->drv_data->set_ios) - slot->host->drv_data->set_ios(slot->host, ios); + if (drv_data && drv_data->set_ios) + drv_data->set_ios(slot->host, ios); switch (ios->power_mode) { case MMC_POWER_UP: @@ -1815,6 +1817,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) { struct mmc_host *mmc; struct dw_mci_slot *slot; + struct dw_mci_drv_data *drv_data = host->drv_data; int ctrl_id, ret; u8 bus_width; @@ -1854,8 +1857,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) } else { ctrl_id = to_platform_device(host->dev)->id; } - if (host->drv_data && host->drv_data->caps) - mmc->caps |= host->drv_data->caps[ctrl_id]; + if (drv_data && drv_data->caps) + mmc->caps |= drv_data->caps[ctrl_id]; if (host->pdata->caps2) mmc->caps2 = host->pdata->caps2; @@ -1867,10 +1870,10 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) else bus_width = 1; - if (host->drv_data->setup_bus) { + if (drv_data && drv_data->setup_bus) { struct device_node *slot_np; slot_np = dw_mci_of_find_slot_node(host->dev, slot->id); - ret = host->drv_data->setup_bus(host, slot_np, bus_width); + ret = drv_data->setup_bus(host, slot_np, bus_width); if (ret) goto err_setup_bus; } @@ -1968,7 +1971,7 @@ static void dw_mci_init_dma(struct dw_mci *host) /* Determine which DMA interface to use */ #ifdef CONFIG_MMC_DW_IDMAC host->dma_ops = &dw_mci_idmac_ops; - dev_info(&host->dev, "Using internal DMA controller.\n"); + dev_info(host->dev, "Using internal DMA controller.\n"); #endif if (!host->dma_ops) @@ -2035,6 +2038,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) struct dw_mci_board *pdata; struct device *dev = host->dev; struct device_node *np = dev->of_node; + struct dw_mci_drv_data *drv_data = host->drv_data; int idx, ret; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); @@ -2062,8 +2066,8 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); - if (host->drv_data->parse_dt) { - ret = host->drv_data->parse_dt(host); + if (drv_data && drv_data->parse_dt) { + ret = drv_data->parse_dt(host); if (ret) return ERR_PTR(ret); } @@ -2080,6 +2084,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) int dw_mci_probe(struct dw_mci *host) { + struct dw_mci_drv_data *drv_data = host->drv_data; int width, i, ret = 0; u32 fifo_size; int init_slots = 0; @@ -2127,8 +2132,8 @@ int dw_mci_probe(struct dw_mci *host) else host->bus_hz = clk_get_rate(host->ciu_clk); - if (host->drv_data->setup_clock) { - ret = host->drv_data->setup_clock(host); + if (drv_data && drv_data->setup_clock) { + ret = drv_data->setup_clock(host); if (ret) { dev_err(host->dev, "implementation specific clock setup failed\n"); @@ -2228,6 +2233,21 @@ int dw_mci_probe(struct dw_mci *host) else host->num_slots = 
((mci_readl(host, HCON) >> 1) & 0x1F) + 1; + /* + * Enable interrupts for command done, data over, data empty, card det, + * receive ready and error such as transmit, receive timeout, crc error + */ + mci_writel(host, RINTSTS, 0xFFFFFFFF); + mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | + SDMMC_INT_TXDR | SDMMC_INT_RXDR | + DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); + mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ + + dev_info(host->dev, "DW MMC controller at irq %d, " + "%d bit host data width, " + "%u deep fifo\n", + host->irq, width, fifo_size); + /* We need at least one slot to succeed */ for (i = 0; i < host->num_slots; i++) { ret = dw_mci_init_slot(host, i); @@ -2257,20 +2277,6 @@ int dw_mci_probe(struct dw_mci *host) else host->data_offset = DATA_240A_OFFSET; - /* - * Enable interrupts for command done, data over, data empty, card det, - * receive ready and error such as transmit, receive timeout, crc error - */ - mci_writel(host, RINTSTS, 0xFFFFFFFF); - mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | - SDMMC_INT_TXDR | SDMMC_INT_RXDR | - DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); - mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ - - dev_info(host->dev, "DW MMC controller at irq %d, " - "%d bit host data width, " - "%u deep fifo\n", - host->irq, width, fifo_size); if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); diff --git a/trunk/drivers/mmc/host/mxcmmc.c b/trunk/drivers/mmc/host/mxcmmc.c index 565c2e4fac75..6290b7f1ccfe 100644 --- a/trunk/drivers/mmc/host/mxcmmc.c +++ b/trunk/drivers/mmc/host/mxcmmc.c @@ -1134,4 +1134,4 @@ module_platform_driver(mxcmci_driver); MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-mmc"); +MODULE_ALIAS("platform:mxc-mmc"); diff --git a/trunk/drivers/mmc/host/omap_hsmmc.c b/trunk/drivers/mmc/host/omap_hsmmc.c index 54bfd0cc106b..fedd258cc4ea 100644 --- a/trunk/drivers/mmc/host/omap_hsmmc.c +++ b/trunk/drivers/mmc/host/omap_hsmmc.c @@ -178,7 +178,8 @@ struct omap_hsmmc_host { static int omap_hsmmc_card_detect(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; /* NOTE: assumes card detect signal is active-low */ return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); @@ -186,7 +187,8 @@ static int omap_hsmmc_card_detect(struct device *dev, int slot) static int omap_hsmmc_get_wp(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; /* NOTE: assumes write protect signal is active-high */ return gpio_get_value_cansleep(mmc->slots[0].gpio_wp); @@ -194,7 +196,8 @@ static int omap_hsmmc_get_wp(struct device *dev, int slot) static int omap_hsmmc_get_cover_state(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; /* NOTE: assumes card detect signal is active-low */ return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); @@ -204,7 +207,8 @@ static int omap_hsmmc_get_cover_state(struct device *dev, int slot) static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) { - struct 
omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; disable_irq(mmc->slots[0].card_detect_irq); return 0; @@ -212,7 +216,8 @@ static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) static int omap_hsmmc_resume_cdirq(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; enable_irq(mmc->slots[0].card_detect_irq); return 0; @@ -2009,9 +2014,9 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev) clk_put(host->dbclk); } - mmc_free_host(host->mmc); + omap_hsmmc_gpio_free(host->pdata); iounmap(host->base); - omap_hsmmc_gpio_free(pdev->dev.platform_data); + mmc_free_host(host->mmc); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) diff --git a/trunk/drivers/mmc/host/sdhci-dove.c b/trunk/drivers/mmc/host/sdhci-dove.c index 90140eb03e36..8fd50a211037 100644 --- a/trunk/drivers/mmc/host/sdhci-dove.c +++ b/trunk/drivers/mmc/host/sdhci-dove.c @@ -19,6 +19,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include #include #include #include @@ -84,30 +85,32 @@ static int __devinit sdhci_dove_probe(struct platform_device *pdev) struct sdhci_dove_priv *priv; int ret; - ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata); - if (ret) - goto sdhci_dove_register_fail; - priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv), GFP_KERNEL); if (!priv) { dev_err(&pdev->dev, "unable to allocate private data"); - ret = -ENOMEM; - goto sdhci_dove_allocate_fail; + return -ENOMEM; } + priv->clk = clk_get(&pdev->dev, NULL); + if (!IS_ERR(priv->clk)) + clk_prepare_enable(priv->clk); + + ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata); + if (ret) + goto sdhci_dove_register_fail; + host = platform_get_drvdata(pdev); pltfm_host = sdhci_priv(host); pltfm_host->priv = priv; - priv->clk = clk_get(&pdev->dev, NULL); - if (!IS_ERR(priv->clk)) - clk_prepare_enable(priv->clk); return 0; -sdhci_dove_allocate_fail: - sdhci_pltfm_unregister(pdev); sdhci_dove_register_fail: + if (!IS_ERR(priv->clk)) { + clk_disable_unprepare(priv->clk); + clk_put(priv->clk); + } return ret; } @@ -117,14 +120,13 @@ static int __devexit sdhci_dove_remove(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_dove_priv *priv = pltfm_host->priv; - if (priv->clk) { - if (!IS_ERR(priv->clk)) { - clk_disable_unprepare(priv->clk); - clk_put(priv->clk); - } - devm_kfree(&pdev->dev, priv->clk); + sdhci_pltfm_unregister(pdev); + + if (!IS_ERR(priv->clk)) { + clk_disable_unprepare(priv->clk); + clk_put(priv->clk); } - return sdhci_pltfm_unregister(pdev); + return 0; } static const struct of_device_id sdhci_dove_of_match_table[] __devinitdata = { diff --git a/trunk/drivers/mmc/host/sdhci-of-esdhc.c b/trunk/drivers/mmc/host/sdhci-of-esdhc.c index ae5fcbfa1eef..63d219f57cae 100644 --- a/trunk/drivers/mmc/host/sdhci-of-esdhc.c +++ b/trunk/drivers/mmc/host/sdhci-of-esdhc.c @@ -169,6 +169,16 @@ static void esdhc_of_resume(struct sdhci_host *host) } #endif +static void esdhc_of_platform_init(struct sdhci_host *host) +{ + u32 vvn; + + vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); + vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; + if (vvn == VENDOR_V_22) + host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; +} + static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl, .read_w = 
esdhc_readw, @@ -180,6 +190,7 @@ static struct sdhci_ops sdhci_esdhc_ops = { .enable_dma = esdhc_of_enable_dma, .get_max_clock = esdhc_of_get_max_clock, .get_min_clock = esdhc_of_get_min_clock, + .platform_init = esdhc_of_platform_init, #ifdef CONFIG_PM .platform_suspend = esdhc_of_suspend, .platform_resume = esdhc_of_resume, diff --git a/trunk/drivers/mmc/host/sdhci-pci.c b/trunk/drivers/mmc/host/sdhci-pci.c index 4bb74b042a06..04936f353ced 100644 --- a/trunk/drivers/mmc/host/sdhci-pci.c +++ b/trunk/drivers/mmc/host/sdhci-pci.c @@ -1196,7 +1196,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( return ERR_PTR(-ENODEV); } - if (pci_resource_len(pdev, bar) != 0x100) { + if (pci_resource_len(pdev, bar) < 0x100) { dev_err(&pdev->dev, "Invalid iomem size. You may " "experience problems.\n"); } diff --git a/trunk/drivers/mmc/host/sdhci-pltfm.c b/trunk/drivers/mmc/host/sdhci-pltfm.c index 65551a9709cc..27164457f861 100644 --- a/trunk/drivers/mmc/host/sdhci-pltfm.c +++ b/trunk/drivers/mmc/host/sdhci-pltfm.c @@ -150,6 +150,13 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, goto err_remap; } + /* + * Some platforms need to probe the controller to be able to + * determine which caps should be used. + */ + if (host->ops && host->ops->platform_init) + host->ops->platform_init(host); + platform_set_drvdata(pdev, host); return host; diff --git a/trunk/drivers/mmc/host/sdhci-s3c.c b/trunk/drivers/mmc/host/sdhci-s3c.c index 2903949594c6..a54dd5d7a5f9 100644 --- a/trunk/drivers/mmc/host/sdhci-s3c.c +++ b/trunk/drivers/mmc/host/sdhci-s3c.c @@ -211,8 +211,8 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) if (ourhost->cur_clk != best_src) { struct clk *clk = ourhost->clk_bus[best_src]; - clk_enable(clk); - clk_disable(ourhost->clk_bus[ourhost->cur_clk]); + clk_prepare_enable(clk); + clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); /* turn clock off to card before changing clock source */ writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); @@ -607,7 +607,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) } /* enable the local io clock and keep it running for the moment. 
*/ - clk_enable(sc->clk_io); + clk_prepare_enable(sc->clk_io); for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) { struct clk *clk; @@ -638,7 +638,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) } #ifndef CONFIG_PM_RUNTIME - clk_enable(sc->clk_bus[sc->cur_clk]); + clk_prepare_enable(sc->clk_bus[sc->cur_clk]); #endif res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -747,13 +747,14 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) sdhci_s3c_setup_card_detect_gpio(sc); #ifdef CONFIG_PM_RUNTIME - clk_disable(sc->clk_io); + if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) + clk_disable_unprepare(sc->clk_io); #endif return 0; err_req_regs: #ifndef CONFIG_PM_RUNTIME - clk_disable(sc->clk_bus[sc->cur_clk]); + clk_disable_unprepare(sc->clk_bus[sc->cur_clk]); #endif for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { if (sc->clk_bus[ptr]) { @@ -762,7 +763,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) } err_no_busclks: - clk_disable(sc->clk_io); + clk_disable_unprepare(sc->clk_io); clk_put(sc->clk_io); err_io_clk: @@ -794,7 +795,8 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) gpio_free(sc->ext_cd_gpio); #ifdef CONFIG_PM_RUNTIME - clk_enable(sc->clk_io); + if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) + clk_prepare_enable(sc->clk_io); #endif sdhci_remove_host(host, 1); @@ -802,14 +804,14 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); #ifndef CONFIG_PM_RUNTIME - clk_disable(sc->clk_bus[sc->cur_clk]); + clk_disable_unprepare(sc->clk_bus[sc->cur_clk]); #endif for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { if (sc->clk_bus[ptr]) { clk_put(sc->clk_bus[ptr]); } } - clk_disable(sc->clk_io); + clk_disable_unprepare(sc->clk_io); clk_put(sc->clk_io); if (pdev->dev.of_node) { @@ -849,8 +851,8 @@ static int sdhci_s3c_runtime_suspend(struct device *dev) ret = sdhci_runtime_suspend_host(host); - clk_disable(ourhost->clk_bus[ourhost->cur_clk]); - clk_disable(busclk); + clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); + clk_disable_unprepare(busclk); return ret; } @@ -861,8 +863,8 @@ static int sdhci_s3c_runtime_resume(struct device *dev) struct clk *busclk = ourhost->clk_io; int ret; - clk_enable(busclk); - clk_enable(ourhost->clk_bus[ourhost->cur_clk]); + clk_prepare_enable(busclk); + clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]); ret = sdhci_runtime_resume_host(host); return ret; } diff --git a/trunk/drivers/mmc/host/sdhci.c b/trunk/drivers/mmc/host/sdhci.c index 7922adb42386..c7851c0aabce 100644 --- a/trunk/drivers/mmc/host/sdhci.c +++ b/trunk/drivers/mmc/host/sdhci.c @@ -1315,16 +1315,19 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) */ if ((host->flags & SDHCI_NEEDS_RETUNING) && !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { - /* eMMC uses cmd21 while sd and sdio use cmd19 */ - tuning_opcode = mmc->card->type == MMC_TYPE_MMC ? - MMC_SEND_TUNING_BLOCK_HS200 : - MMC_SEND_TUNING_BLOCK; - spin_unlock_irqrestore(&host->lock, flags); - sdhci_execute_tuning(mmc, tuning_opcode); - spin_lock_irqsave(&host->lock, flags); - - /* Restore original mmc_request structure */ - host->mrq = mrq; + if (mmc->card) { + /* eMMC uses cmd21 but sd and sdio use cmd19 */ + tuning_opcode = + mmc->card->type == MMC_TYPE_MMC ? 
+ MMC_SEND_TUNING_BLOCK_HS200 : + MMC_SEND_TUNING_BLOCK; + spin_unlock_irqrestore(&host->lock, flags); + sdhci_execute_tuning(mmc, tuning_opcode); + spin_lock_irqsave(&host->lock, flags); + + /* Restore original mmc_request structure */ + host->mrq = mrq; + } } if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) @@ -2837,6 +2840,9 @@ int sdhci_add_host(struct sdhci_host *host) if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) mmc->caps |= MMC_CAP_4_BIT_DATA; + if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) + mmc->caps &= ~MMC_CAP_CMD23; + if (caps[0] & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; @@ -2846,9 +2852,12 @@ int sdhci_add_host(struct sdhci_host *host) /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc"); - if (IS_ERR(host->vqmmc)) { - pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc)); - host->vqmmc = NULL; + if (IS_ERR_OR_NULL(host->vqmmc)) { + if (PTR_ERR(host->vqmmc) < 0) { + pr_info("%s: no vqmmc regulator found\n", + mmc_hostname(mmc)); + host->vqmmc = NULL; + } } else if (regulator_is_supported_voltage(host->vqmmc, 1800000, 1800000)) regulator_enable(host->vqmmc); @@ -2904,9 +2913,12 @@ int sdhci_add_host(struct sdhci_host *host) ocr_avail = 0; host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); - if (IS_ERR(host->vmmc)) { - pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); - host->vmmc = NULL; + if (IS_ERR_OR_NULL(host->vmmc)) { + if (PTR_ERR(host->vmmc) < 0) { + pr_info("%s: no vmmc regulator found\n", + mmc_hostname(mmc)); + host->vmmc = NULL; + } } else regulator_enable(host->vmmc); diff --git a/trunk/drivers/mmc/host/sdhci.h b/trunk/drivers/mmc/host/sdhci.h index 97653ea8942b..71a4a7ed46c5 100644 --- a/trunk/drivers/mmc/host/sdhci.h +++ b/trunk/drivers/mmc/host/sdhci.h @@ -278,6 +278,7 @@ struct sdhci_ops { void (*hw_reset)(struct sdhci_host *host); void (*platform_suspend)(struct sdhci_host *host); void (*platform_resume)(struct sdhci_host *host); + void (*platform_init)(struct sdhci_host *host); }; #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS diff --git a/trunk/drivers/mmc/host/sh_mmcif.c b/trunk/drivers/mmc/host/sh_mmcif.c index 11d2bc3b51d5..d25bc97dc5c6 100644 --- a/trunk/drivers/mmc/host/sh_mmcif.c +++ b/trunk/drivers/mmc/host/sh_mmcif.c @@ -1466,9 +1466,9 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); + clk_disable(host->hclk); mmc_free_host(host->mmc); pm_runtime_put_sync(&pdev->dev); - clk_disable(host->hclk); pm_runtime_disable(&pdev->dev); return 0; diff --git a/trunk/drivers/mtd/devices/slram.c b/trunk/drivers/mtd/devices/slram.c index 8f52fc858e48..5a5cd2ace4a6 100644 --- a/trunk/drivers/mtd/devices/slram.c +++ b/trunk/drivers/mtd/devices/slram.c @@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength) if (*(szlength) != '+') { devlength = simple_strtoul(szlength, &buffer, 0); - devlength = handle_unit(devlength, buffer) - devstart; + devlength = handle_unit(devlength, buffer); if (devlength < devstart) goto err_out; diff --git a/trunk/drivers/mtd/nand/nand_base.c b/trunk/drivers/mtd/nand/nand_base.c index ec6841d8e956..1a03b7f673ce 100644 --- a/trunk/drivers/mtd/nand/nand_base.c +++ b/trunk/drivers/mtd/nand/nand_base.c @@ -2983,13 +2983,15 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip, /* * Field definitions are in the following datasheets: * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) - * New style (6 byte ID): 
Samsung K9GAG08U0F (p.44) + * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) * - * Check for ID length, cell type, and Hynix/Samsung ID to decide what - * to do. + * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung + * ID to decide what to do. */ - if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) { + if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && + (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && + id_data[5] != 0x00) { /* Calc pagesize */ mtd->writesize = 2048 << (extid & 0x03); extid >>= 2; diff --git a/trunk/drivers/mtd/ofpart.c b/trunk/drivers/mtd/ofpart.c index 64be8f0848b0..d9127e2ed808 100644 --- a/trunk/drivers/mtd/ofpart.c +++ b/trunk/drivers/mtd/ofpart.c @@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, nr_parts = plen / sizeof(part[0]); *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); - if (!pparts) + if (!*pparts) return -ENOMEM; names = of_get_property(dp, "partition-names", &plen); diff --git a/trunk/drivers/mtd/onenand/onenand_base.c b/trunk/drivers/mtd/onenand/onenand_base.c index 7153e0d27101..b3f41f200622 100644 --- a/trunk/drivers/mtd/onenand/onenand_base.c +++ b/trunk/drivers/mtd/onenand/onenand_base.c @@ -3694,7 +3694,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int * flexonenand_set_boundary - Writes the SLC boundary * @param mtd - mtd info structure */ -int flexonenand_set_boundary(struct mtd_info *mtd, int die, +static int flexonenand_set_boundary(struct mtd_info *mtd, int die, int boundary, int lock) { struct onenand_chip *this = mtd->priv; diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index b2530b002125..5f5b69f37d2e 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond) struct net_device *bond_dev = bond->dev; netdev_features_t vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; + unsigned int gso_max_size = GSO_MAX_SIZE; + u16 gso_max_segs = GSO_MAX_SEGS; int i; unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; @@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond) dst_release_flag &= slave->dev->priv_flags; if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; + + gso_max_size = min(gso_max_size, slave->dev->gso_max_size); + gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); } done: bond_dev->vlan_features = vlan_features; bond_dev->hard_header_len = max_hard_header_len; + bond_dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(bond_dev, gso_max_size); flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE; bond_dev->priv_flags = flags | dst_release_flag; diff --git a/trunk/drivers/net/ethernet/8390/ne.c b/trunk/drivers/net/ethernet/8390/ne.c index d04911d33b64..47618e505355 100644 --- a/trunk/drivers/net/ethernet/8390/ne.c +++ b/trunk/drivers/net/ethernet/8390/ne.c @@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; } + SET_NETDEV_DEV(dev, &pdev->dev); err = do_ne_probe(dev); if (err) { free_netdev(dev); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index c65295dded39..6e5bdd1a31d9 100644 --- 
a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) SHMEM_EEE_ADV_STATUS_SHIFT); if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) { DP(BNX2X_MSG_ETHTOOL, - "Direct manipulation of EEE advertisment is not supported\n"); + "Direct manipulation of EEE advertisement is not supported\n"); return -EINVAL; } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 6dd0dd076cc5..f6cfdc6cf20f 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -9941,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, else rc = bnx2x_8483x_disable_eee(phy, params, vars); if (rc) { - DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n"); + DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); return rc; } } else { @@ -12987,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, DP(NETIF_MSG_LINK, "Analyze TX Fault\n"); break; default: - DP(NETIF_MSG_LINK, "Analyze UNKOWN\n"); + DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n"); } DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, old_status, status); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index bd1fd3d87c24..01611b33a93d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9545,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) */ static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) { - u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { - BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); + if (!CHIP_IS_E1x(bp)) { + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. 
Clearing"); + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << BP_FUNC(bp)); + } } } diff --git a/trunk/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/trunk/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 32eec15fe4c2..730ae2cfa49e 100644 --- a/trunk/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/trunk/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox) { struct fw_bye_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, BYE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox) { struct fw_initialize_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, INITIALIZE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) { struct fw_reset_cmd c; + memset(&c, 0, sizeof(c)); INIT_CMD(c, RESET, WRITE); c.val = htonl(reset); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); @@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, HOSTPAGESIZEPF7(sge_hps)); t4_set_reg_field(adap, SGE_CONTROL, - INGPADBOUNDARY(INGPADBOUNDARY_MASK) | + INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE_MASK, INGPADBOUNDARY(fl_align_log - 5) | EGRSTATUSPAGESIZE(stat_len != 64)); @@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, { struct fw_vi_enable_cmd c; + memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); diff --git a/trunk/drivers/net/ethernet/freescale/gianfar.c b/trunk/drivers/net/ethernet/freescale/gianfar.c index 1d03dcdd5e56..19ac096cb07b 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar.c @@ -1353,8 +1353,11 @@ static int gfar_restore(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; - if (!netif_running(ndev)) + if (!netif_running(ndev)) { + netif_device_attach(ndev); + return 0; + } gfar_init_bds(ndev); init_registers(ndev); diff --git a/trunk/drivers/net/ethernet/jme.c b/trunk/drivers/net/ethernet/jme.c index f8064df10cc4..60ac46f4ac08 100644 --- a/trunk/drivers/net/ethernet/jme.c +++ b/trunk/drivers/net/ethernet/jme.c @@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev) jme_clear_pm(jme); JME_NAPI_ENABLE(jme); - tasklet_enable(&jme->linkch_task); - tasklet_enable(&jme->txclean_task); - tasklet_hi_enable(&jme->rxclean_task); - tasklet_hi_enable(&jme->rxempty_task); + tasklet_init(&jme->linkch_task, jme_link_change_tasklet, + (unsigned long) jme); + tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet, + (unsigned long) jme); rc = jme_request_irq(jme); if (rc) @@ -1948,10 +1952,10 @@ jme_close(struct net_device *netdev) JME_NAPI_DISABLE(jme); - tasklet_disable(&jme->linkch_task); - tasklet_disable(&jme->txclean_task); - tasklet_disable(&jme->rxclean_task); - tasklet_disable(&jme->rxempty_task); + tasklet_kill(&jme->linkch_task); + tasklet_kill(&jme->txclean_task); + tasklet_kill(&jme->rxclean_task); + tasklet_kill(&jme->rxempty_task); jme_disable_rx_engine(jme); jme_disable_tx_engine(jme); @@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev, 
tasklet_init(&jme->pcc_task, jme_pcc_tasklet, (unsigned long) jme); - tasklet_init(&jme->linkch_task, - jme_link_change_tasklet, - (unsigned long) jme); - tasklet_init(&jme->txclean_task, - jme_tx_clean_tasklet, - (unsigned long) jme); - tasklet_init(&jme->rxclean_task, - jme_rx_clean_tasklet, - (unsigned long) jme); - tasklet_init(&jme->rxempty_task, - jme_rx_empty_tasklet, - (unsigned long) jme); - tasklet_disable_nosync(&jme->linkch_task); - tasklet_disable_nosync(&jme->txclean_task); - tasklet_disable_nosync(&jme->rxclean_task); - tasklet_disable_nosync(&jme->rxempty_task); jme->dpi.cur = PCC_P1; jme->reg_ghc = 0; diff --git a/trunk/drivers/net/ethernet/marvell/skge.c b/trunk/drivers/net/ethernet/marvell/skge.c index 9b9c2ac5c4c2..d19a143aa5a8 100644 --- a/trunk/drivers/net/ethernet/marvell/skge.c +++ b/trunk/drivers/net/ethernet/marvell/skge.c @@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev) dev0 = hw->dev[0]; unregister_netdev(dev0); - tasklet_disable(&hw->phy_task); + tasklet_kill(&hw->phy_task); spin_lock_irq(&hw->hw_lock); hw->intr_mask = 0; diff --git a/trunk/drivers/net/ethernet/micrel/ksz884x.c b/trunk/drivers/net/ethernet/micrel/ksz884x.c index 318fee91c79d..69e01977a1dd 100644 --- a/trunk/drivers/net/ethernet/micrel/ksz884x.c +++ b/trunk/drivers/net/ethernet/micrel/ksz884x.c @@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev) /* Delay for receive task to stop scheduling itself. */ msleep(2000 / HZ); - tasklet_disable(&hw_priv->rx_tasklet); - tasklet_disable(&hw_priv->tx_tasklet); + tasklet_kill(&hw_priv->rx_tasklet); + tasklet_kill(&hw_priv->tx_tasklet); free_irq(dev->irq, hw_priv->dev); transmit_cleanup(hw_priv, 0); @@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev) rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); if (rc) return rc; - tasklet_enable(&hw_priv->rx_tasklet); - tasklet_enable(&hw_priv->tx_tasklet); + tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, + (unsigned long) hw_priv); + tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, + (unsigned long) hw_priv); hw->promiscuous = 0; hw->all_multi = 0; @@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev, spin_lock_init(&hw_priv->hwlock); mutex_init(&hw_priv->lock); - /* tasklet is enabled. */ - tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, - (unsigned long) hw_priv); - tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, - (unsigned long) hw_priv); - - /* tasklet_enable will decrement the atomic counter. 
*/ - tasklet_disable(&hw_priv->rx_tasklet); - tasklet_disable(&hw_priv->tx_tasklet); - for (i = 0; i < TOTAL_PORT_NUM; i++) init_waitqueue_head(&hw_priv->counter[i].counter); diff --git a/trunk/drivers/net/ethernet/realtek/8139cp.c b/trunk/drivers/net/ethernet/realtek/8139cp.c index 1c818254b7be..b01f83a044c4 100644 --- a/trunk/drivers/net/ethernet/realtek/8139cp.c +++ b/trunk/drivers/net/ethernet/realtek/8139cp.c @@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp) cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); - cpw32_f(HiTxRingAddr, 0); - cpw32_f(HiTxRingAddr + 4, 0); - - ring_dma = cp->ring_dma; - cpw32_f(RxRingAddr, ring_dma & 0xffffffff); - cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); - - ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; - cpw32_f(TxRingAddr, ring_dma & 0xffffffff); - cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); - cp_start_hw(cp); cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ @@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp) cpw8(Config5, cpr8(Config5) & PMEStatus); + cpw32_f(HiTxRingAddr, 0); + cpw32_f(HiTxRingAddr + 4, 0); + + ring_dma = cp->ring_dma; + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); + + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); + cpw16(MultiIntr, 0); cpw8_f(Cfg9346, Cfg9346_Lock); diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c index e7ff886e8047..927aa33d4349 100644 --- a/trunk/drivers/net/ethernet/realtek/r8169.c +++ b/trunk/drivers/net/ethernet/realtek/r8169.c @@ -3827,6 +3827,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_29: case RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_32: @@ -4519,6 +4521,9 @@ static void rtl_set_rx_mode(struct net_device *dev) mc_filter[1] = swab32(data); } + if (tp->mac_version == RTL_GIGA_MAC_VER_35) + mc_filter[1] = mc_filter[0] = 0xffffffff; + RTL_W32(MAR0 + 4, mc_filter[1]); RTL_W32(MAR0 + 0, mc_filter[0]); diff --git a/trunk/drivers/net/ethernet/sis/sis900.c b/trunk/drivers/net/ethernet/sis/sis900.c index fb9f6b38511f..edf5edb13140 100644 --- a/trunk/drivers/net/ethernet/sis/sis900.c +++ b/trunk/drivers/net/ethernet/sis/sis900.c @@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev) netif_start_queue(net_dev); /* Workaround for EDB */ - sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); + sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. 
*/ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); diff --git a/trunk/drivers/net/ethernet/smsc/smsc911x.c b/trunk/drivers/net/ethernet/smsc/smsc911x.c index 62d1baf111ea..c53c0f4e2ce3 100644 --- a/trunk/drivers/net/ethernet/smsc/smsc911x.c +++ b/trunk/drivers/net/ethernet/smsc/smsc911x.c @@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev) static int __devinit smsc911x_init(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - unsigned int byte_test; + unsigned int byte_test, mask; unsigned int to = 100; SMSC_TRACE(pdata, probe, "Driver Parameters:"); @@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev) /* * poll the READY bit in PMT_CTRL. Any other access to the device is * forbidden while this bit isn't set. Try for 100ms + * + * Note that this test is done before the WORD_SWAP register is + * programmed. So in some configurations the READY bit is at 16 before + * WORD_SWAP is written to. This issue is worked around by waiting + * until either bit 0 or bit 16 gets set in PMT_CTRL. + * + * SMSC has confirmed that checking bit 16 (marked as reserved in + * the datasheet) is fine since these bits "will either never be set + * or can only go high after READY does (so also indicate the device + * is ready)". */ - while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) + + mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_); + while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to) udelay(1000); + if (to == 0) { pr_err("Device not READY in 100ms aborting\n"); return -ENODEV; diff --git a/trunk/drivers/net/ethernet/tile/tilegx.c b/trunk/drivers/net/ethernet/tile/tilegx.c index 4e9810013850..66e025ad5df1 100644 --- a/trunk/drivers/net/ethernet/tile/tilegx.c +++ b/trunk/drivers/net/ethernet/tile/tilegx.c @@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev) ingress_irq = rc; tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, - 0, NULL, NULL); + 0, "tile_net", NULL); if (rc != 0) { netdev_err(dev, "request_irq failed: %d\n", rc); destroy_irq(ingress_irq); diff --git a/trunk/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/trunk/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 0793299bd39e..a788501e978e 100644 --- a/trunk/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/trunk/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -894,6 +894,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) return IRQ_HANDLED; } +static void axienet_dma_err_handler(unsigned long data); + /** * axienet_open - Driver open routine. 
* @ndev: Pointer to net_device structure @@ -942,6 +944,10 @@ static int axienet_open(struct net_device *ndev) phy_start(lp->phy_dev); } + /* Enable tasklets for Axi DMA error handling */ + tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, + (unsigned long) lp); + /* Enable interrupts for Axi DMA Tx */ ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); if (ret) @@ -950,8 +956,7 @@ static int axienet_open(struct net_device *ndev) ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); if (ret) goto err_rx_irq; - /* Enable tasklets for Axi DMA error handling */ - tasklet_enable(&lp->dma_err_tasklet); + return 0; err_rx_irq: @@ -960,6 +965,7 @@ static int axienet_open(struct net_device *ndev) if (lp->phy_dev) phy_disconnect(lp->phy_dev); lp->phy_dev = NULL; + tasklet_kill(&lp->dma_err_tasklet); dev_err(lp->dev, "request_irq() failed\n"); return ret; } @@ -990,7 +996,7 @@ static int axienet_stop(struct net_device *ndev) axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); - tasklet_disable(&lp->dma_err_tasklet); + tasklet_kill(&lp->dma_err_tasklet); free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); @@ -1613,10 +1619,6 @@ static int __devinit axienet_of_probe(struct platform_device *op) goto err_iounmap_2; } - tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, - (unsigned long) lp); - tasklet_disable(&lp->dma_err_tasklet); - return 0; err_iounmap_2: diff --git a/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c b/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c index 98934bdf6acf..477d6729b17f 100644 --- a/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1102,10 +1102,12 @@ static int init_queues(struct port *port) { int i; - if (!ports_open) - if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, - POOL_ALLOC_SIZE, 32, 0))) + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) return -ENOMEM; + } if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys))) diff --git a/trunk/drivers/net/irda/sir_dev.c b/trunk/drivers/net/irda/sir_dev.c index 5039f08f5a5b..43e9ab4f4d7e 100644 --- a/trunk/drivers/net/irda/sir_dev.c +++ b/trunk/drivers/net/irda/sir_dev.c @@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work) break; case SIRDEV_STATE_DONGLE_SPEED: - if (dev->dongle_drv->reset) { + if (dev->dongle_drv->set_speed) { ret = dev->dongle_drv->set_speed(dev, fsm->param); if (ret < 0) { fsm->result = ret; diff --git a/trunk/drivers/net/phy/mdio-bitbang.c b/trunk/drivers/net/phy/mdio-bitbang.c index 6428fcbbdd4b..daec9b05d168 100644 --- a/trunk/drivers/net/phy/mdio-bitbang.c +++ b/trunk/drivers/net/phy/mdio-bitbang.c @@ -234,7 +234,6 @@ void free_mdio_bitbang(struct mii_bus *bus) struct mdiobb_ctrl *ctrl = bus->priv; module_put(ctrl->ops->owner); - mdiobus_unregister(bus); mdiobus_free(bus); } EXPORT_SYMBOL(free_mdio_bitbang); diff --git a/trunk/drivers/net/phy/mdio-gpio.c b/trunk/drivers/net/phy/mdio-gpio.c index 899274f2f9b1..2ed1140df3e9 100644 --- a/trunk/drivers/net/phy/mdio-gpio.c +++ b/trunk/drivers/net/phy/mdio-gpio.c @@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev) { struct mdio_gpio_platform_data *pdata; struct mii_bus *new_bus; - int ret; + int ret, bus_id; - if (pdev->dev.of_node) + if (pdev->dev.of_node) { pdata = mdio_gpio_of_get_data(pdev); - else + bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); + } else { 
pdata = pdev->dev.platform_data; + bus_id = pdev->id; + } if (!pdata) return -ENODEV; - new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); + new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id); if (!new_bus) return -ENODEV; diff --git a/trunk/drivers/net/team/team_mode_broadcast.c b/trunk/drivers/net/team/team_mode_broadcast.c index 9db0171e9366..c5db428e73fa 100644 --- a/trunk/drivers/net/team/team_mode_broadcast.c +++ b/trunk/drivers/net/team/team_mode_broadcast.c @@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) if (last) { skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { - ret = team_dev_queue_xmit(team, last, - skb2); + ret = !team_dev_queue_xmit(team, last, + skb2); if (!sum_ret) sum_ret = ret; } @@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) } } if (last) { - ret = team_dev_queue_xmit(team, last, skb); + ret = !team_dev_queue_xmit(team, last, skb); if (!sum_ret) sum_ret = ret; } diff --git a/trunk/drivers/net/usb/cdc_eem.c b/trunk/drivers/net/usb/cdc_eem.c index c81e278629ff..08d55b6bf272 100644 --- a/trunk/drivers/net/usb/cdc_eem.c +++ b/trunk/drivers/net/usb/cdc_eem.c @@ -31,6 +31,7 @@ #include #include #include +#include /* @@ -92,7 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf) /* no jumbogram (16K) support for now */ - dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; + dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; diff --git a/trunk/drivers/net/usb/cdc_ncm.c b/trunk/drivers/net/usb/cdc_ncm.c index 4cd582a4f625..74fab1a40156 100644 --- a/trunk/drivers/net/usb/cdc_ncm.c +++ b/trunk/drivers/net/usb/cdc_ncm.c @@ -540,10 +540,12 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) (ctx->ether_desc == NULL) || (ctx->control != intf)) goto error; - /* claim interfaces, if any */ - temp = usb_driver_claim_interface(driver, ctx->data, dev); - if (temp) - goto error; + /* claim data interface, if different from control */ + if (ctx->data != ctx->control) { + temp = usb_driver_claim_interface(driver, ctx->data, dev); + if (temp) + goto error; + } iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; @@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) tasklet_kill(&ctx->bh); + /* handle devices with combined control and data interface */ + if (ctx->control == ctx->data) + ctx->data = NULL; + /* disconnect master --> disconnect slave */ if (intf == ctx->control && ctx->data) { usb_set_intfdata(ctx->data, NULL); @@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Huawei NCM devices disguised as vendor specific */ + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16), + .driver_info = (unsigned long)&wwan_info, + }, + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), + .driver_info = (unsigned long)&wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/trunk/drivers/net/usb/smsc95xx.c b/trunk/drivers/net/usb/smsc95xx.c index 7479a5761d0d..362cb8cfeb92 100644 --- a/trunk/drivers/net/usb/smsc95xx.c +++ b/trunk/drivers/net/usb/smsc95xx.c @@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx) /* set the address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; - addr = 
(phy_id << 11) | (idx << 6) | MII_READ_; + addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_; ret = smsc95xx_write_reg(dev, MII_ADDR, addr); check_warn_goto_done(ret, "Error writing MII_ADDR"); @@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx, /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; - addr = (phy_id << 11) | (idx << 6) | MII_WRITE_; + addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_; ret = smsc95xx_write_reg(dev, MII_ADDR, addr); check_warn_goto_done(ret, "Error writing MII_ADDR"); @@ -1344,6 +1344,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, } else { u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); skb_push(skb, 4); + cpu_to_le32s(&csum_preamble); memcpy(skb->data, &csum_preamble, 4); } } diff --git a/trunk/drivers/net/usb/usbnet.c b/trunk/drivers/net/usb/usbnet.c index cb04f900cc46..edb81ed06950 100644 --- a/trunk/drivers/net/usb/usbnet.c +++ b/trunk/drivers/net/usb/usbnet.c @@ -359,10 +359,12 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, void usbnet_defer_kevent (struct usbnet *dev, int work) { set_bit (work, &dev->flags); - if (!schedule_work (&dev->kevent)) - netdev_err(dev->net, "kevent %d may have been dropped\n", work); - else + if (!schedule_work (&dev->kevent)) { + if (net_ratelimit()) + netdev_err(dev->net, "kevent %d may have been dropped\n", work); + } else { netdev_dbg(dev->net, "kevent %d scheduled\n", work); + } } EXPORT_SYMBOL_GPL(usbnet_defer_kevent); diff --git a/trunk/drivers/net/vxlan.c b/trunk/drivers/net/vxlan.c index 7b4adde93c01..8b5c61917076 100644 --- a/trunk/drivers/net/vxlan.c +++ b/trunk/drivers/net/vxlan.c @@ -1,5 +1,5 @@ /* - * VXLAN: Virtual eXtensiable Local Area Network + * VXLAN: Virtual eXtensible Local Area Network * * Copyright (c) 2012 Vyatta Inc. * @@ -50,8 +50,8 @@ #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) -/* VLAN + IP header + UDP + VXLAN */ -#define VXLAN_HEADROOM (4 + 20 + 8 + 8) +/* IP header + UDP + VXLAN + Ethernet header */ +#define VXLAN_HEADROOM (20 + 8 + 8 + 14) #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. 
*/ @@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; + + /* update header length based on lower device */ + dev->hard_header_len = lowerdev->hard_header_len + + VXLAN_HEADROOM; } if (data[IFLA_VXLAN_TOS]) diff --git a/trunk/drivers/net/wan/ixp4xx_hss.c b/trunk/drivers/net/wan/ixp4xx_hss.c index 3f575afd8cfc..e9a3da588e95 100644 --- a/trunk/drivers/net/wan/ixp4xx_hss.c +++ b/trunk/drivers/net/wan/ixp4xx_hss.c @@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port) { int i; - if (!ports_open) - if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, - POOL_ALLOC_SIZE, 32, 0))) + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) return -ENOMEM; + } if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys))) diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index 8e1559aba495..1829b445d0b0 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -1456,7 +1456,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) switch (type) { case ATH9K_RESET_POWER_ON: ret = ath9k_hw_set_reset_power_on(ah); - if (!ret) + if (ret) ah->reset_power_on = true; break; case ATH9K_RESET_WARM: diff --git a/trunk/drivers/net/wireless/b43legacy/pio.c b/trunk/drivers/net/wireless/b43legacy/pio.c index 192251adf986..282eedec675e 100644 --- a/trunk/drivers/net/wireless/b43legacy/pio.c +++ b/trunk/drivers/net/wireless/b43legacy/pio.c @@ -382,7 +382,7 @@ static void cancel_transfers(struct b43legacy_pioqueue *queue) { struct b43legacy_pio_txpacket *packet, *tmp_packet; - tasklet_disable(&queue->txtask); + tasklet_kill(&queue->txtask); list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) free_txpacket(packet, 0); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index a6f1e8166008..481345c23ded 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -4401,7 +4401,7 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode) static void brcmf_wiphy_pno_params(struct wiphy *wiphy) { -#ifndef CONFIG_BRCMFISCAN +#ifndef CONFIG_BRCMISCAN /* scheduled scan settings */ wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c index ff8162d4c454..2d9eee93c743 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -521,7 +521,7 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); if (iwlagn_tx_skb(priv, control->sta, skb)) - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, @@ -1354,6 +1354,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, vif_priv->ctx = ctx; ctx->vif = vif; + /* + * In SNIFFER device type, the firmware reports the FCS to + * the host, rather than snipping it off. Unfortunately, + * mac80211 doesn't (yet) provide a per-packet flag for + * this, so that we have to set the hardware flag based + * on the interfaces added. 
As the monitor interface can + * only be present by itself, and will be removed before + * other interfaces are added, this is safe. + */ + if (vif->type == NL80211_IFTYPE_MONITOR) + priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS; + else + priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; + err = iwl_setup_interface(priv, ctx); if (!err || reset) goto out; diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/main.c b/trunk/drivers/net/wireless/iwlwifi/dvm/main.c index 7ff3f1430678..408132cf83c1 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/main.c @@ -2114,7 +2114,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); - dev_kfree_skb_any(skb); + ieee80211_free_txskb(priv->hw, skb); } static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c b/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c index 17c8e5d82681..bb69f8f90b3b 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority) dma_map_page(trans->dev, page, 0, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + spin_lock_irqsave(&rxq->lock, flags); + list_add(&rxb->list, &rxq->rx_used); + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, trans_pcie->rx_page_order); + return; + } /* dma address must be no more than 36 bits */ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); /* and also 256 byte aligned! */ @@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, dma_map_page(trans->dev, rxb->page, 0, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + /* + * free the page(s) as well to not break + * the invariant that the items on the used + * list have no page(s) + */ + __free_pages(rxb->page, trans_pcie->rx_page_order); + rxb->page = NULL; + list_add_tail(&rxb->list, &rxq->rx_used); + } else { + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } } else list_add_tail(&rxb->list, &rxq->rx_used); spin_unlock_irqrestore(&rxq->lock, flags); diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c index 105e3af3c621..79a4ddc002d3 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -480,20 +480,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 rd_ptr, wr_ptr; - int n_bd = trans_pcie->txq[txq_id].q.n_bd; if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { WARN_ONCE(1, "queue %d not used", txq_id); return; } - rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1); - wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)); - - WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]", - txq_id, rd_ptr, wr_ptr); - iwl_txq_set_inactive(trans, txq_id); IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } diff --git a/trunk/drivers/net/wireless/mwifiex/cmdevt.c b/trunk/drivers/net/wireless/mwifiex/cmdevt.c 
index 8d465107f52b..ae9010ed58de 100644 --- a/trunk/drivers/net/wireless/mwifiex/cmdevt.c +++ b/trunk/drivers/net/wireless/mwifiex/cmdevt.c @@ -890,9 +890,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context) return; } cmd_node = adapter->curr_cmd; - if (cmd_node->wait_q_enabled) - adapter->cmd_wait_q.status = -ETIMEDOUT; - if (cmd_node) { adapter->dbg.timeout_cmd_id = adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; @@ -938,6 +935,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context) dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", adapter->ps_mode, adapter->ps_state); + + if (cmd_node->wait_q_enabled) { + adapter->cmd_wait_q.status = -ETIMEDOUT; + wake_up_interruptible(&adapter->cmd_wait_q.wait); + mwifiex_cancel_pending_ioctl(adapter); + /* reset cmd_sent flag to unblock new commands */ + adapter->cmd_sent = false; + } } if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) mwifiex_init_fw_complete(adapter); diff --git a/trunk/drivers/net/wireless/mwifiex/sdio.c b/trunk/drivers/net/wireless/mwifiex/sdio.c index fc8a9bfa1248..82cf0fa2d9f6 100644 --- a/trunk/drivers/net/wireless/mwifiex/sdio.c +++ b/trunk/drivers/net/wireless/mwifiex/sdio.c @@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev) struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; mmc_pm_flag_t pm_flag = 0; - int hs_actived = 0; int i; int ret = 0; @@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev) adapter = card->adapter; /* Enable the Host Sleep */ - hs_actived = mwifiex_enable_hs(adapter); - if (hs_actived) { - pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); - ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (!mwifiex_enable_hs(adapter)) { + dev_err(adapter->dev, "cmd: failed to suspend\n"); + return -EFAULT; } + dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + /* Indicate device suspended */ adapter->is_suspended = true; diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 9970c2b1b199..b7e6607e6b6d 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { /*=== Customer ID ===*/ /****** 8188CU ********/ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ + {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ diff --git a/trunk/drivers/net/xen-netfront.c b/trunk/drivers/net/xen-netfront.c index caa011008cd0..fc24eb9b3948 100644 --- a/trunk/drivers/net/xen-netfront.c +++ b/trunk/drivers/net/xen-netfront.c @@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, /* Grant backend access to each skb fragment page. 
*/ for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; + struct page *page = skb_frag_page(frag); - tx->flags |= XEN_NETTXF_more_data; + len = skb_frag_size(frag); + offset = frag->page_offset; - id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); - np->tx_skbs[id].skb = skb_get(skb); - tx = RING_GET_REQUEST(&np->tx, prod++); - tx->id = id; - ref = gnttab_claim_grant_reference(&np->gref_tx_head); - BUG_ON((signed short)ref < 0); + /* Data must not cross a page boundary. */ + BUG_ON(len + offset > PAGE_SIZE<xbdev->otherend_id, - mfn, GNTMAP_readonly); + /* Skip unused frames from start of page */ + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; - tx->gref = np->grant_tx_ref[id] = ref; - tx->offset = frag->page_offset; - tx->size = skb_frag_size(frag); - tx->flags = 0; + while (len > 0) { + unsigned long bytes; + + BUG_ON(offset >= PAGE_SIZE); + + bytes = PAGE_SIZE - offset; + if (bytes > len) + bytes = len; + + tx->flags |= XEN_NETTXF_more_data; + + id = get_id_from_freelist(&np->tx_skb_freelist, + np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = pfn_to_mfn(page_to_pfn(page)); + gnttab_grant_foreign_access_ref(ref, + np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = bytes; + tx->flags = 0; + + offset += bytes; + len -= bytes; + + /* Next frame */ + if (offset == PAGE_SIZE && len) { + BUG_ON(!PageCompound(page)); + page++; + offset = 0; + } + } } np->tx.req_prod_pvt = prod; } +/* + * Count how many ring slots are required to send the frags of this + * skb. Each frag might be a compound page. 
+ */ +static int xennet_count_skb_frag_slots(struct sk_buff *skb) +{ + int i, frags = skb_shinfo(skb)->nr_frags; + int pages = 0; + + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; + unsigned long size = skb_frag_size(frag); + unsigned long offset = frag->page_offset; + + /* Skip unused frames from start of page */ + offset &= ~PAGE_MASK; + + pages += PFN_UP(offset + size); + } + + return pages; +} + static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; @@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) grant_ref_t ref; unsigned long mfn; int notify; - int frags = skb_shinfo(skb)->nr_frags; + int slots; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned long flags; - frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); - if (unlikely(frags > MAX_SKB_FRAGS + 1)) { - printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", - frags); - dump_stack(); + slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + + xennet_count_skb_frag_slots(skb); + if (unlikely(slots > MAX_SKB_FRAGS + 1)) { + net_alert_ratelimited( + "xennet: skb rides the rocket: %d slots\n", slots); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || - (frags > 1 && !xennet_can_sg(dev)) || + (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; diff --git a/trunk/drivers/nfc/pn533.c b/trunk/drivers/nfc/pn533.c index 97c440a8cd61..30ae18a03a9c 100644 --- a/trunk/drivers/nfc/pn533.c +++ b/trunk/drivers/nfc/pn533.c @@ -698,13 +698,14 @@ static void pn533_wq_cmd(struct work_struct *work) cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue); + list_del(&cmd->queue); + mutex_unlock(&dev->cmd_lock); __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame, cmd->in_frame_len, cmd->cmd_complete, cmd->arg, cmd->flags); - list_del(&cmd->queue); kfree(cmd); } @@ -1678,11 +1679,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, u8 *params, int params_len) { - struct pn533_cmd_jump_dep *cmd; struct pn533_cmd_jump_dep_response *resp; struct nfc_target nfc_target; u8 target_gt_len; int rc; + struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg; + u8 active = cmd->active; + + kfree(arg); if (params_len == -ENOENT) { nfc_dev_dbg(&dev->interface->dev, ""); @@ -1704,7 +1708,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, } resp = (struct pn533_cmd_jump_dep_response *) params; - cmd = (struct pn533_cmd_jump_dep *) arg; rc = resp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { nfc_dev_err(&dev->interface->dev, @@ -1734,7 +1737,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, if (rc == 0) rc = nfc_dep_link_is_up(dev->nfc_dev, dev->nfc_dev->targets[0].idx, - !cmd->active, NFC_RF_INITIATOR); + !active, NFC_RF_INITIATOR); return 0; } @@ -1819,12 +1822,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, dev->in_maxlen, pn533_in_dep_link_up_complete, cmd, GFP_KERNEL); - if (rc) - goto out; - - -out: - kfree(cmd); + if (rc < 0) + kfree(cmd); return rc; } @@ -2078,8 +2077,12 @@ static int pn533_transceive(struct nfc_dev *nfc_dev, static int pn533_tm_send_complete(struct pn533 *dev, void *arg, u8 
*params, int params_len) { + struct sk_buff *skb_out = arg; + nfc_dev_dbg(&dev->interface->dev, "%s", __func__); + dev_kfree_skb(skb_out); + if (params_len < 0) { nfc_dev_err(&dev->interface->dev, "Error %d when sending data", @@ -2117,7 +2120,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame, dev->in_maxlen, pn533_tm_send_complete, - NULL, GFP_KERNEL); + skb, GFP_KERNEL); if (rc) { nfc_dev_err(&dev->interface->dev, "Error %d when trying to send data", rc); diff --git a/trunk/drivers/pci/bus.c b/trunk/drivers/pci/bus.c index 6241fd05bd41..a543746fb354 100644 --- a/trunk/drivers/pci/bus.c +++ b/trunk/drivers/pci/bus.c @@ -320,10 +320,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), } else next = dev->bus_list.next; - /* Run device routines with the device locked */ - device_lock(&dev->dev); retval = cb(dev, userdata); - device_unlock(&dev->dev); if (retval) break; } diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c index 94c6e2aa03d6..6c94fc9489e7 100644 --- a/trunk/drivers/pci/pci-driver.c +++ b/trunk/drivers/pci/pci-driver.c @@ -398,6 +398,8 @@ static void pci_device_shutdown(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver; + pm_runtime_resume(dev); + if (drv && drv->shutdown) drv->shutdown(pci_dev); pci_msi_shutdown(pci_dev); @@ -408,16 +410,6 @@ static void pci_device_shutdown(struct device *dev) * continue to do DMA */ pci_disable_device(pci_dev); - - /* - * Devices may be enabled to wake up by runtime PM, but they need not - * be supposed to wake up the system from its "power off" state (e.g. - * ACPI S5). Therefore disable wakeup for all devices that aren't - * supposed to wake up the system at this point. The state argument - * will be ignored by pci_enable_wake(). - */ - if (!device_may_wakeup(dev)) - pci_enable_wake(pci_dev, PCI_UNKNOWN, false); } #ifdef CONFIG_PM diff --git a/trunk/drivers/pci/pci-sysfs.c b/trunk/drivers/pci/pci-sysfs.c index 02d107b15281..f39378d9da15 100644 --- a/trunk/drivers/pci/pci-sysfs.c +++ b/trunk/drivers/pci/pci-sysfs.c @@ -458,40 +458,6 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) } struct device_attribute vga_attr = __ATTR_RO(boot_vga); -static void -pci_config_pm_runtime_get(struct pci_dev *pdev) -{ - struct device *dev = &pdev->dev; - struct device *parent = dev->parent; - - if (parent) - pm_runtime_get_sync(parent); - pm_runtime_get_noresume(dev); - /* - * pdev->current_state is set to PCI_D3cold during suspending, - * so wait until suspending completes - */ - pm_runtime_barrier(dev); - /* - * Only need to resume devices in D3cold, because config - * registers are still accessible for devices suspended but - * not in D3cold. 
- */ - if (pdev->current_state == PCI_D3cold) - pm_runtime_resume(dev); -} - -static void -pci_config_pm_runtime_put(struct pci_dev *pdev) -{ - struct device *dev = &pdev->dev; - struct device *parent = dev->parent; - - pm_runtime_put(dev); - if (parent) - pm_runtime_put_sync(parent); -} - static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, diff --git a/trunk/drivers/pci/pci.c b/trunk/drivers/pci/pci.c index 54858838f098..aabf64798bda 100644 --- a/trunk/drivers/pci/pci.c +++ b/trunk/drivers/pci/pci.c @@ -1858,6 +1858,38 @@ bool pci_dev_run_wake(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_dev_run_wake); +void pci_config_pm_runtime_get(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + if (parent) + pm_runtime_get_sync(parent); + pm_runtime_get_noresume(dev); + /* + * pdev->current_state is set to PCI_D3cold during suspending, + * so wait until suspending completes + */ + pm_runtime_barrier(dev); + /* + * Only need to resume devices in D3cold, because config + * registers are still accessible for devices suspended but + * not in D3cold. + */ + if (pdev->current_state == PCI_D3cold) + pm_runtime_resume(dev); +} + +void pci_config_pm_runtime_put(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + pm_runtime_put(dev); + if (parent) + pm_runtime_put_sync(parent); +} + /** * pci_pm_init - Initialize PM functions of given PCI device * @dev: PCI device to handle. diff --git a/trunk/drivers/pci/pci.h b/trunk/drivers/pci/pci.h index bacbcba69cf3..fd92aab9904b 100644 --- a/trunk/drivers/pci/pci.h +++ b/trunk/drivers/pci/pci.h @@ -72,6 +72,8 @@ extern void pci_disable_enabled_device(struct pci_dev *dev); extern int pci_finish_runtime_suspend(struct pci_dev *dev); extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); extern void pci_wakeup_bus(struct pci_bus *bus); +extern void pci_config_pm_runtime_get(struct pci_dev *dev); +extern void pci_config_pm_runtime_put(struct pci_dev *dev); extern void pci_pm_init(struct pci_dev *dev); extern void platform_pci_wakeup_init(struct pci_dev *dev); extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_core.c b/trunk/drivers/pci/pcie/aer/aerdrv_core.c index 06bad96af415..af4e31cd3a3b 100644 --- a/trunk/drivers/pci/pcie/aer/aerdrv_core.c +++ b/trunk/drivers/pci/pcie/aer/aerdrv_core.c @@ -213,6 +213,7 @@ static int report_error_detected(struct pci_dev *dev, void *data) struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; + device_lock(&dev->dev); dev->error_state = result_data->state; if (!dev->driver || @@ -231,12 +232,14 @@ static int report_error_detected(struct pci_dev *dev, void *data) dev->driver ? 
"no AER-aware driver" : "no driver"); } - return 0; + goto out; } err_handler = dev->driver->err_handler; vote = err_handler->error_detected(dev, result_data->state); result_data->result = merge_result(result_data->result, vote); +out: + device_unlock(&dev->dev); return 0; } @@ -247,14 +250,17 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data) struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; + device_lock(&dev->dev); if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->mmio_enabled) - return 0; + goto out; err_handler = dev->driver->err_handler; vote = err_handler->mmio_enabled(dev); result_data->result = merge_result(result_data->result, vote); +out: + device_unlock(&dev->dev); return 0; } @@ -265,14 +271,17 @@ static int report_slot_reset(struct pci_dev *dev, void *data) struct aer_broadcast_data *result_data; result_data = (struct aer_broadcast_data *) data; + device_lock(&dev->dev); if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->slot_reset) - return 0; + goto out; err_handler = dev->driver->err_handler; vote = err_handler->slot_reset(dev); result_data->result = merge_result(result_data->result, vote); +out: + device_unlock(&dev->dev); return 0; } @@ -280,15 +289,18 @@ static int report_resume(struct pci_dev *dev, void *data) { const struct pci_error_handlers *err_handler; + device_lock(&dev->dev); dev->error_state = pci_channel_io_normal; if (!dev->driver || !dev->driver->err_handler || !dev->driver->err_handler->resume) - return 0; + goto out; err_handler = dev->driver->err_handler; err_handler->resume(dev); +out: + device_unlock(&dev->dev); return 0; } diff --git a/trunk/drivers/pci/pcie/portdrv_core.c b/trunk/drivers/pci/pcie/portdrv_core.c index d03a7a39b2d8..ed129b414624 100644 --- a/trunk/drivers/pci/pcie/portdrv_core.c +++ b/trunk/drivers/pci/pcie/portdrv_core.c @@ -272,7 +272,8 @@ static int get_port_device_capability(struct pci_dev *dev) } /* Hot-Plug Capable */ - if (cap_mask & PCIE_PORT_SERVICE_HP) { + if ((cap_mask & PCIE_PORT_SERVICE_HP) && + dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT) { pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, ®32); if (reg32 & PCI_EXP_SLTCAP_HPC) { services |= PCIE_PORT_SERVICE_HP; diff --git a/trunk/drivers/pci/proc.c b/trunk/drivers/pci/proc.c index eb907a8faf2a..9b8505ccc56d 100644 --- a/trunk/drivers/pci/proc.c +++ b/trunk/drivers/pci/proc.c @@ -76,6 +76,8 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp if (!access_ok(VERIFY_WRITE, buf, cnt)) return -EINVAL; + pci_config_pm_runtime_get(dev); + if ((pos & 1) && cnt) { unsigned char val; pci_user_read_config_byte(dev, pos, &val); @@ -121,6 +123,8 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp cnt--; } + pci_config_pm_runtime_put(dev); + *ppos = pos; return nbytes; } @@ -146,6 +150,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof if (!access_ok(VERIFY_READ, buf, cnt)) return -EINVAL; + pci_config_pm_runtime_get(dev); + if ((pos & 1) && cnt) { unsigned char val; __get_user(val, buf); @@ -191,6 +197,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof cnt--; } + pci_config_pm_runtime_put(dev); + *ppos = pos; i_size_write(ino, dp->size); return nbytes; diff --git a/trunk/drivers/pinctrl/Kconfig b/trunk/drivers/pinctrl/Kconfig index 7bf914df6e91..aeecf0f72cad 100644 --- a/trunk/drivers/pinctrl/Kconfig +++ b/trunk/drivers/pinctrl/Kconfig @@ -178,12 +178,14 
@@ config PINCTRL_COH901 ports of 8 GPIO pins each. config PINCTRL_SAMSUNG - bool "Samsung pinctrl driver" + bool + depends on OF && GPIOLIB select PINMUX select PINCONF config PINCTRL_EXYNOS4 bool "Pinctrl driver data for Exynos4 SoC" + depends on OF && GPIOLIB select PINCTRL_SAMSUNG config PINCTRL_MVEBU diff --git a/trunk/drivers/pinctrl/spear/pinctrl-spear.c b/trunk/drivers/pinctrl/spear/pinctrl-spear.c index 5d4f44f462f0..b1fd6ee33c6c 100644 --- a/trunk/drivers/pinctrl/spear/pinctrl-spear.c +++ b/trunk/drivers/pinctrl/spear/pinctrl-spear.c @@ -244,7 +244,7 @@ static int spear_pinctrl_endisable(struct pinctrl_dev *pctldev, else temp = ~muxreg->val; - val |= temp; + val |= muxreg->mask & temp; pmx_writel(pmx, val, muxreg->reg); } } diff --git a/trunk/drivers/pinctrl/spear/pinctrl-spear1310.c b/trunk/drivers/pinctrl/spear/pinctrl-spear1310.c index d6cca8c81b92..0436fc7895d6 100644 --- a/trunk/drivers/pinctrl/spear/pinctrl-spear1310.c +++ b/trunk/drivers/pinctrl/spear/pinctrl-spear1310.c @@ -25,8 +25,8 @@ static const struct pinctrl_pin_desc spear1310_pins[] = { }; /* registers */ -#define PERIP_CFG 0x32C - #define MCIF_SEL_SHIFT 3 +#define PERIP_CFG 0x3B0 + #define MCIF_SEL_SHIFT 5 #define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT) #define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT) #define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT) @@ -164,6 +164,10 @@ static const struct pinctrl_pin_desc spear1310_pins[] = { #define PMX_SSP0_CS0_MASK (1 << 29) #define PMX_SSP0_CS1_2_MASK (1 << 30) +#define PAD_DIRECTION_SEL_0 0x65C +#define PAD_DIRECTION_SEL_1 0x660 +#define PAD_DIRECTION_SEL_2 0x664 + /* combined macros */ #define PMX_GMII_MASK (PMX_GMIICLK_MASK | \ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \ @@ -237,6 +241,10 @@ static struct spear_muxreg i2c0_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2C0_MASK, .val = PMX_I2C0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_I2C0_MASK, + .val = PMX_I2C0_MASK, }, }; @@ -269,6 +277,10 @@ static struct spear_muxreg ssp0_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_SSP0_MASK, .val = PMX_SSP0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_SSP0_MASK, + .val = PMX_SSP0_MASK, }, }; @@ -294,6 +306,10 @@ static struct spear_muxreg ssp0_cs0_muxreg[] = { .reg = PAD_FUNCTION_EN_2, .mask = PMX_SSP0_CS0_MASK, .val = PMX_SSP0_CS0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_SSP0_CS0_MASK, + .val = PMX_SSP0_CS0_MASK, }, }; @@ -319,6 +335,10 @@ static struct spear_muxreg ssp0_cs1_2_muxreg[] = { .reg = PAD_FUNCTION_EN_2, .mask = PMX_SSP0_CS1_2_MASK, .val = PMX_SSP0_CS1_2_MASK, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_SSP0_CS1_2_MASK, + .val = PMX_SSP0_CS1_2_MASK, }, }; @@ -352,6 +372,10 @@ static struct spear_muxreg i2s0_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2S0_MASK, .val = PMX_I2S0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_I2S0_MASK, + .val = PMX_I2S0_MASK, }, }; @@ -384,6 +408,10 @@ static struct spear_muxreg i2s1_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_I2S1_MASK, .val = PMX_I2S1_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_I2S1_MASK, + .val = PMX_I2S1_MASK, }, }; @@ -418,6 +446,10 @@ static struct spear_muxreg clcd_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK, .val = PMX_CLCD1_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_CLCD1_MASK, + .val = PMX_CLCD1_MASK, }, }; @@ -443,6 +475,10 @@ static struct spear_muxreg clcd_high_res_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_CLCD2_MASK, .val = PMX_CLCD2_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = 
PMX_CLCD2_MASK, + .val = PMX_CLCD2_MASK, }, }; @@ -461,7 +497,7 @@ static struct spear_pingroup clcd_high_res_pingroup = { .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux), }; -static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res" }; +static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" }; static struct spear_function clcd_function = { .name = "clcd", .groups = clcd_grps, @@ -479,6 +515,14 @@ static struct spear_muxreg arm_gpio_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_EGPIO_1_GRP_MASK, .val = PMX_EGPIO_1_GRP_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_EGPIO_0_GRP_MASK, + .val = PMX_EGPIO_0_GRP_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_EGPIO_1_GRP_MASK, + .val = PMX_EGPIO_1_GRP_MASK, }, }; @@ -511,6 +555,10 @@ static struct spear_muxreg smi_2_chips_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_SMI_MASK, .val = PMX_SMI_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_SMI_MASK, + .val = PMX_SMI_MASK, }, }; @@ -539,6 +587,14 @@ static struct spear_muxreg smi_4_chips_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_SMI_MASK, + .val = PMX_SMI_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, + .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK, }, }; @@ -573,6 +629,10 @@ static struct spear_muxreg gmii_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_GMII_MASK, .val = PMX_GMII_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_GMII_MASK, + .val = PMX_GMII_MASK, }, }; @@ -615,6 +675,18 @@ static struct spear_muxreg rgmii_muxreg[] = { .reg = PAD_FUNCTION_EN_2, .mask = PMX_RGMII_REG2_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_RGMII_REG0_MASK, + .val = PMX_RGMII_REG0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_RGMII_REG1_MASK, + .val = PMX_RGMII_REG1_MASK, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_RGMII_REG2_MASK, + .val = PMX_RGMII_REG2_MASK, }, }; @@ -649,6 +721,10 @@ static struct spear_muxreg smii_0_1_2_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_SMII_0_1_2_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_SMII_0_1_2_MASK, + .val = PMX_SMII_0_1_2_MASK, }, }; @@ -681,6 +757,10 @@ static struct spear_muxreg ras_mii_txclk_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NFCE2_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_NFCE2_MASK, + .val = PMX_NFCE2_MASK, }, }; @@ -721,6 +801,14 @@ static struct spear_muxreg nand_8bit_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NAND8BIT_1_MASK, .val = PMX_NAND8BIT_1_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_NAND8BIT_0_MASK, + .val = PMX_NAND8BIT_0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_NAND8BIT_1_MASK, + .val = PMX_NAND8BIT_1_MASK, }, }; @@ -747,6 +835,10 @@ static struct spear_muxreg nand_16bit_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NAND16BIT_1_MASK, .val = PMX_NAND16BIT_1_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_NAND16BIT_1_MASK, + .val = PMX_NAND16BIT_1_MASK, }, }; @@ -772,6 +864,10 @@ static struct spear_muxreg nand_4_chips_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NAND_4CHIPS_MASK, .val = PMX_NAND_4CHIPS_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_NAND_4CHIPS_MASK, + .val = PMX_NAND_4CHIPS_MASK, }, }; @@ -833,6 +929,10 @@ static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = 
PMX_KBD_ROWCOL68_MASK, .val = PMX_KBD_ROWCOL68_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_KBD_ROWCOL68_MASK, + .val = PMX_KBD_ROWCOL68_MASK, }, }; @@ -866,6 +966,10 @@ static struct spear_muxreg uart0_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_UART0_MASK, .val = PMX_UART0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_UART0_MASK, + .val = PMX_UART0_MASK, }, }; @@ -891,6 +995,10 @@ static struct spear_muxreg uart0_modem_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_UART0_MODEM_MASK, .val = PMX_UART0_MODEM_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_UART0_MODEM_MASK, + .val = PMX_UART0_MODEM_MASK, }, }; @@ -923,6 +1031,10 @@ static struct spear_muxreg gpt0_tmr0_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_GPT0_TMR0_MASK, .val = PMX_GPT0_TMR0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_GPT0_TMR0_MASK, + .val = PMX_GPT0_TMR0_MASK, }, }; @@ -948,6 +1060,10 @@ static struct spear_muxreg gpt0_tmr1_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_GPT0_TMR1_MASK, .val = PMX_GPT0_TMR1_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_GPT0_TMR1_MASK, + .val = PMX_GPT0_TMR1_MASK, }, }; @@ -980,6 +1096,10 @@ static struct spear_muxreg gpt1_tmr0_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_GPT1_TMR0_MASK, .val = PMX_GPT1_TMR0_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_GPT1_TMR0_MASK, + .val = PMX_GPT1_TMR0_MASK, }, }; @@ -1005,6 +1125,10 @@ static struct spear_muxreg gpt1_tmr1_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_GPT1_TMR1_MASK, .val = PMX_GPT1_TMR1_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_GPT1_TMR1_MASK, + .val = PMX_GPT1_TMR1_MASK, }, }; @@ -1049,6 +1173,20 @@ static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214, .reg = PAD_FUNCTION_EN_2, \ .mask = PMX_MCIFALL_2_MASK, \ .val = PMX_MCIFALL_2_MASK, \ + }, { \ + .reg = PAD_DIRECTION_SEL_0, \ + .mask = PMX_MCI_DATA8_15_MASK, \ + .val = PMX_MCI_DATA8_15_MASK, \ + }, { \ + .reg = PAD_DIRECTION_SEL_1, \ + .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \ + PMX_NFWPRT2_MASK, \ + .val = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \ + PMX_NFWPRT2_MASK, \ + }, { \ + .reg = PAD_DIRECTION_SEL_2, \ + .mask = PMX_MCIFALL_2_MASK, \ + .val = PMX_MCIFALL_2_MASK, \ } /* sdhci device */ @@ -1154,6 +1292,10 @@ static struct spear_muxreg touch_xy_muxreg[] = { .reg = PAD_FUNCTION_EN_2, .mask = PMX_TOUCH_XY_MASK, .val = PMX_TOUCH_XY_MASK, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_TOUCH_XY_MASK, + .val = PMX_TOUCH_XY_MASK, }, }; @@ -1187,6 +1329,10 @@ static struct spear_muxreg uart1_dis_i2c_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2C0_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_I2C0_MASK, + .val = PMX_I2C0_MASK, }, }; @@ -1213,6 +1359,12 @@ static struct spear_muxreg uart1_dis_sd_muxreg[] = { .mask = PMX_MCIDATA1_MASK | PMX_MCIDATA2_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_MCIDATA1_MASK | + PMX_MCIDATA2_MASK, + .val = PMX_MCIDATA1_MASK | + PMX_MCIDATA2_MASK, }, }; @@ -1246,6 +1398,10 @@ static struct spear_muxreg uart2_3_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2S0_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_I2S0_MASK, + .val = PMX_I2S0_MASK, }, }; @@ -1278,6 +1434,10 @@ static struct spear_muxreg uart4_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK, + .val = PMX_I2S0_MASK | PMX_CLCD1_MASK, }, }; @@ -1310,6 
+1470,10 @@ static struct spear_muxreg uart5_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_CLCD1_MASK, + .val = PMX_CLCD1_MASK, }, }; @@ -1344,6 +1508,10 @@ static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_CLCD1_MASK, + .val = PMX_CLCD1_MASK, }, }; @@ -1376,6 +1544,10 @@ static struct spear_muxreg i2c_1_2_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_CLCD1_MASK, + .val = PMX_CLCD1_MASK, }, }; @@ -1409,6 +1581,10 @@ static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_CLCD1_MASK | PMX_SMI_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_CLCD1_MASK | PMX_SMI_MASK, + .val = PMX_CLCD1_MASK | PMX_SMI_MASK, }, }; @@ -1435,6 +1611,10 @@ static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, + .val = PMX_I2S1_MASK | PMX_MCIDATA3_MASK, }, }; @@ -1469,6 +1649,10 @@ static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = { .reg = PAD_FUNCTION_EN_0, .mask = PMX_SMI_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_SMI_MASK, + .val = PMX_SMI_MASK, }, }; @@ -1499,6 +1683,14 @@ static struct spear_muxreg i2c4_dis_sd_muxreg[] = { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCIDATA5_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_MCIDATA4_MASK, + .val = PMX_MCIDATA4_MASK, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_MCIDATA5_MASK, + .val = PMX_MCIDATA5_MASK, }, }; @@ -1526,6 +1718,12 @@ static struct spear_muxreg i2c5_dis_sd_muxreg[] = { .mask = PMX_MCIDATA6_MASK | PMX_MCIDATA7_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_MCIDATA6_MASK | + PMX_MCIDATA7_MASK, + .val = PMX_MCIDATA6_MASK | + PMX_MCIDATA7_MASK, }, }; @@ -1560,6 +1758,10 @@ static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_KBD_ROWCOL25_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_KBD_ROWCOL25_MASK, + .val = PMX_KBD_ROWCOL25_MASK, }, }; @@ -1587,6 +1789,12 @@ static struct spear_muxreg i2c6_dis_sd_muxreg[] = { .mask = PMX_MCIIORDRE_MASK | PMX_MCIIOWRWE_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_MCIIORDRE_MASK | + PMX_MCIIOWRWE_MASK, + .val = PMX_MCIIORDRE_MASK | + PMX_MCIIOWRWE_MASK, }, }; @@ -1613,6 +1821,12 @@ static struct spear_muxreg i2c7_dis_sd_muxreg[] = { .mask = PMX_MCIRESETCF_MASK | PMX_MCICS0CE_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_MCIRESETCF_MASK | + PMX_MCICS0CE_MASK, + .val = PMX_MCIRESETCF_MASK | + PMX_MCICS0CE_MASK, }, }; @@ -1651,6 +1865,14 @@ static struct spear_muxreg can0_dis_nor_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_NFRSTPWDWN3_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_NFRSTPWDWN2_MASK, + .val = PMX_NFRSTPWDWN2_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_NFRSTPWDWN3_MASK, + .val = PMX_NFRSTPWDWN3_MASK, }, }; @@ -1677,6 +1899,10 @@ static struct spear_muxreg can0_dis_sd_muxreg[] = { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, + .val = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK, }, }; @@ 
-1711,6 +1937,10 @@ static struct spear_muxreg can1_dis_sd_muxreg[] = { .reg = PAD_FUNCTION_EN_2, .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, + .val = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK, }, }; @@ -1737,6 +1967,10 @@ static struct spear_muxreg can1_dis_kbd_muxreg[] = { .reg = PAD_FUNCTION_EN_1, .mask = PMX_KBD_ROWCOL25_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_KBD_ROWCOL25_MASK, + .val = PMX_KBD_ROWCOL25_MASK, }, }; @@ -1763,29 +1997,64 @@ static struct spear_function can1_function = { .ngroups = ARRAY_SIZE(can1_grps), }; -/* Pad multiplexing for pci device */ -static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18, +/* Pad multiplexing for (ras-ip) pci device */ +static const unsigned pci_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 }; -#define PCI_SATA_MUXREG \ - { \ - .reg = PAD_FUNCTION_EN_0, \ - .mask = PMX_MCI_DATA8_15_MASK, \ - .val = 0, \ - }, { \ - .reg = PAD_FUNCTION_EN_1, \ - .mask = PMX_PCI_REG1_MASK, \ - .val = 0, \ - }, { \ - .reg = PAD_FUNCTION_EN_2, \ - .mask = PMX_PCI_REG2_MASK, \ - .val = 0, \ - } -/* pad multiplexing for pcie0 device */ +static struct spear_muxreg pci_muxreg[] = { + { + .reg = PAD_FUNCTION_EN_0, + .mask = PMX_MCI_DATA8_15_MASK, + .val = 0, + }, { + .reg = PAD_FUNCTION_EN_1, + .mask = PMX_PCI_REG1_MASK, + .val = 0, + }, { + .reg = PAD_FUNCTION_EN_2, + .mask = PMX_PCI_REG2_MASK, + .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_0, + .mask = PMX_MCI_DATA8_15_MASK, + .val = PMX_MCI_DATA8_15_MASK, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_PCI_REG1_MASK, + .val = PMX_PCI_REG1_MASK, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_PCI_REG2_MASK, + .val = PMX_PCI_REG2_MASK, + }, +}; + +static struct spear_modemux pci_modemux[] = { + { + .muxregs = pci_muxreg, + .nmuxregs = ARRAY_SIZE(pci_muxreg), + }, +}; + +static struct spear_pingroup pci_pingroup = { + .name = "pci_grp", + .pins = pci_pins, + .npins = ARRAY_SIZE(pci_pins), + .modemuxs = pci_modemux, + .nmodemuxs = ARRAY_SIZE(pci_modemux), +}; + +static const char *const pci_grps[] = { "pci_grp" }; +static struct spear_function pci_function = { + .name = "pci", + .groups = pci_grps, + .ngroups = ARRAY_SIZE(pci_grps), +}; + +/* pad multiplexing for (fix-part) pcie0 device */ static struct spear_muxreg pcie0_muxreg[] = { - PCI_SATA_MUXREG, { .reg = PCIE_SATA_CFG, .mask = PCIE_CFG_VAL(0), @@ -1802,15 +2071,12 @@ static struct spear_modemux pcie0_modemux[] = { static struct spear_pingroup pcie0_pingroup = { .name = "pcie0_grp", - .pins = pci_sata_pins, - .npins = ARRAY_SIZE(pci_sata_pins), .modemuxs = pcie0_modemux, .nmodemuxs = ARRAY_SIZE(pcie0_modemux), }; -/* pad multiplexing for pcie1 device */ +/* pad multiplexing for (fix-part) pcie1 device */ static struct spear_muxreg pcie1_muxreg[] = { - PCI_SATA_MUXREG, { .reg = PCIE_SATA_CFG, .mask = PCIE_CFG_VAL(1), @@ -1827,15 +2093,12 @@ static struct spear_modemux pcie1_modemux[] = { static struct spear_pingroup pcie1_pingroup = { .name = "pcie1_grp", - .pins = pci_sata_pins, - .npins = ARRAY_SIZE(pci_sata_pins), .modemuxs = pcie1_modemux, .nmodemuxs = ARRAY_SIZE(pcie1_modemux), }; -/* pad multiplexing for pcie2 device */ +/* pad multiplexing for (fix-part) pcie2 device */ static struct spear_muxreg 
pcie2_muxreg[] = { - PCI_SATA_MUXREG, { .reg = PCIE_SATA_CFG, .mask = PCIE_CFG_VAL(2), @@ -1852,22 +2115,20 @@ static struct spear_modemux pcie2_modemux[] = { static struct spear_pingroup pcie2_pingroup = { .name = "pcie2_grp", - .pins = pci_sata_pins, - .npins = ARRAY_SIZE(pci_sata_pins), .modemuxs = pcie2_modemux, .nmodemuxs = ARRAY_SIZE(pcie2_modemux), }; -static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" }; -static struct spear_function pci_function = { - .name = "pci", - .groups = pci_grps, - .ngroups = ARRAY_SIZE(pci_grps), +static const char *const pcie_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" +}; +static struct spear_function pcie_function = { + .name = "pci_express", + .groups = pcie_grps, + .ngroups = ARRAY_SIZE(pcie_grps), }; /* pad multiplexing for sata0 device */ static struct spear_muxreg sata0_muxreg[] = { - PCI_SATA_MUXREG, { .reg = PCIE_SATA_CFG, .mask = SATA_CFG_VAL(0), @@ -1884,15 +2145,12 @@ static struct spear_modemux sata0_modemux[] = { static struct spear_pingroup sata0_pingroup = { .name = "sata0_grp", - .pins = pci_sata_pins, - .npins = ARRAY_SIZE(pci_sata_pins), .modemuxs = sata0_modemux, .nmodemuxs = ARRAY_SIZE(sata0_modemux), }; /* pad multiplexing for sata1 device */ static struct spear_muxreg sata1_muxreg[] = { - PCI_SATA_MUXREG, { .reg = PCIE_SATA_CFG, .mask = SATA_CFG_VAL(1), @@ -1909,15 +2167,12 @@ static struct spear_modemux sata1_modemux[] = { static struct spear_pingroup sata1_pingroup = { .name = "sata1_grp", - .pins = pci_sata_pins, - .npins = ARRAY_SIZE(pci_sata_pins), .modemuxs = sata1_modemux, .nmodemuxs = ARRAY_SIZE(sata1_modemux), }; /* pad multiplexing for sata2 device */ static struct spear_muxreg sata2_muxreg[] = { - PCI_SATA_MUXREG, { .reg = PCIE_SATA_CFG, .mask = SATA_CFG_VAL(2), @@ -1934,8 +2189,6 @@ static struct spear_modemux sata2_modemux[] = { static struct spear_pingroup sata2_pingroup = { .name = "sata2_grp", - .pins = pci_sata_pins, - .npins = ARRAY_SIZE(pci_sata_pins), .modemuxs = sata2_modemux, .nmodemuxs = ARRAY_SIZE(sata2_modemux), }; @@ -1957,6 +2210,14 @@ static struct spear_muxreg ssp1_dis_kbd_muxreg[] = { PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | PMX_NFCE2_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_1, + .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK | + PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | + PMX_NFCE2_MASK, + .val = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK | + PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK | + PMX_NFCE2_MASK, }, }; @@ -1983,6 +2244,12 @@ static struct spear_muxreg ssp1_dis_sd_muxreg[] = { .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | PMX_MCICECF_MASK | PMX_MCICEXD_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | + PMX_MCICECF_MASK | PMX_MCICEXD_MASK, + .val = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK | + PMX_MCICECF_MASK | PMX_MCICEXD_MASK, }, }; @@ -2017,6 +2284,12 @@ static struct spear_muxreg gpt64_muxreg[] = { .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK | PMX_MCILEDS_MASK, .val = 0, + }, { + .reg = PAD_DIRECTION_SEL_2, + .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK + | PMX_MCILEDS_MASK, + .val = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK + | PMX_MCILEDS_MASK, }, }; @@ -2093,6 +2366,7 @@ static struct spear_pingroup *spear1310_pingroups[] = { &can0_dis_sd_pingroup, &can1_dis_sd_pingroup, &can1_dis_kbd_pingroup, + &pci_pingroup, &pcie0_pingroup, &pcie1_pingroup, &pcie2_pingroup, @@ -2138,6 +2412,7 @@ 
static struct spear_function *spear1310_functions[] = { &can0_function, &can1_function, &pci_function, + &pcie_function, &sata_function, &ssp1_function, &gpt64_function, diff --git a/trunk/drivers/pinctrl/spear/pinctrl-spear1340.c b/trunk/drivers/pinctrl/spear/pinctrl-spear1340.c index a0eb057e55bd..0606b8cf3f2c 100644 --- a/trunk/drivers/pinctrl/spear/pinctrl-spear1340.c +++ b/trunk/drivers/pinctrl/spear/pinctrl-spear1340.c @@ -213,7 +213,7 @@ static const struct pinctrl_pin_desc spear1340_pins[] = { * Pad multiplexing for making all pads as gpio's. This is done to override the * values passed from bootloader and start from scratch. */ -static const unsigned pads_as_gpio_pins[] = { 251 }; +static const unsigned pads_as_gpio_pins[] = { 12, 88, 89, 251 }; static struct spear_muxreg pads_as_gpio_muxreg[] = { { .reg = PAD_FUNCTION_EN_1, @@ -1692,7 +1692,43 @@ static struct spear_pingroup clcd_pingroup = { .nmodemuxs = ARRAY_SIZE(clcd_modemux), }; -static const char *const clcd_grps[] = { "clcd_grp" }; +/* Disable cld runtime to save panel damage */ +static struct spear_muxreg clcd_sleep_muxreg[] = { + { + .reg = PAD_SHARED_IP_EN_1, + .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK, + .val = 0, + }, { + .reg = PAD_FUNCTION_EN_5, + .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK, + .val = 0x0, + }, { + .reg = PAD_FUNCTION_EN_6, + .mask = CLCD_AND_ARM_TRACE_REG5_MASK, + .val = 0x0, + }, { + .reg = PAD_FUNCTION_EN_7, + .mask = CLCD_AND_ARM_TRACE_REG6_MASK, + .val = 0x0, + }, +}; + +static struct spear_modemux clcd_sleep_modemux[] = { + { + .muxregs = clcd_sleep_muxreg, + .nmuxregs = ARRAY_SIZE(clcd_sleep_muxreg), + }, +}; + +static struct spear_pingroup clcd_sleep_pingroup = { + .name = "clcd_sleep_grp", + .pins = clcd_pins, + .npins = ARRAY_SIZE(clcd_pins), + .modemuxs = clcd_sleep_modemux, + .nmodemuxs = ARRAY_SIZE(clcd_sleep_modemux), +}; + +static const char *const clcd_grps[] = { "clcd_grp", "clcd_sleep_grp" }; static struct spear_function clcd_function = { .name = "clcd", .groups = clcd_grps, @@ -1893,6 +1929,7 @@ static struct spear_pingroup *spear1340_pingroups[] = { &sdhci_pingroup, &cf_pingroup, &xd_pingroup, + &clcd_sleep_pingroup, &clcd_pingroup, &arm_trace_pingroup, &miphy_dbg_pingroup, diff --git a/trunk/drivers/pinctrl/spear/pinctrl-spear320.c b/trunk/drivers/pinctrl/spear/pinctrl-spear320.c index 020b1e0bdb3e..ca47b0e50780 100644 --- a/trunk/drivers/pinctrl/spear/pinctrl-spear320.c +++ b/trunk/drivers/pinctrl/spear/pinctrl-spear320.c @@ -2239,6 +2239,10 @@ static struct spear_muxreg pwm2_pin_34_muxreg[] = { .reg = PMX_CONFIG_REG, .mask = PMX_SSP_CS_MASK, .val = 0, + }, { + .reg = MODE_CONFIG_REG, + .mask = PMX_PWM_MASK, + .val = PMX_PWM_MASK, }, { .reg = IP_SEL_PAD_30_39_REG, .mask = PMX_PL_34_MASK, @@ -2956,9 +2960,9 @@ static struct spear_function mii2_function = { }; /* Pad multiplexing for cadence mii 1_2 as smii or rmii device */ -static const unsigned smii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, +static const unsigned rmii0_1_pins[] = { 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 }; -static const unsigned rmii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 }; +static const unsigned smii0_1_pins[] = { 10, 11, 21, 22, 23, 24, 25, 26, 27 }; static struct spear_muxreg mii0_1_muxreg[] = { { .reg = PMX_CONFIG_REG, diff --git a/trunk/drivers/pinctrl/spear/pinctrl-spear3xx.h b/trunk/drivers/pinctrl/spear/pinctrl-spear3xx.h index 31f44347f17c..7860b36053c4 100644 --- a/trunk/drivers/pinctrl/spear/pinctrl-spear3xx.h +++ 
b/trunk/drivers/pinctrl/spear/pinctrl-spear3xx.h @@ -15,6 +15,7 @@ #include "pinctrl-spear.h" /* pad mux declarations */ +#define PMX_PWM_MASK (1 << 16) #define PMX_FIRDA_MASK (1 << 14) #define PMX_I2C_MASK (1 << 13) #define PMX_SSP_CS_MASK (1 << 12) diff --git a/trunk/drivers/rapidio/rio.c b/trunk/drivers/rapidio/rio.c index c17ae22567e0..0c6fcb461faf 100644 --- a/trunk/drivers/rapidio/rio.c +++ b/trunk/drivers/rapidio/rio.c @@ -401,7 +401,7 @@ EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); /** * rio_map_inb_region -- Map inbound memory region. * @mport: Master port. - * @lstart: physical address of memory region to be mapped + * @local: physical address of memory region to be mapped * @rbase: RIO base address assigned to this window * @size: Size of the memory region * @rflags: Flags for mapping. diff --git a/trunk/drivers/regulator/core.c b/trunk/drivers/regulator/core.c index 5c4829cba6a6..e872c8be080e 100644 --- a/trunk/drivers/regulator/core.c +++ b/trunk/drivers/regulator/core.c @@ -1381,22 +1381,14 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id) } EXPORT_SYMBOL_GPL(regulator_get_exclusive); -/** - * regulator_put - "free" the regulator source - * @regulator: regulator source - * - * Note: drivers must ensure that all regulator_enable calls made on this - * regulator source are balanced by regulator_disable calls prior to calling - * this function. - */ -void regulator_put(struct regulator *regulator) +/* Locks held by regulator_put() */ +static void _regulator_put(struct regulator *regulator) { struct regulator_dev *rdev; if (regulator == NULL || IS_ERR(regulator)) return; - mutex_lock(®ulator_list_mutex); rdev = regulator->rdev; debugfs_remove_recursive(regulator->debugfs); @@ -1412,6 +1404,20 @@ void regulator_put(struct regulator *regulator) rdev->exclusive = 0; module_put(rdev->owner); +} + +/** + * regulator_put - "free" the regulator source + * @regulator: regulator source + * + * Note: drivers must ensure that all regulator_enable calls made on this + * regulator source are balanced by regulator_disable calls prior to calling + * this function. 
+ */ +void regulator_put(struct regulator *regulator) +{ + mutex_lock(&regulator_list_mutex); + _regulator_put(regulator); mutex_unlock(&regulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_put); @@ -1974,7 +1980,7 @@ int regulator_is_supported_voltage(struct regulator *regulator, if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { ret = regulator_get_voltage(regulator); if (ret >= 0) - return (min_uV >= ret && ret <= max_uV); + return (min_uV <= ret && ret <= max_uV); else return ret; } @@ -3365,7 +3371,7 @@ regulator_register(const struct regulator_desc *regulator_desc, if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", config->ena_gpio, ret); - goto clean; + goto wash; } rdev->ena_gpio = config->ena_gpio; @@ -3445,10 +3451,11 @@ regulator_register(const struct regulator_desc *regulator_desc, scrub: if (rdev->supply) - regulator_put(rdev->supply); + _regulator_put(rdev->supply); if (rdev->ena_gpio) gpio_free(rdev->ena_gpio); kfree(rdev->constraints); +wash: device_unregister(&rdev->dev); /* device core frees rdev */ rdev = ERR_PTR(ret); diff --git a/trunk/drivers/s390/char/con3215.c b/trunk/drivers/s390/char/con3215.c index 9ffb6d5f17aa..4ed343e4eb41 100644 --- a/trunk/drivers/s390/char/con3215.c +++ b/trunk/drivers/s390/char/con3215.c @@ -44,7 +44,6 @@ #define RAW3215_NR_CCWS 3 #define RAW3215_TIMEOUT HZ/10 /* time for delayed output */ -#define RAW3215_FIXED 1 /* 3215 console device is not be freed */ #define RAW3215_WORKING 4 /* set if a request is being worked on */ #define RAW3215_THROTTLED 8 /* set if reading is disabled */ #define RAW3215_STOPPED 16 /* set if writing is disabled */ @@ -339,8 +338,10 @@ static void raw3215_wakeup(unsigned long data) struct tty_struct *tty; tty = tty_port_tty_get(&raw->port); - tty_wakeup(tty); - tty_kref_put(tty); + if (tty) { + tty_wakeup(tty); + tty_kref_put(tty); + } } /* @@ -629,8 +630,7 @@ static void raw3215_shutdown(struct raw3215_info *raw) DECLARE_WAITQUEUE(wait, current); unsigned long flags; - if (!(raw->port.flags & ASYNC_INITIALIZED) || - (raw->flags & RAW3215_FIXED)) + if (!(raw->port.flags & ASYNC_INITIALIZED)) return; /* Wait for outstanding requests, then free irq */ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); @@ -926,8 +926,6 @@ static int __init con3215_init(void) dev_set_drvdata(&cdev->dev, raw); cdev->handler = raw3215_irq; - raw->flags |= RAW3215_FIXED; - /* Request the console irq */ if (raw3215_startup(raw) != 0) { raw3215_free_info(raw); diff --git a/trunk/drivers/s390/cio/css.h b/trunk/drivers/s390/cio/css.h index 33bb4d891e16..4af3dfe70ef5 100644 --- a/trunk/drivers/s390/cio/css.h +++ b/trunk/drivers/s390/cio/css.h @@ -112,9 +112,6 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); extern void css_reiterate_subchannels(void); void css_update_ssd_info(struct subchannel *sch); -#define __MAX_SUBCHANNEL 65535 -#define __MAX_SSID 3 - struct channel_subsystem { u8 cssid; int valid; diff --git a/trunk/drivers/s390/cio/device.c b/trunk/drivers/s390/cio/device.c index fc916f5d7314..fd3143c291c6 100644 --- a/trunk/drivers/s390/cio/device.c +++ b/trunk/drivers/s390/cio/device.c @@ -1424,7 +1424,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) } if (device_is_disconnected(cdev)) return IO_SCH_REPROBE; - if (cdev->online) + if (cdev->online && !cdev->private->flags.resuming) return IO_SCH_VERIFY; if (cdev->private->state == DEV_STATE_NOT_OPER) return IO_SCH_UNREG_ATTACH; @@ -1469,12 +1469,6 @@ static int io_subchannel_sch_event(struct
subchannel *sch, int process) rc = 0; goto out_unlock; case IO_SCH_VERIFY: - if (cdev->private->flags.resuming == 1) { - if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) { - ccw_device_set_notoper(cdev); - break; - } - } /* Trigger path verification. */ io_subchannel_verify(sch); rc = 0; diff --git a/trunk/drivers/s390/cio/idset.c b/trunk/drivers/s390/cio/idset.c index 199bc6791177..65d13e38803f 100644 --- a/trunk/drivers/s390/cio/idset.c +++ b/trunk/drivers/s390/cio/idset.c @@ -125,8 +125,7 @@ int idset_is_empty(struct idset *set) void idset_add_set(struct idset *to, struct idset *from) { - int len = min(__BITOPS_WORDS(to->num_ssid * to->num_id), - __BITOPS_WORDS(from->num_ssid * from->num_id)); + int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id); bitmap_or(to->bitmap, to->bitmap, from->bitmap, len); } diff --git a/trunk/drivers/s390/net/qeth_core_main.c b/trunk/drivers/s390/net/qeth_core_main.c index 3e25d3150456..4d6ba00d0047 100644 --- a/trunk/drivers/s390/net/qeth_core_main.c +++ b/trunk/drivers/s390/net/qeth_core_main.c @@ -2942,13 +2942,33 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, QETH_DBF_TEXT(SETUP, 2, "qipasscb"); cmd = (struct qeth_ipa_cmd *) data; + + switch (cmd->hdr.return_code) { + case IPA_RC_NOTSUPP: + case IPA_RC_L2_UNSUPPORTED_CMD: + QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); + card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; + card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; + return -0; + default: + if (cmd->hdr.return_code) { + QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " + "rc=%d\n", + dev_name(&card->gdev->dev), + cmd->hdr.return_code); + return 0; + } + } + if (cmd->hdr.prot_version == QETH_PROT_IPV4) { card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; - } else { + } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; - } + } else + QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" + "\n", dev_name(&card->gdev->dev)); QETH_DBF_TEXT(SETUP, 2, "suppenbl"); QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported); QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled); diff --git a/trunk/drivers/s390/net/qeth_l2_main.c b/trunk/drivers/s390/net/qeth_l2_main.c index e67e0258aec5..fddb62654b6a 100644 --- a/trunk/drivers/s390/net/qeth_l2_main.c +++ b/trunk/drivers/s390/net/qeth_l2_main.c @@ -626,10 +626,13 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "doL2init"); QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "could not query adapter parameters on " - "device %s: x%x\n", CARD_BUS_ID(card), rc); + if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { + rc = qeth_query_setadapterparms(card); + if (rc) { + QETH_DBF_MESSAGE(2, "could not query adapter " + "parameters on device %s: x%x\n", + CARD_BUS_ID(card), rc); + } } if (card->info.type == QETH_CARD_TYPE_IQD || @@ -676,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -ERESTARTSYS; } rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc) + if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) rc = qeth_l2_send_setmac(card, addr->sa_data); return rc ? 
-EINVAL : 0; } diff --git a/trunk/drivers/scsi/isci/request.c b/trunk/drivers/scsi/isci/request.c index c1bafc3f3fb1..9594ab62702b 100644 --- a/trunk/drivers/scsi/isci/request.c +++ b/trunk/drivers/scsi/isci/request.c @@ -1972,7 +1972,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_index, (void **)&frame_buffer); - sci_controller_copy_sata_response(&ireq->stp.req, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); diff --git a/trunk/drivers/scsi/qlogicpti.c b/trunk/drivers/scsi/qlogicpti.c index b191dd549207..71fddbc60f18 100644 --- a/trunk/drivers/scsi/qlogicpti.c +++ b/trunk/drivers/scsi/qlogicpti.c @@ -1294,26 +1294,19 @@ static struct scsi_host_template qpti_template = { static const struct of_device_id qpti_match[]; static int __devinit qpti_sbus_probe(struct platform_device *op) { - const struct of_device_id *match; - struct scsi_host_template *tpnt; struct device_node *dp = op->dev.of_node; struct Scsi_Host *host; struct qlogicpti *qpti; static int nqptis; const char *fcode; - match = of_match_device(qpti_match, &op->dev); - if (!match) - return -EINVAL; - tpnt = match->data; - /* Sometimes Antares cards come up not completely * setup, and we get a report of a zero IRQ. */ if (op->archdata.irqs[0] == 0) return -ENODEV; - host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti)); + host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti)); if (!host) return -ENOMEM; @@ -1445,19 +1438,15 @@ static int __devexit qpti_sbus_remove(struct platform_device *op) static const struct of_device_id qpti_match[] = { { .name = "ptisp", - .data = &qpti_template, }, { .name = "PTI,ptisp", - .data = &qpti_template, }, { .name = "QLGC,isp", - .data = &qpti_template, }, { .name = "SUNW,isp", - .data = &qpti_template, }, {}, }; diff --git a/trunk/drivers/scsi/scsi.c b/trunk/drivers/scsi/scsi.c index 2936b447cae9..2c0d0ec8150b 100644 --- a/trunk/drivers/scsi/scsi.c +++ b/trunk/drivers/scsi/scsi.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #include @@ -1061,6 +1062,50 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, } EXPORT_SYMBOL_GPL(scsi_get_vpd_page); +/** + * scsi_report_opcode - Find out if a given command opcode is supported + * @sdev: scsi device to query + * @buffer: scratch buffer (must be at least 20 bytes long) + * @len: length of buffer + * @opcode: opcode for command to look up + * + * Uses the REPORT SUPPORTED OPERATION CODES to look up the given + * opcode. Returns 0 if RSOC fails or if the command opcode is + * unsupported. Returns 1 if the device claims to support the command. 
+ */ +int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, + unsigned int len, unsigned char opcode) +{ + unsigned char cmd[16]; + struct scsi_sense_hdr sshdr; + int result; + + if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) + return 0; + + memset(cmd, 0, 16); + cmd[0] = MAINTENANCE_IN; + cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; + cmd[2] = 1; /* One command format */ + cmd[3] = opcode; + put_unaligned_be32(len, &cmd[6]); + memset(buffer, 0, len); + + result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, + &sshdr, 30 * HZ, 3, NULL); + + if (result && scsi_sense_valid(&sshdr) && + sshdr.sense_key == ILLEGAL_REQUEST && + (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) + return 0; + + if ((buffer[1] & 3) == 3) /* Command supported */ + return 1; + + return 0; +} +EXPORT_SYMBOL(scsi_report_opcode); + /** * scsi_device_get - get an additional reference to a scsi_device * @sdev: device to get a reference to diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index da36a3a81a9e..9032e910bca3 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -900,11 +900,23 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) action = ACTION_FAIL; error = -EILSEQ; /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ - } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && - (cmd->cmnd[0] == UNMAP || - cmd->cmnd[0] == WRITE_SAME_16 || - cmd->cmnd[0] == WRITE_SAME)) { - description = "Discard failure"; + } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { + switch (cmd->cmnd[0]) { + case UNMAP: + description = "Discard failure"; + break; + case WRITE_SAME: + case WRITE_SAME_16: + if (cmd->cmnd[1] & 0x8) + description = "Discard failure"; + else + description = + "Write same failure"; + break; + default: + description = "Invalid command failure"; + break; + } action = ACTION_FAIL; error = -EREMOTEIO; } else diff --git a/trunk/drivers/scsi/sd.c b/trunk/drivers/scsi/sd.c index 12f6fdfc1147..352bc77b7c88 100644 --- a/trunk/drivers/scsi/sd.c +++ b/trunk/drivers/scsi/sd.c @@ -99,6 +99,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); #endif static void sd_config_discard(struct scsi_disk *, unsigned int); +static void sd_config_write_same(struct scsi_disk *); static int sd_revalidate_disk(struct gendisk *); static void sd_unlock_native_capacity(struct gendisk *disk); static int sd_probe(struct device *); @@ -395,6 +396,45 @@ sd_store_max_medium_access_timeouts(struct device *dev, return err ? 
err : count; } +static ssize_t +sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks); +} + +static ssize_t +sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + unsigned long max; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (sdp->type != TYPE_DISK) + return -EINVAL; + + err = kstrtoul(buf, 10, &max); + + if (err) + return err; + + if (max == 0) + sdp->no_write_same = 1; + else if (max <= SD_MAX_WS16_BLOCKS) + sdkp->max_ws_blocks = max; + + sd_config_write_same(sdkp); + + return count; +} + static struct device_attribute sd_disk_attrs[] = { __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, sd_store_cache_type), @@ -410,6 +450,8 @@ static struct device_attribute sd_disk_attrs[] = { __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, sd_store_provisioning_mode), + __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR, + sd_show_write_same_blocks, sd_store_write_same_blocks), __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR, sd_show_max_medium_access_timeouts, sd_store_max_medium_access_timeouts), @@ -561,19 +603,23 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) return; case SD_LBP_UNMAP: - max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff); + max_blocks = min_not_zero(sdkp->max_unmap_blocks, + (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS16: - max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS10: - max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); break; case SD_LBP_ZERO: - max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); q->limits.discard_zeroes_data = 1; break; } @@ -583,29 +629,26 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) } /** - * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device + * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device * @sdp: scsi device to operate one * @rq: Request to prepare * * Will issue either UNMAP or WRITE SAME(16) depending on preference * indicated by target device. 
**/ -static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) +static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) { struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); - struct bio *bio = rq->bio; - sector_t sector = bio->bi_sector; - unsigned int nr_sectors = bio_sectors(bio); + sector_t sector = blk_rq_pos(rq); + unsigned int nr_sectors = blk_rq_sectors(rq); + unsigned int nr_bytes = blk_rq_bytes(rq); unsigned int len; int ret; char *buf; struct page *page; - if (sdkp->device->sector_size == 4096) { - sector >>= 3; - nr_sectors >>= 3; - } - + sector >>= ilog2(sdp->sector_size) - 9; + nr_sectors >>= ilog2(sdp->sector_size) - 9; rq->timeout = SD_TIMEOUT; memset(rq->cmd, 0, rq->cmd_len); @@ -660,6 +703,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); rq->buffer = page_address(page); + rq->__data_len = nr_bytes; out: if (ret != BLKPREP_OK) { @@ -669,6 +713,83 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) return ret; } +static void sd_config_write_same(struct scsi_disk *sdkp) +{ + struct request_queue *q = sdkp->disk->queue; + unsigned int logical_block_size = sdkp->device->sector_size; + unsigned int blocks = 0; + + if (sdkp->device->no_write_same) { + sdkp->max_ws_blocks = 0; + goto out; + } + + /* Some devices can not handle block counts above 0xffff despite + * supporting WRITE SAME(16). Consequently we default to 64k + * blocks per I/O unless the device explicitly advertises a + * bigger limit. + */ + if (sdkp->max_ws_blocks == 0) + sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS; + + if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) + blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS16_BLOCKS); + else + blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); + +out: + blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9)); +} + +/** + * sd_setup_write_same_cmnd - write the same data to multiple blocks + * @sdp: scsi device to operate one + * @rq: Request to prepare + * + * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on + * preference indicated by target device. 
+ **/ +static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) +{ + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + struct bio *bio = rq->bio; + sector_t sector = blk_rq_pos(rq); + unsigned int nr_sectors = blk_rq_sectors(rq); + unsigned int nr_bytes = blk_rq_bytes(rq); + int ret; + + if (sdkp->device->no_write_same) + return BLKPREP_KILL; + + BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); + + sector >>= ilog2(sdp->sector_size) - 9; + nr_sectors >>= ilog2(sdp->sector_size) - 9; + + rq->__data_len = sdp->sector_size; + rq->timeout = SD_WRITE_SAME_TIMEOUT; + memset(rq->cmd, 0, rq->cmd_len); + + if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { + rq->cmd_len = 16; + rq->cmd[0] = WRITE_SAME_16; + put_unaligned_be64(sector, &rq->cmd[2]); + put_unaligned_be32(nr_sectors, &rq->cmd[10]); + } else { + rq->cmd_len = 10; + rq->cmd[0] = WRITE_SAME; + put_unaligned_be32(sector, &rq->cmd[2]); + put_unaligned_be16(nr_sectors, &rq->cmd[7]); + } + + ret = scsi_setup_blk_pc_cmnd(sdp, rq); + rq->__data_len = nr_bytes; + + return ret; +} + static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) { rq->timeout = SD_FLUSH_TIMEOUT; @@ -712,7 +833,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) * block PC requests to make life easier. */ if (rq->cmd_flags & REQ_DISCARD) { - ret = scsi_setup_discard_cmnd(sdp, rq); + ret = sd_setup_discard_cmnd(sdp, rq); + goto out; + } else if (rq->cmd_flags & REQ_WRITE_SAME) { + ret = sd_setup_write_same_cmnd(sdp, rq); goto out; } else if (rq->cmd_flags & REQ_FLUSH) { ret = scsi_setup_flush_cmnd(sdp, rq); @@ -1482,12 +1606,21 @@ static int sd_done(struct scsi_cmnd *SCpnt) unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); struct scsi_sense_hdr sshdr; struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); + struct request *req = SCpnt->request; int sense_valid = 0; int sense_deferred = 0; unsigned char op = SCpnt->cmnd[0]; + unsigned char unmap = SCpnt->cmnd[1] & 8; - if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result) - scsi_set_resid(SCpnt, 0); + if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) { + if (!result) { + good_bytes = blk_rq_bytes(req); + scsi_set_resid(SCpnt, 0); + } else { + good_bytes = 0; + scsi_set_resid(SCpnt, blk_rq_bytes(req)); + } + } if (result) { sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); @@ -1536,9 +1669,25 @@ static int sd_done(struct scsi_cmnd *SCpnt) if (sshdr.asc == 0x10) /* DIX: Host detected corruption */ good_bytes = sd_completed_bytes(SCpnt); /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ - if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && - (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME)) - sd_config_discard(sdkp, SD_LBP_DISABLE); + if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { + switch (op) { + case UNMAP: + sd_config_discard(sdkp, SD_LBP_DISABLE); + break; + case WRITE_SAME_16: + case WRITE_SAME: + if (unmap) + sd_config_discard(sdkp, SD_LBP_DISABLE); + else { + sdkp->device->no_write_same = 1; + sd_config_write_same(sdkp); + + good_bytes = 0; + req->__data_len = blk_rq_bytes(req); + req->cmd_flags |= REQ_QUIET; + } + } + } break; default: break; @@ -2374,9 +2523,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) if (buffer[3] == 0x3c) { unsigned int lba_count, desc_count; - sdkp->max_ws_blocks = - (u32) min_not_zero(get_unaligned_be64(&buffer[36]), - (u64)0xffffffff); + sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]); if (!sdkp->lbpme) goto out; 
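[Note on the WRITE SAME encoding above: sd_setup_write_same_cmnd() falls back to the 16-byte CDB whenever the start LBA no longer fits in 32 bits or the block count no longer fits in 16 bits (or the device advertised WRITE SAME(16) via ws16). A minimal standalone sketch of that choice, using plain byte shifts instead of the kernel's put_unaligned_be* helpers; the opcode values match include/scsi/scsi.h, everything else is illustrative only.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WRITE_SAME     0x41	/* WRITE SAME(10), value per include/scsi/scsi.h */
#define WRITE_SAME_16  0x93

static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
static void put_be32(uint32_t v, uint8_t *p) { p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v; }
static void put_be64(uint64_t v, uint8_t *p) { put_be32(v >> 32, p); put_be32((uint32_t)v, p + 4); }

/*
 * Build a WRITE SAME CDB for a request starting at 512-byte sector "sector"
 * spanning "nr_sectors" 512-byte sectors on a disk with the given logical
 * block size.  Mirrors the decision in sd_setup_write_same_cmnd(): use
 * WRITE SAME(16) once the LBA or the count overflows the 10-byte variant.
 */
static int build_write_same_cdb(uint8_t cdb[16], uint64_t sector,
				uint32_t nr_sectors, unsigned int block_size)
{
	unsigned int shift = 0;

	while ((512u << shift) < block_size)	/* ilog2(block_size) - 9 */
		shift++;
	sector >>= shift;			/* 512-byte sectors -> logical blocks */
	nr_sectors >>= shift;

	memset(cdb, 0, 16);
	if (sector > 0xffffffffULL || nr_sectors > 0xffff) {
		cdb[0] = WRITE_SAME_16;
		put_be64(sector, &cdb[2]);	/* LBA in bytes 2..9 */
		put_be32(nr_sectors, &cdb[10]);	/* count in bytes 10..13 */
		return 16;			/* CDB length */
	}
	cdb[0] = WRITE_SAME;
	put_be32((uint32_t)sector, &cdb[2]);	/* LBA in bytes 2..5 */
	put_be16((uint16_t)nr_sectors, &cdb[7]);/* count in bytes 7..8 */
	return 10;
}

int main(void)
{
	uint8_t cdb[16];
	/* An LBA beyond 2 TiB on a 512-byte-block disk forces the 16-byte CDB. */
	int len = build_write_same_cdb(cdb, 0x123456789ULL, 8, 512);
	printf("CDB length %d, opcode 0x%02x\n", len, cdb[0]);
	return 0;
}

[The same "ilog2(sector_size) - 9" shift appears in sd_setup_discard_cmnd() above, where blk_rq_pos()/blk_rq_sectors() are always expressed in 512-byte units and have to be rescaled to the device's logical block size.]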
@@ -2469,6 +2616,13 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp) kfree(buffer); } +static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) +{ + if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE, + WRITE_SAME_16)) + sdkp->ws16 = 1; +} + static int sd_try_extended_inquiry(struct scsi_device *sdp) { /* @@ -2528,6 +2682,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_write_protect_flag(sdkp, buffer); sd_read_cache_type(sdkp, buffer); sd_read_app_tag_own(sdkp, buffer); + sd_read_write_same(sdkp, buffer); } sdkp->first_scan = 0; @@ -2545,6 +2700,7 @@ static int sd_revalidate_disk(struct gendisk *disk) blk_queue_flush(sdkp->disk->queue, flush); set_capacity(disk, sdkp->capacity); + sd_config_write_same(sdkp); kfree(buffer); out: diff --git a/trunk/drivers/scsi/sd.h b/trunk/drivers/scsi/sd.h index 47c52a6d733c..74a1e4ca5401 100644 --- a/trunk/drivers/scsi/sd.h +++ b/trunk/drivers/scsi/sd.h @@ -14,6 +14,7 @@ #define SD_TIMEOUT (30 * HZ) #define SD_MOD_TIMEOUT (75 * HZ) #define SD_FLUSH_TIMEOUT (60 * HZ) +#define SD_WRITE_SAME_TIMEOUT (120 * HZ) /* * Number of allowed retries @@ -38,6 +39,11 @@ enum { SD_MEMPOOL_SIZE = 2, /* CDB pool size */ }; +enum { + SD_MAX_WS10_BLOCKS = 0xffff, + SD_MAX_WS16_BLOCKS = 0x7fffff, +}; + enum { SD_LBP_FULL = 0, /* Full logical block provisioning */ SD_LBP_UNMAP, /* Use UNMAP command */ @@ -77,6 +83,7 @@ struct scsi_disk { unsigned lbpws : 1; unsigned lbpws10 : 1; unsigned lbpvpd : 1; + unsigned ws16 : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) diff --git a/trunk/drivers/staging/android/android_alarm.h b/trunk/drivers/staging/android/android_alarm.h index f2ffd963f1c3..d0cafd637199 100644 --- a/trunk/drivers/staging/android/android_alarm.h +++ b/trunk/drivers/staging/android/android_alarm.h @@ -51,12 +51,10 @@ enum android_alarm_return_flags { #define ANDROID_ALARM_WAIT _IO('a', 1) #define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size) -#define ALARM_IOR(c, type, size) _IOR('a', (c) | ((type) << 4), size) - /* Set alarm */ #define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec) #define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec) -#define ANDROID_ALARM_GET_TIME(type) ALARM_IOR(4, type, struct timespec) +#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec) #define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) #define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) #define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) diff --git a/trunk/drivers/tty/hvc/hvc_console.c b/trunk/drivers/tty/hvc/hvc_console.c index a5dec1ca1b82..13ee53bd0bf6 100644 --- a/trunk/drivers/tty/hvc/hvc_console.c +++ b/trunk/drivers/tty/hvc/hvc_console.c @@ -424,7 +424,6 @@ static void hvc_hangup(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; unsigned long flags; - int temp_open_count; if (!hp) return; @@ -444,7 +443,6 @@ static void hvc_hangup(struct tty_struct *tty) return; } - temp_open_count = hp->port.count; hp->port.count = 0; spin_unlock_irqrestore(&hp->port.lock, flags); tty_port_tty_set(&hp->port, NULL); @@ -453,11 +451,6 @@ static void hvc_hangup(struct tty_struct *tty) if (hp->ops->notifier_hangup) hp->ops->notifier_hangup(hp, hp->data); - - while(temp_open_count) { - --temp_open_count; - tty_port_put(&hp->port); - } } /* diff --git a/trunk/drivers/tty/serial/max310x.c b/trunk/drivers/tty/serial/max310x.c index 2bc28a59d385..1ab1d2c66de4 100644 --- 
a/trunk/drivers/tty/serial/max310x.c +++ b/trunk/drivers/tty/serial/max310x.c @@ -1239,6 +1239,7 @@ static int __devexit max310x_remove(struct spi_device *spi) static const struct spi_device_id max310x_id_table[] = { { "max3107", MAX310X_TYPE_MAX3107 }, { "max3108", MAX310X_TYPE_MAX3108 }, + { } }; MODULE_DEVICE_TABLE(spi, max310x_id_table); diff --git a/trunk/drivers/usb/core/hcd.c b/trunk/drivers/usb/core/hcd.c index 1e741bca0265..f034716190ff 100644 --- a/trunk/drivers/usb/core/hcd.c +++ b/trunk/drivers/usb/core/hcd.c @@ -2151,8 +2151,15 @@ EXPORT_SYMBOL_GPL(usb_bus_start_enum); irqreturn_t usb_hcd_irq (int irq, void *__hcd) { struct usb_hcd *hcd = __hcd; + unsigned long flags; irqreturn_t rc; + /* IRQF_DISABLED doesn't work correctly with shared IRQs + * when the first handler doesn't use it. So let's just + * assume it's never used. + */ + local_irq_save(flags); + if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) rc = IRQ_NONE; else if (hcd->driver->irq(hcd) == IRQ_NONE) @@ -2160,6 +2167,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) else rc = IRQ_HANDLED; + local_irq_restore(flags); return rc; } EXPORT_SYMBOL_GPL(usb_hcd_irq); @@ -2347,6 +2355,14 @@ static int usb_hcd_request_irqs(struct usb_hcd *hcd, int retval; if (hcd->driver->irq) { + + /* IRQF_DISABLED doesn't work as advertised when used together + * with IRQF_SHARED. As usb_hcd_irq() will always disable + * interrupts we can remove it here. + */ + if (irqflags & IRQF_SHARED) + irqflags &= ~IRQF_DISABLED; + snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", hcd->driver->description, hcd->self.busnum); retval = request_irq(irqnum, &usb_hcd_irq, irqflags, diff --git a/trunk/drivers/usb/early/ehci-dbgp.c b/trunk/drivers/usb/early/ehci-dbgp.c index e426ad626d74..4bfa78af379c 100644 --- a/trunk/drivers/usb/early/ehci-dbgp.c +++ b/trunk/drivers/usb/early/ehci-dbgp.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -614,12 +615,6 @@ static int _dbgp_external_startup(void) return -ENODEV; } -int dbgp_external_startup(struct usb_hcd *hcd) -{ - return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup(); -} -EXPORT_SYMBOL_GPL(dbgp_external_startup); - static int ehci_reset_port(int port) { u32 portsc; @@ -979,6 +974,7 @@ struct console early_dbgp_console = { .index = -1, }; +#if IS_ENABLED(CONFIG_USB_EHCI_HCD) int dbgp_reset_prep(struct usb_hcd *hcd) { int ret = xen_dbgp_reset_prep(hcd); @@ -1007,6 +1003,13 @@ int dbgp_reset_prep(struct usb_hcd *hcd) } EXPORT_SYMBOL_GPL(dbgp_reset_prep); +int dbgp_external_startup(struct usb_hcd *hcd) +{ + return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup(); +} +EXPORT_SYMBOL_GPL(dbgp_external_startup); +#endif /* USB_EHCI_HCD */ + #ifdef CONFIG_KGDB static char kgdbdbgp_buf[DBGP_MAX_PACKET]; diff --git a/trunk/drivers/usb/gadget/u_ether.c b/trunk/drivers/usb/gadget/u_ether.c index 6458764994ef..4ec3c0d7a18b 100644 --- a/trunk/drivers/usb/gadget/u_ether.c +++ b/trunk/drivers/usb/gadget/u_ether.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "u_ether.h" @@ -295,7 +296,7 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req) while (skb2) { if (status < 0 || ETH_HLEN > skb2->len - || skb2->len > ETH_FRAME_LEN) { + || skb2->len > VLAN_ETH_FRAME_LEN) { dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; DBG(dev, "rx length %d\n", skb2->len); diff --git a/trunk/drivers/usb/host/ehci-ls1x.c b/trunk/drivers/usb/host/ehci-ls1x.c index ca759652626b..aa0f328922df 100644 --- 
a/trunk/drivers/usb/host/ehci-ls1x.c +++ b/trunk/drivers/usb/host/ehci-ls1x.c @@ -113,7 +113,7 @@ static int ehci_hcd_ls1x_probe(struct platform_device *pdev) goto err_put_hcd; } - ret = usb_add_hcd(hcd, irq, IRQF_SHARED); + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); if (ret) goto err_put_hcd; diff --git a/trunk/drivers/usb/host/ohci-xls.c b/trunk/drivers/usb/host/ohci-xls.c index 84201cd1a472..41e378f17c66 100644 --- a/trunk/drivers/usb/host/ohci-xls.c +++ b/trunk/drivers/usb/host/ohci-xls.c @@ -56,7 +56,7 @@ static int ohci_xls_probe_internal(const struct hc_driver *driver, goto err3; } - retval = usb_add_hcd(hcd, irq, IRQF_SHARED); + retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); if (retval != 0) goto err4; return retval; diff --git a/trunk/drivers/usb/musb/musb_gadget.c b/trunk/drivers/usb/musb/musb_gadget.c index d0b87e7b4abf..b6b84dacc791 100644 --- a/trunk/drivers/usb/musb/musb_gadget.c +++ b/trunk/drivers/usb/musb/musb_gadget.c @@ -707,11 +707,12 @@ static void rxstate(struct musb *musb, struct musb_request *req) fifo_count = musb_readw(epio, MUSB_RXCOUNT); /* - * use mode 1 only if we expect data of at least ep packet_sz - * and have not yet received a short packet + * Enable Mode 1 on RX transfers only when short_not_ok flag + * is set. Currently short_not_ok flag is set only from + * file_storage and f_mass_storage drivers */ - if ((request->length - request->actual >= musb_ep->packet_sz) && - (fifo_count >= musb_ep->packet_sz)) + + if (request->short_not_ok && fifo_count == musb_ep->packet_sz) use_mode_1 = 1; else use_mode_1 = 0; @@ -727,6 +728,27 @@ static void rxstate(struct musb *musb, struct musb_request *req) c = musb->dma_controller; channel = musb_ep->dma; + /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in + * mode 0 only. So we do not get endpoint interrupts due to DMA + * completion. We only get interrupts from DMA controller. + * + * We could operate in DMA mode 1 if we knew the size of the tranfer + * in advance. For mass storage class, request->length = what the host + * sends, so that'd work. But for pretty much everything else, + * request->length is routinely more than what the host sends. For + * most these gadgets, end of is signified either by a short packet, + * or filling the last byte of the buffer. (Sending extra data in + * that last pckate should trigger an overflow fault.) But in mode 1, + * we don't get DMA completion interrupt for short packets. + * + * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), + * to get endpoint interrupt on every DMA req, but that didn't seem + * to work reliably. + * + * REVISIT an updated g_file_storage can set req->short_not_ok, which + * then becomes usable as a runtime "use mode 1" hint... 
+ */ + /* Experimental: Mode1 works with mass storage use cases */ if (use_mode_1) { csr |= MUSB_RXCSR_AUTOCLEAR; diff --git a/trunk/drivers/usb/musb/ux500.c b/trunk/drivers/usb/musb/ux500.c index d62a91fedc22..0e62f504410e 100644 --- a/trunk/drivers/usb/musb/ux500.c +++ b/trunk/drivers/usb/musb/ux500.c @@ -65,7 +65,7 @@ static int __devinit ux500_probe(struct platform_device *pdev) struct platform_device *musb; struct ux500_glue *glue; struct clk *clk; - + int musbid; int ret = -ENOMEM; glue = kzalloc(sizeof(*glue), GFP_KERNEL); diff --git a/trunk/drivers/usb/otg/Kconfig b/trunk/drivers/usb/otg/Kconfig index d8c8a42bff3e..6223062d5d1b 100644 --- a/trunk/drivers/usb/otg/Kconfig +++ b/trunk/drivers/usb/otg/Kconfig @@ -58,7 +58,7 @@ config USB_ULPI_VIEWPORT config TWL4030_USB tristate "TWL4030 USB Transceiver Driver" - depends on TWL4030_CORE && REGULATOR_TWL4030 + depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS select USB_OTG_UTILS help Enable this to support the USB OTG transceiver on TWL4030 @@ -68,7 +68,7 @@ config TWL4030_USB config TWL6030_USB tristate "TWL6030 USB Transceiver Driver" - depends on TWL4030_CORE && OMAP_USB2 + depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS select USB_OTG_UTILS help Enable this to support the USB OTG transceiver on TWL6030 diff --git a/trunk/drivers/usb/serial/keyspan.c b/trunk/drivers/usb/serial/keyspan.c index 7179b0c5f814..cff8dd5b462d 100644 --- a/trunk/drivers/usb/serial/keyspan.c +++ b/trunk/drivers/usb/serial/keyspan.c @@ -2430,7 +2430,7 @@ static void keyspan_release(struct usb_serial *serial) static int keyspan_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; - struct keyspan_port_private *s_priv; + struct keyspan_serial_private *s_priv; struct keyspan_port_private *p_priv; const struct keyspan_device_details *d_details; struct callbacks *cback; @@ -2445,7 +2445,6 @@ static int keyspan_port_probe(struct usb_serial_port *port) if (!p_priv) return -ENOMEM; - s_priv = usb_get_serial_data(port->serial); p_priv->device_details = d_details; /* Setup values for the various callback routines */ diff --git a/trunk/drivers/usb/serial/option.c b/trunk/drivers/usb/serial/option.c index 5dee7d61241e..edc64bb6f457 100644 --- a/trunk/drivers/usb/serial/option.c +++ b/trunk/drivers/usb/serial/option.c @@ -158,6 +158,7 @@ static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 +#define NOVATELWIRELESS_PRODUCT_E362 0x9010 #define NOVATELWIRELESS_PRODUCT_G1 0xA001 #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 @@ -193,6 +194,9 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 +#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ +#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ + #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a @@ -283,6 +287,7 @@ static void option_instat_callback(struct urb *urb); /* ALCATEL PRODUCTS */ #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S_X200 0x0000 +#define ALCATEL_PRODUCT_X220_X500D 0x0017 #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 @@ -706,6 +711,7 @@ static const struct usb_device_id option_ids[] = { { 
USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, @@ -728,6 +734,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1157,6 +1165,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), diff --git a/trunk/drivers/usb/serial/usb_wwan.c b/trunk/drivers/usb/serial/usb_wwan.c index 61a73ad1a187..a3e9c095f0d8 100644 --- a/trunk/drivers/usb/serial/usb_wwan.c +++ b/trunk/drivers/usb/serial/usb_wwan.c @@ -455,9 +455,6 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, struct usb_serial *serial = port->serial; struct urb *urb; - if (endpoint == -1) - return NULL; /* endpoint not needed */ - urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ if (urb == NULL) { dev_dbg(&serial->interface->dev, @@ -489,6 +486,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port) init_usb_anchor(&portdata->delayed); for (i = 0; i < N_IN_URB; i++) { + if (!port->bulk_in_size) + break; + buffer = (u8 *)__get_free_page(GFP_KERNEL); if (!buffer) goto bail_out_error; @@ -502,8 +502,8 @@ int usb_wwan_port_probe(struct usb_serial_port *port) } for (i = 0; i < N_OUT_URB; i++) { - if (port->bulk_out_endpointAddress == -1) - continue; + if (!port->bulk_out_size) + break; buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); if (!buffer) diff --git a/trunk/drivers/usb/storage/scsiglue.c b/trunk/drivers/usb/storage/scsiglue.c index a3d54366afcc..92f35abee92d 100644 --- a/trunk/drivers/usb/storage/scsiglue.c +++ b/trunk/drivers/usb/storage/scsiglue.c @@ -186,6 +186,12 @@ static int slave_configure(struct scsi_device *sdev) /* Some devices don't handle VPD pages correctly */ sdev->skip_vpd_pages = 1; + /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */ + sdev->no_report_opcodes = 1; + + /* Do not attempt to use WRITE SAME */ + sdev->no_write_same = 1; + /* Some disks return the total number of blocks in response * to READ CAPACITY rather than the highest block number. * If this device makes that mistake, tell the sd driver. 
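[Note on the usb-storage quirks above: no_report_opcodes and no_write_same switch these features off up front for USB bridges. For targets that are not flagged, sd withdraws WRITE SAME at completion time instead, when the command fails with ILLEGAL REQUEST and ASC 0x20 (INVALID COMMAND OPERATION CODE) or 0x24 (INVALID FIELD IN CDB), as in the sd_done() hunk earlier. A hedged standalone sketch of that sense-data test, layouts per SPC; this is not the kernel's scsi_normalize_sense().]

#include <stdbool.h>
#include <stdint.h>

#define ILLEGAL_REQUEST 0x05

/*
 * Return true when sense data says "the target does not support this opcode
 * or CDB field": sense key ILLEGAL REQUEST with ASC 0x20 or 0x24.  Handles
 * the two layouts defined by SPC: fixed format (response codes 0x70/0x71)
 * and descriptor format (0x72/0x73).
 */
static bool sense_says_unsupported(const uint8_t *sense, unsigned int len)
{
	uint8_t key, asc, ascq;

	if (len < 4)
		return false;
	if ((sense[0] & 0x7f) >= 0x72) {	/* descriptor format */
		key = sense[1] & 0x0f;
		asc = sense[2];
		ascq = sense[3];
	} else {				/* fixed format */
		if (len < 14)
			return false;
		key = sense[2] & 0x0f;
		asc = sense[12];
		ascq = sense[13];
	}
	return key == ILLEGAL_REQUEST &&
	       (asc == 0x20 || asc == 0x24) && ascq == 0x00;
}

[scsi_report_opcode() earlier applies the same test to decide that REPORT SUPPORTED OPERATION CODES itself is not implemented by the device.]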
*/ diff --git a/trunk/drivers/video/omap2/dss/dsi.c b/trunk/drivers/video/omap2/dss/dsi.c index d64ac3842884..bee92846cfab 100644 --- a/trunk/drivers/video/omap2/dss/dsi.c +++ b/trunk/drivers/video/omap2/dss/dsi.c @@ -365,11 +365,20 @@ struct platform_device *dsi_get_dsidev_from_id(int module) struct omap_dss_output *out; enum omap_dss_output_id id; - id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; + switch (module) { + case 0: + id = OMAP_DSS_OUTPUT_DSI1; + break; + case 1: + id = OMAP_DSS_OUTPUT_DSI2; + break; + default: + return NULL; + } out = omap_dss_get_output(id); - return out->pdev; + return out ? out->pdev : NULL; } static inline void dsi_write_reg(struct platform_device *dsidev, diff --git a/trunk/drivers/video/omap2/dss/dss.c b/trunk/drivers/video/omap2/dss/dss.c index 2ab1c3e96553..5f6eea801b06 100644 --- a/trunk/drivers/video/omap2/dss/dss.c +++ b/trunk/drivers/video/omap2/dss/dss.c @@ -697,11 +697,15 @@ static int dss_get_clocks(void) dss.dss_clk = clk; - clk = clk_get(NULL, dss.feat->clk_name); - if (IS_ERR(clk)) { - DSSERR("Failed to get %s\n", dss.feat->clk_name); - r = PTR_ERR(clk); - goto err; + if (dss.feat->clk_name) { + clk = clk_get(NULL, dss.feat->clk_name); + if (IS_ERR(clk)) { + DSSERR("Failed to get %s\n", dss.feat->clk_name); + r = PTR_ERR(clk); + goto err; + } + } else { + clk = NULL; } dss.dpll4_m4_ck = clk; @@ -805,10 +809,10 @@ static int __init dss_init_features(struct device *dev) if (cpu_is_omap24xx()) src = &omap24xx_dss_feats; - else if (cpu_is_omap34xx()) - src = &omap34xx_dss_feats; else if (cpu_is_omap3630()) src = &omap3630_dss_feats; + else if (cpu_is_omap34xx()) + src = &omap34xx_dss_feats; else if (cpu_is_omap44xx()) src = &omap44xx_dss_feats; else if (soc_is_omap54xx()) diff --git a/trunk/drivers/video/omap2/dss/hdmi.c b/trunk/drivers/video/omap2/dss/hdmi.c index a48a7dd75b33..8c9b8b3b7f77 100644 --- a/trunk/drivers/video/omap2/dss/hdmi.c +++ b/trunk/drivers/video/omap2/dss/hdmi.c @@ -644,8 +644,10 @@ static void hdmi_dump_regs(struct seq_file *s) { mutex_lock(&hdmi.lock); - if (hdmi_runtime_get()) + if (hdmi_runtime_get()) { + mutex_unlock(&hdmi.lock); return; + } hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s); hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s); diff --git a/trunk/drivers/video/omap2/omapfb/omapfb-ioctl.c b/trunk/drivers/video/omap2/omapfb/omapfb-ioctl.c index 606b89f12351..d630b26a005c 100644 --- a/trunk/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/trunk/drivers/video/omap2/omapfb/omapfb-ioctl.c @@ -787,7 +787,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) case OMAPFB_WAITFORVSYNC: DBG("ioctl WAITFORVSYNC\n"); - if (!display && !display->output && !display->output->manager) { + if (!display || !display->output || !display->output->manager) { r = -EINVAL; break; } diff --git a/trunk/drivers/virtio/virtio.c b/trunk/drivers/virtio/virtio.c index 1e8659ca27ef..809b0de59c09 100644 --- a/trunk/drivers/virtio/virtio.c +++ b/trunk/drivers/virtio/virtio.c @@ -225,8 +225,10 @@ EXPORT_SYMBOL_GPL(register_virtio_device); void unregister_virtio_device(struct virtio_device *dev) { + int index = dev->index; /* save for after device release */ + device_unregister(&dev->dev); - ida_simple_remove(&virtio_index_ida, dev->index); + ida_simple_remove(&virtio_index_ida, index); } EXPORT_SYMBOL_GPL(unregister_virtio_device); diff --git a/trunk/drivers/xen/Makefile b/trunk/drivers/xen/Makefile index 0e8637035457..74354708c6c4 100644 --- a/trunk/drivers/xen/Makefile +++ b/trunk/drivers/xen/Makefile @@ 
-2,6 +2,7 @@ ifneq ($(CONFIG_ARM),y) obj-y += manage.o balloon.o obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o endif +obj-$(CONFIG_X86) += fallback.o obj-y += grant-table.o features.o events.o obj-y += xenbus/ diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c index 912ac81b6dbf..0be4df39e953 100644 --- a/trunk/drivers/xen/events.c +++ b/trunk/drivers/xen/events.c @@ -1395,10 +1395,10 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); + irq_enter(); #ifdef CONFIG_X86 exit_idle(); #endif - irq_enter(); __xen_evtchn_do_upcall(); diff --git a/trunk/drivers/xen/fallback.c b/trunk/drivers/xen/fallback.c new file mode 100644 index 000000000000..0ef7c4d40f86 --- /dev/null +++ b/trunk/drivers/xen/fallback.c @@ -0,0 +1,80 @@ +#include +#include +#include +#include +#include +#include + +int xen_event_channel_op_compat(int cmd, void *arg) +{ + struct evtchn_op op; + int rc; + + op.cmd = cmd; + memcpy(&op.u, arg, sizeof(op.u)); + rc = _hypercall1(int, event_channel_op_compat, &op); + + switch (cmd) { + case EVTCHNOP_close: + case EVTCHNOP_send: + case EVTCHNOP_bind_vcpu: + case EVTCHNOP_unmask: + /* no output */ + break; + +#define COPY_BACK(eop) \ + case EVTCHNOP_##eop: \ + memcpy(arg, &op.u.eop, sizeof(op.u.eop)); \ + break + + COPY_BACK(bind_interdomain); + COPY_BACK(bind_virq); + COPY_BACK(bind_pirq); + COPY_BACK(status); + COPY_BACK(alloc_unbound); + COPY_BACK(bind_ipi); +#undef COPY_BACK + + default: + WARN_ON(rc != -ENOSYS); + break; + } + + return rc; +} +EXPORT_SYMBOL_GPL(xen_event_channel_op_compat); + +int HYPERVISOR_physdev_op_compat(int cmd, void *arg) +{ + struct physdev_op op; + int rc; + + op.cmd = cmd; + memcpy(&op.u, arg, sizeof(op.u)); + rc = _hypercall1(int, physdev_op_compat, &op); + + switch (cmd) { + case PHYSDEVOP_IRQ_UNMASK_NOTIFY: + case PHYSDEVOP_set_iopl: + case PHYSDEVOP_set_iobitmap: + case PHYSDEVOP_apic_write: + /* no output */ + break; + +#define COPY_BACK(pop, fld) \ + case PHYSDEVOP_##pop: \ + memcpy(arg, &op.u.fld, sizeof(op.u.fld)); \ + break + + COPY_BACK(irq_status_query, irq_status_query); + COPY_BACK(apic_read, apic_op); + COPY_BACK(ASSIGN_VECTOR, irq_op); +#undef COPY_BACK + + default: + WARN_ON(rc != -ENOSYS); + break; + } + + return rc; +} diff --git a/trunk/drivers/xen/privcmd.c b/trunk/drivers/xen/privcmd.c index 8adb9cc267f9..71f5c459b088 100644 --- a/trunk/drivers/xen/privcmd.c +++ b/trunk/drivers/xen/privcmd.c @@ -361,13 +361,13 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) down_write(&mm->mmap_sem); vma = find_vma(mm, m.addr); - ret = -EINVAL; if (!vma || vma->vm_ops != &privcmd_vm_ops || (m.addr != vma->vm_start) || ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) || !privcmd_enforce_singleshot_mapping(vma)) { up_write(&mm->mmap_sem); + ret = -EINVAL; goto out; } @@ -383,12 +383,16 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) up_write(&mm->mmap_sem); - if (state.global_error && (version == 1)) { - /* Write back errors in second pass. */ - state.user_mfn = (xen_pfn_t *)m.arr; - state.err = err_array; - ret = traverse_pages(m.num, sizeof(xen_pfn_t), - &pagelist, mmap_return_errors_v1, &state); + if (version == 1) { + if (state.global_error) { + /* Write back errors in second pass. 
*/ + state.user_mfn = (xen_pfn_t *)m.arr; + state.err = err_array; + ret = traverse_pages(m.num, sizeof(xen_pfn_t), + &pagelist, mmap_return_errors_v1, &state); + } else + ret = 0; + } else if (version == 2) { ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); if (ret) diff --git a/trunk/fs/cifs/cifsacl.c b/trunk/fs/cifs/cifsacl.c index fc783e264420..0fb15bbbe43c 100644 --- a/trunk/fs/cifs/cifsacl.c +++ b/trunk/fs/cifs/cifsacl.c @@ -224,6 +224,13 @@ sid_to_str(struct cifs_sid *sidptr, char *sidstr) } } +static void +cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src) +{ + memcpy(dst, src, sizeof(*dst)); + dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS); +} + static void id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr, struct cifs_sid_id **psidid, char *typestr) @@ -248,7 +255,7 @@ id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr, } } - memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid)); + cifs_copy_sid(&(*psidid)->sid, sidptr); (*psidid)->time = jiffies - (SID_MAP_RETRY + 1); (*psidid)->refcount = 0; @@ -354,7 +361,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid) * any fields of the node after a reference is put . */ if (test_bit(SID_ID_MAPPED, &psidid->state)) { - memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid)); + cifs_copy_sid(ssid, &psidid->sid); psidid->time = jiffies; /* update ts for accessing */ goto id_sid_out; } @@ -370,14 +377,14 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid) if (IS_ERR(sidkey)) { rc = -EINVAL; cFYI(1, "%s: Can't map and id to a SID", __func__); + } else if (sidkey->datalen < sizeof(struct cifs_sid)) { + rc = -EIO; + cFYI(1, "%s: Downcall contained malformed key " + "(datalen=%hu)", __func__, sidkey->datalen); } else { lsid = (struct cifs_sid *)sidkey->payload.data; - memcpy(&psidid->sid, lsid, - sidkey->datalen < sizeof(struct cifs_sid) ? - sidkey->datalen : sizeof(struct cifs_sid)); - memcpy(ssid, &psidid->sid, - sidkey->datalen < sizeof(struct cifs_sid) ? 
- sidkey->datalen : sizeof(struct cifs_sid)); + cifs_copy_sid(&psidid->sid, lsid); + cifs_copy_sid(ssid, &psidid->sid); set_bit(SID_ID_MAPPED, &psidid->state); key_put(sidkey); kfree(psidid->sidstr); @@ -396,7 +403,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid) return rc; } if (test_bit(SID_ID_MAPPED, &psidid->state)) - memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid)); + cifs_copy_sid(ssid, &psidid->sid); else rc = -EINVAL; } @@ -675,8 +682,6 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid) static void copy_sec_desc(const struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, __u32 sidsoffset) { - int i; - struct cifs_sid *owner_sid_ptr, *group_sid_ptr; struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr; @@ -692,26 +697,14 @@ static void copy_sec_desc(const struct cifs_ntsd *pntsd, owner_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->osidoffset)); nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset); - - nowner_sid_ptr->revision = owner_sid_ptr->revision; - nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth; - for (i = 0; i < 6; i++) - nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i]; - for (i = 0; i < 5; i++) - nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i]; + cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr); /* copy group sid */ group_sid_ptr = (struct cifs_sid *)((char *)pntsd + le32_to_cpu(pntsd->gsidoffset)); ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset + sizeof(struct cifs_sid)); - - ngroup_sid_ptr->revision = group_sid_ptr->revision; - ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth; - for (i = 0; i < 6; i++) - ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i]; - for (i = 0; i < 5; i++) - ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i]; + cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr); return; } @@ -1120,8 +1113,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, kfree(nowner_sid_ptr); return rc; } - memcpy(owner_sid_ptr, nowner_sid_ptr, - sizeof(struct cifs_sid)); + cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr); kfree(nowner_sid_ptr); *aclflag = CIFS_ACL_OWNER; } @@ -1139,8 +1131,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, kfree(ngroup_sid_ptr); return rc; } - memcpy(group_sid_ptr, ngroup_sid_ptr, - sizeof(struct cifs_sid)); + cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr); kfree(ngroup_sid_ptr); *aclflag = CIFS_ACL_GROUP; } diff --git a/trunk/fs/cifs/dir.c b/trunk/fs/cifs/dir.c index 7c0a81283645..d3671f2acb29 100644 --- a/trunk/fs/cifs/dir.c +++ b/trunk/fs/cifs/dir.c @@ -398,7 +398,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, * in network traffic in the other paths. */ if (!(oflags & O_CREAT)) { - struct dentry *res = cifs_lookup(inode, direntry, 0); + struct dentry *res; + + /* + * Check for hashed negative dentry. We have already revalidated + * the dentry and it is fine. No need to perform another lookup. 
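[Note on the cifsacl.c hunks a few lines up: the open-coded SID field copies in copy_sec_desc() and the datalen-bounded memcpy()s in id_to_sid() are all funneled through the new cifs_copy_sid() helper, which clamps the advertised sub-authority count. The pattern as a standalone sketch; the structure layout and the NUM_SUBAUTHS value of 5 are assumptions based on cifsacl.h of this era.]

#include <stdint.h>
#include <string.h>

#define NUM_SUBAUTHS 5	/* assumed, per cifsacl.h at the time */

struct sid {
	uint8_t  revision;
	uint8_t  num_subauth;
	uint8_t  authority[6];
	uint32_t sub_auth[NUM_SUBAUTHS];
};

/*
 * Copy a SID and clamp the advertised sub-authority count, mirroring
 * cifs_copy_sid(): even if the source claims more sub-authorities than the
 * structure holds, any later loop bounded by dst->num_subauth stays inside
 * sub_auth[].
 */
static void copy_sid(struct sid *dst, const struct sid *src)
{
	memcpy(dst, src, sizeof(*dst));
	if (dst->num_subauth > NUM_SUBAUTHS)
		dst->num_subauth = NUM_SUBAUTHS;
}

[Centralizing the clamp also removes the two call sites that previously sized the memcpy() from sidkey->datalen by hand.]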
+ */ + if (!d_unhashed(direntry)) + return -ENOENT; + + res = cifs_lookup(inode, direntry, 0); if (IS_ERR(res)) return PTR_ERR(res); diff --git a/trunk/fs/eventpoll.c b/trunk/fs/eventpoll.c index da72250ddc1c..cd96649bfe62 100644 --- a/trunk/fs/eventpoll.c +++ b/trunk/fs/eventpoll.c @@ -346,7 +346,7 @@ static inline struct epitem *ep_item_from_epqueue(poll_table *p) /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */ static inline int ep_op_has_event(int op) { - return op == EPOLL_CTL_ADD || op == EPOLL_CTL_MOD; + return op != EPOLL_CTL_DEL; } /* Initialize the poll safe wake up structure */ @@ -676,34 +676,6 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) return 0; } -/* - * Disables a "struct epitem" in the eventpoll set. Returns -EBUSY if the item - * had no event flags set, indicating that another thread may be currently - * handling that item's events (in the case that EPOLLONESHOT was being - * used). Otherwise a zero result indicates that the item has been disabled - * from receiving events. A disabled item may be re-enabled via - * EPOLL_CTL_MOD. Must be called with "mtx" held. - */ -static int ep_disable(struct eventpoll *ep, struct epitem *epi) -{ - int result = 0; - unsigned long flags; - - spin_lock_irqsave(&ep->lock, flags); - if (epi->event.events & ~EP_PRIVATE_BITS) { - if (ep_is_linked(&epi->rdllink)) - list_del_init(&epi->rdllink); - /* Ensure ep_poll_callback will not add epi back onto ready - list: */ - epi->event.events &= EP_PRIVATE_BITS; - } - else - result = -EBUSY; - spin_unlock_irqrestore(&ep->lock, flags); - - return result; -} - static void ep_free(struct eventpoll *ep) { struct rb_node *rbp; @@ -1048,6 +1020,8 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) rb_insert_color(&epi->rbn, &ep->rbr); } + + #define PATH_ARR_SIZE 5 /* * These are the number paths of length 1 to 5, that we are allowing to emanate @@ -1813,12 +1787,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, } else error = -ENOENT; break; - case EPOLL_CTL_DISABLE: - if (epi) - error = ep_disable(ep, epi); - else - error = -ENOENT; - break; } mutex_unlock(&ep->mtx); diff --git a/trunk/fs/ext3/balloc.c b/trunk/fs/ext3/balloc.c index 7320a66e958f..22548f56197b 100644 --- a/trunk/fs/ext3/balloc.c +++ b/trunk/fs/ext3/balloc.c @@ -2101,8 +2101,9 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range) end = start + (range->len >> sb->s_blocksize_bits) - 1; minlen = range->minlen >> sb->s_blocksize_bits; - if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) || - unlikely(start >= max_blks)) + if (minlen > EXT3_BLOCKS_PER_GROUP(sb) || + start >= max_blks || + range->len < sb->s_blocksize) return -EINVAL; if (end >= max_blks) end = max_blks - 1; diff --git a/trunk/fs/file.c b/trunk/fs/file.c index 708d997a7748..7cb71b992603 100644 --- a/trunk/fs/file.c +++ b/trunk/fs/file.c @@ -685,7 +685,6 @@ void do_close_on_exec(struct files_struct *files) struct fdtable *fdt; /* exec unshares first */ - BUG_ON(atomic_read(&files->count) != 1); spin_lock(&files->file_lock); for (i = 0; ; i++) { unsigned long set; diff --git a/trunk/fs/gfs2/file.c b/trunk/fs/gfs2/file.c index 0def0504afc1..e056b4ce4877 100644 --- a/trunk/fs/gfs2/file.c +++ b/trunk/fs/gfs2/file.c @@ -516,15 +516,13 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma) struct gfs2_holder i_gh; int error; - gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); - error = gfs2_glock_nq(&i_gh); - if (error == 0) { - file_accessed(file); - 
gfs2_glock_dq(&i_gh); - } - gfs2_holder_uninit(&i_gh); + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, + &i_gh); if (error) return error; + /* grab lock to update inode */ + gfs2_glock_dq_uninit(&i_gh); + file_accessed(file); } vma->vm_ops = &gfs2_vm_ops; @@ -677,10 +675,8 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov, size_t writesize = iov_length(iov, nr_segs); struct dentry *dentry = file->f_dentry; struct gfs2_inode *ip = GFS2_I(dentry->d_inode); - struct gfs2_sbd *sdp; int ret; - sdp = GFS2_SB(file->f_mapping->host); ret = gfs2_rs_alloc(ip); if (ret) return ret; diff --git a/trunk/fs/gfs2/lops.c b/trunk/fs/gfs2/lops.c index 8ff95a2d54ee..9ceccb1595a3 100644 --- a/trunk/fs/gfs2/lops.c +++ b/trunk/fs/gfs2/lops.c @@ -393,12 +393,10 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) struct gfs2_meta_header *mh; struct gfs2_trans *tr; - lock_buffer(bd->bd_bh); - gfs2_log_lock(sdp); tr = current->journal_info; tr->tr_touched = 1; if (!list_empty(&bd->bd_list)) - goto out; + return; set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); mh = (struct gfs2_meta_header *)bd->bd_bh->b_data; @@ -414,9 +412,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) sdp->sd_log_num_buf++; list_add(&bd->bd_list, &sdp->sd_log_le_buf); tr->tr_num_buf_new++; -out: - gfs2_log_unlock(sdp); - unlock_buffer(bd->bd_bh); } static void gfs2_check_magic(struct buffer_head *bh) @@ -621,7 +616,6 @@ static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) static void revoke_lo_before_commit(struct gfs2_sbd *sdp) { - struct gfs2_log_descriptor *ld; struct gfs2_meta_header *mh; unsigned int offset; struct list_head *head = &sdp->sd_log_le_revoke; @@ -634,7 +628,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp) length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64)); page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke); - ld = page_address(page); offset = sizeof(struct gfs2_log_descriptor); list_for_each_entry(bd, head, bd_list) { @@ -777,12 +770,10 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) struct address_space *mapping = bd->bd_bh->b_page->mapping; struct gfs2_inode *ip = GFS2_I(mapping->host); - lock_buffer(bd->bd_bh); - gfs2_log_lock(sdp); if (tr) tr->tr_touched = 1; if (!list_empty(&bd->bd_list)) - goto out; + return; set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); if (gfs2_is_jdata(ip)) { @@ -793,9 +784,6 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) } else { list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered); } -out: - gfs2_log_unlock(sdp); - unlock_buffer(bd->bd_bh); } /** diff --git a/trunk/fs/gfs2/quota.c b/trunk/fs/gfs2/quota.c index 40c4b0d42fa8..c5af8e18f27a 100644 --- a/trunk/fs/gfs2/quota.c +++ b/trunk/fs/gfs2/quota.c @@ -497,8 +497,11 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid) struct gfs2_quota_data **qd; int error; - if (ip->i_res == NULL) - gfs2_rs_alloc(ip); + if (ip->i_res == NULL) { + error = gfs2_rs_alloc(ip); + if (error) + return error; + } qd = ip->i_res->rs_qa_qd; diff --git a/trunk/fs/gfs2/rgrp.c b/trunk/fs/gfs2/rgrp.c index 3cc402ce6fea..38fe18f2f055 100644 --- a/trunk/fs/gfs2/rgrp.c +++ b/trunk/fs/gfs2/rgrp.c @@ -553,7 +553,6 @@ void gfs2_free_clones(struct gfs2_rgrpd *rgd) */ int gfs2_rs_alloc(struct gfs2_inode *ip) { - int error = 0; struct gfs2_blkreserv *res; if 
(ip->i_res) @@ -561,7 +560,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip) res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS); if (!res) - error = -ENOMEM; + return -ENOMEM; RB_CLEAR_NODE(&res->rs_node); @@ -571,7 +570,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip) else ip->i_res = res; up_write(&ip->i_rw_mutex); - return error; + return 0; } static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) @@ -1263,7 +1262,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp) int ret = 0; u64 amt; u64 trimmed = 0; + u64 start, end, minlen; unsigned int x; + unsigned bs_shift = sdp->sd_sb.sb_bsize_shift; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1271,19 +1272,25 @@ int gfs2_fitrim(struct file *filp, void __user *argp) if (!blk_queue_discard(q)) return -EOPNOTSUPP; - if (argp == NULL) { - r.start = 0; - r.len = ULLONG_MAX; - r.minlen = 0; - } else if (copy_from_user(&r, argp, sizeof(r))) + if (copy_from_user(&r, argp, sizeof(r))) return -EFAULT; ret = gfs2_rindex_update(sdp); if (ret) return ret; - rgd = gfs2_blk2rgrpd(sdp, r.start, 0); - rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0); + start = r.start >> bs_shift; + end = start + (r.len >> bs_shift); + minlen = max_t(u64, r.minlen, + q->limits.discard_granularity) >> bs_shift; + + rgd = gfs2_blk2rgrpd(sdp, start, 0); + rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0); + + if (end <= start || + minlen > sdp->sd_max_rg_data || + start > rgd_end->rd_data0 + rgd_end->rd_data) + return -EINVAL; while (1) { @@ -1295,7 +1302,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp) /* Trim each bitmap in the rgrp */ for (x = 0; x < rgd->rd_length; x++) { struct gfs2_bitmap *bi = rgd->rd_bits + x; - ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt); + ret = gfs2_rgrp_send_discards(sdp, + rgd->rd_data0, NULL, bi, minlen, + &amt); if (ret) { gfs2_glock_dq_uninit(&gh); goto out; @@ -1324,7 +1333,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp) out: r.len = trimmed << 9; - if (argp && copy_to_user(argp, &r, sizeof(r))) + if (copy_to_user(argp, &r, sizeof(r))) return -EFAULT; return ret; diff --git a/trunk/fs/gfs2/super.c b/trunk/fs/gfs2/super.c index bc737261f234..d6488674d916 100644 --- a/trunk/fs/gfs2/super.c +++ b/trunk/fs/gfs2/super.c @@ -810,7 +810,8 @@ static void gfs2_dirty_inode(struct inode *inode, int flags) return; } need_unlock = 1; - } + } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) + return; if (current->journal_info == NULL) { ret = gfs2_trans_begin(sdp, RES_DINODE, 0); diff --git a/trunk/fs/gfs2/trans.c b/trunk/fs/gfs2/trans.c index adbd27875ef9..413627072f36 100644 --- a/trunk/fs/gfs2/trans.c +++ b/trunk/fs/gfs2/trans.c @@ -155,14 +155,22 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta) struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_bufdata *bd; + lock_buffer(bh); + gfs2_log_lock(sdp); bd = bh->b_private; if (bd) gfs2_assert(sdp, bd->bd_gl == gl); else { + gfs2_log_unlock(sdp); + unlock_buffer(bh); gfs2_attach_bufdata(gl, bh, meta); bd = bh->b_private; + lock_buffer(bh); + gfs2_log_lock(sdp); } lops_add(sdp, bd); + gfs2_log_unlock(sdp); + unlock_buffer(bh); } void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) diff --git a/trunk/fs/jffs2/file.c b/trunk/fs/jffs2/file.c index 60ef3fb707ff..1506673c087e 100644 --- a/trunk/fs/jffs2/file.c +++ b/trunk/fs/jffs2/file.c @@ -138,33 +138,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct page *pg; struct inode *inode = 
mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + struct jffs2_raw_inode ri; + uint32_t alloc_len = 0; pgoff_t index = pos >> PAGE_CACHE_SHIFT; uint32_t pageofs = index << PAGE_CACHE_SHIFT; int ret = 0; + jffs2_dbg(1, "%s()\n", __func__); + + if (pageofs > inode->i_size) { + ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); + if (ret) + return ret; + } + + mutex_lock(&f->sem); pg = grab_cache_page_write_begin(mapping, index, flags); - if (!pg) + if (!pg) { + if (alloc_len) + jffs2_complete_reservation(c); + mutex_unlock(&f->sem); return -ENOMEM; + } *pagep = pg; - jffs2_dbg(1, "%s()\n", __func__); - - if (pageofs > inode->i_size) { + if (alloc_len) { /* Make new hole frag from old EOF to new page */ - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); - struct jffs2_raw_inode ri; struct jffs2_full_dnode *fn; - uint32_t alloc_len; jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", (unsigned int)inode->i_size, pageofs); - ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, - ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); - if (ret) - goto out_page; - - mutex_lock(&f->sem); memset(&ri, 0, sizeof(ri)); ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); @@ -191,7 +197,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, if (IS_ERR(fn)) { ret = PTR_ERR(fn); jffs2_complete_reservation(c); - mutex_unlock(&f->sem); goto out_page; } ret = jffs2_add_full_dnode_to_inode(c, f, fn); @@ -206,12 +211,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); jffs2_complete_reservation(c); - mutex_unlock(&f->sem); goto out_page; } jffs2_complete_reservation(c); inode->i_size = pageofs; - mutex_unlock(&f->sem); } /* @@ -220,18 +223,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, * case of a short-copy. */ if (!PageUptodate(pg)) { - mutex_lock(&f->sem); ret = jffs2_do_readpage_nolock(inode, pg); - mutex_unlock(&f->sem); if (ret) goto out_page; } + mutex_unlock(&f->sem); jffs2_dbg(1, "end write_begin(). 
pg->flags %lx\n", pg->flags); return ret; out_page: unlock_page(pg); page_cache_release(pg); + mutex_unlock(&f->sem); return ret; } diff --git a/trunk/fs/notify/fanotify/fanotify.c b/trunk/fs/notify/fanotify/fanotify.c index f35794b97e8e..a50636025364 100644 --- a/trunk/fs/notify/fanotify/fanotify.c +++ b/trunk/fs/notify/fanotify/fanotify.c @@ -21,6 +21,7 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new) if ((old->path.mnt == new->path.mnt) && (old->path.dentry == new->path.dentry)) return true; + break; case (FSNOTIFY_EVENT_NONE): return true; default: diff --git a/trunk/fs/notify/fanotify/fanotify_user.c b/trunk/fs/notify/fanotify/fanotify_user.c index 721d692fa8d4..6fcaeb8c902e 100644 --- a/trunk/fs/notify/fanotify/fanotify_user.c +++ b/trunk/fs/notify/fanotify/fanotify_user.c @@ -258,7 +258,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, if (ret) goto out_close_fd; - fd_install(fd, f); + if (fd != FAN_NOFD) + fd_install(fd, f); return fanotify_event_metadata.event_len; out_close_fd: diff --git a/trunk/fs/proc/base.c b/trunk/fs/proc/base.c index 144a96732dd7..3c231adf8450 100644 --- a/trunk/fs/proc/base.c +++ b/trunk/fs/proc/base.c @@ -873,6 +873,113 @@ static const struct file_operations proc_environ_operations = { .release = mem_release, }; +static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); + char buffer[PROC_NUMBUF]; + int oom_adj = OOM_ADJUST_MIN; + size_t len; + unsigned long flags; + + if (!task) + return -ESRCH; + if (lock_task_sighand(task, &flags)) { + if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) + oom_adj = OOM_ADJUST_MAX; + else + oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / + OOM_SCORE_ADJ_MAX; + unlock_task_sighand(task, &flags); + } + put_task_struct(task); + len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj); + return simple_read_from_buffer(buf, count, ppos, buffer, len); +} + +static ssize_t oom_adj_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct task_struct *task; + char buffer[PROC_NUMBUF]; + int oom_adj; + unsigned long flags; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtoint(strstrip(buffer), 0, &oom_adj); + if (err) + goto out; + if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) && + oom_adj != OOM_DISABLE) { + err = -EINVAL; + goto out; + } + + task = get_proc_task(file->f_path.dentry->d_inode); + if (!task) { + err = -ESRCH; + goto out; + } + + task_lock(task); + if (!task->mm) { + err = -EINVAL; + goto err_task_lock; + } + + if (!lock_task_sighand(task, &flags)) { + err = -ESRCH; + goto err_task_lock; + } + + /* + * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum + * value is always attainable. + */ + if (oom_adj == OOM_ADJUST_MAX) + oom_adj = OOM_SCORE_ADJ_MAX; + else + oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; + + if (oom_adj < task->signal->oom_score_adj && + !capable(CAP_SYS_RESOURCE)) { + err = -EACCES; + goto err_sighand; + } + + /* + * /proc/pid/oom_adj is provided for legacy purposes, ask users to use + * /proc/pid/oom_score_adj instead. 
+ */ + printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n", + current->comm, task_pid_nr(current), task_pid_nr(task), + task_pid_nr(task)); + + task->signal->oom_score_adj = oom_adj; + trace_oom_score_adj_update(task); +err_sighand: + unlock_task_sighand(task, &flags); +err_task_lock: + task_unlock(task); + put_task_struct(task); +out: + return err < 0 ? err : count; +} + +static const struct file_operations proc_oom_adj_operations = { + .read = oom_adj_read, + .write = oom_adj_write, + .llseek = generic_file_llseek, +}; + static ssize_t oom_score_adj_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -2598,6 +2705,7 @@ static const struct pid_entry tgid_base_stuff[] = { REG("cgroup", S_IRUGO, proc_cgroup_operations), #endif INF("oom_score", S_IRUGO, proc_oom_score), + REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), @@ -2964,6 +3072,7 @@ static const struct pid_entry tid_base_stuff[] = { REG("cgroup", S_IRUGO, proc_cgroup_operations), #endif INF("oom_score", S_IRUGO, proc_oom_score), + REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), diff --git a/trunk/fs/pstore/platform.c b/trunk/fs/pstore/platform.c index a40da07e93d6..947fbe06c3b1 100644 --- a/trunk/fs/pstore/platform.c +++ b/trunk/fs/pstore/platform.c @@ -161,6 +161,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c) while (s < e) { unsigned long flags; + u64 id; if (c > psinfo->bufsize) c = psinfo->bufsize; @@ -172,7 +173,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c) spin_lock_irqsave(&psinfo->buf_lock, flags); } memcpy(psinfo->buf, s, c); - psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo); + psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, c, psinfo); spin_unlock_irqrestore(&psinfo->buf_lock, flags); s += c; c = e - s; diff --git a/trunk/fs/reiserfs/inode.c b/trunk/fs/reiserfs/inode.c index f27f01a98aa2..d83736fbc26c 100644 --- a/trunk/fs/reiserfs/inode.c +++ b/trunk/fs/reiserfs/inode.c @@ -1782,8 +1782,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, BUG_ON(!th->t_trans_id); - dquot_initialize(inode); + reiserfs_write_unlock(inode->i_sb); err = dquot_alloc_inode(inode); + reiserfs_write_lock(inode->i_sb); if (err) goto out_end_trans; if (!dir->i_nlink) { @@ -1979,8 +1980,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, out_end_trans: journal_end(th, th->t_super, th->t_blocks_allocated); + reiserfs_write_unlock(inode->i_sb); /* Drop can be outside and it needs more credits so it's better to have it outside */ dquot_drop(inode); + reiserfs_write_lock(inode->i_sb); inode->i_flags |= S_NOQUOTA; make_bad_inode(inode); @@ -3103,10 +3106,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) /* must be turned off for recursive notify_change calls */ ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID); - depth = reiserfs_write_lock_once(inode->i_sb); if (is_quota_modification(inode, attr)) dquot_initialize(inode); - + depth = reiserfs_write_lock_once(inode->i_sb); if (attr->ia_valid & ATTR_SIZE) { /* version 2 items will be caught by the s_maxbytes check ** done for us in vmtruncate @@ 
-3170,7 +3172,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) error = journal_begin(&th, inode->i_sb, jbegin_count); if (error) goto out; + reiserfs_write_unlock_once(inode->i_sb, depth); error = dquot_transfer(inode, attr); + depth = reiserfs_write_lock_once(inode->i_sb); if (error) { journal_end(&th, inode->i_sb, jbegin_count); goto out; diff --git a/trunk/fs/reiserfs/stree.c b/trunk/fs/reiserfs/stree.c index f8afa4b162b8..2f40a4c70a4d 100644 --- a/trunk/fs/reiserfs/stree.c +++ b/trunk/fs/reiserfs/stree.c @@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree key2type(&(key->on_disk_key))); #endif + reiserfs_write_unlock(inode->i_sb); retval = dquot_alloc_space_nodirty(inode, pasted_size); + reiserfs_write_lock(inode->i_sb); if (retval) { pathrelse(search_path); return retval; @@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, "reiserquota insert_item(): allocating %u id=%u type=%c", quota_bytes, inode->i_uid, head2type(ih)); #endif + reiserfs_write_unlock(inode->i_sb); /* We can't dirty inode here. It would be immediately written but * appropriate stat item isn't inserted yet... */ retval = dquot_alloc_space_nodirty(inode, quota_bytes); + reiserfs_write_lock(inode->i_sb); if (retval) { pathrelse(path); return retval; diff --git a/trunk/fs/reiserfs/super.c b/trunk/fs/reiserfs/super.c index 1078ae179993..418bdc3a57da 100644 --- a/trunk/fs/reiserfs/super.c +++ b/trunk/fs/reiserfs/super.c @@ -298,7 +298,9 @@ static int finish_unfinished(struct super_block *s) retval = remove_save_link_only(s, &save_link_key, 0); continue; } + reiserfs_write_unlock(s); dquot_initialize(inode); + reiserfs_write_lock(s); if (truncate && S_ISDIR(inode->i_mode)) { /* We got a truncate request for a dir which is impossible. @@ -1335,7 +1337,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) kfree(qf_names[i]); #endif err = -EINVAL; - goto out_err; + goto out_unlock; } #ifdef CONFIG_QUOTA handle_quota_files(s, qf_names, &qfmt); @@ -1379,7 +1381,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) if (blocks) { err = reiserfs_resize(s, blocks); if (err != 0) - goto out_err; + goto out_unlock; } if (*mount_flags & MS_RDONLY) { @@ -1389,9 +1391,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) /* it is read-only already */ goto out_ok; + /* + * Drop write lock. Quota will retake it when needed and lock + * ordering requires calling dquot_suspend() without it. + */ + reiserfs_write_unlock(s); err = dquot_suspend(s, -1); if (err < 0) goto out_err; + reiserfs_write_lock(s); /* try to remount file system with read-only permissions */ if (sb_umount_state(rs) == REISERFS_VALID_FS @@ -1401,7 +1409,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) err = journal_begin(&th, s, 10); if (err) - goto out_err; + goto out_unlock; /* Mounting a rw partition read-only. 
*/ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); @@ -1416,7 +1424,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) if (reiserfs_is_journal_aborted(journal)) { err = journal->j_errno; - goto out_err; + goto out_unlock; } handle_data_mode(s, mount_options); @@ -1425,7 +1433,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ err = journal_begin(&th, s, 10); if (err) - goto out_err; + goto out_unlock; /* Mount a partition which is read-only, read-write */ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); @@ -1442,10 +1450,16 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) SB_JOURNAL(s)->j_must_wait = 1; err = journal_end(&th, s, 10); if (err) - goto out_err; + goto out_unlock; if (!(*mount_flags & MS_RDONLY)) { + /* + * Drop write lock. Quota will retake it when needed and lock + * ordering requires calling dquot_resume() without it. + */ + reiserfs_write_unlock(s); dquot_resume(s, -1); + reiserfs_write_lock(s); finish_unfinished(s); reiserfs_xattr_init(s, *mount_flags); } @@ -1455,9 +1469,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) reiserfs_write_unlock(s); return 0; +out_unlock: + reiserfs_write_unlock(s); out_err: kfree(new_opts); - reiserfs_write_unlock(s); return err; } @@ -2095,13 +2110,15 @@ static int reiserfs_write_dquot(struct dquot *dquot) REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (ret) goto out; + reiserfs_write_unlock(dquot->dq_sb); ret = dquot_commit(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(dquot->dq_sb); return ret; } @@ -2117,13 +2134,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot) REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (ret) goto out; + reiserfs_write_unlock(dquot->dq_sb); ret = dquot_acquire(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(dquot->dq_sb); return ret; } @@ -2137,19 +2156,21 @@ static int reiserfs_release_dquot(struct dquot *dquot) ret = journal_begin(&th, dquot->dq_sb, REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); + reiserfs_write_unlock(dquot->dq_sb); if (ret) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); goto out; } ret = dquot_release(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: reiserfs_write_unlock(dquot->dq_sb); +out: return ret; } @@ -2174,11 +2195,13 @@ static int reiserfs_write_info(struct super_block *sb, int type) ret = journal_begin(&th, sb, 2); if (ret) goto out; + reiserfs_write_unlock(sb); ret = dquot_commit_info(sb, type); + reiserfs_write_lock(sb); err = journal_end(&th, sb, 2); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(sb); return ret; } @@ -2203,8 +2226,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, struct reiserfs_transaction_handle th; int opt = type == USRQUOTA ? 
REISERFS_USRQUOTA : REISERFS_GRPQUOTA; - if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) - return -EINVAL; + reiserfs_write_lock(sb); + if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) { + err = -EINVAL; + goto out; + } /* Quotafile not on the same filesystem? */ if (path->dentry->d_sb != sb) { @@ -2246,8 +2272,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, if (err) goto out; } - err = dquot_quota_on(sb, type, format_id, path); + reiserfs_write_unlock(sb); + return dquot_quota_on(sb, type, format_id, path); out: + reiserfs_write_unlock(sb); return err; } @@ -2320,7 +2348,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, tocopy = sb->s_blocksize - offset < towrite ? sb->s_blocksize - offset : towrite; tmp_bh.b_state = 0; + reiserfs_write_lock(sb); err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE); + reiserfs_write_unlock(sb); if (err) goto out; if (offset || tocopy != sb->s_blocksize) @@ -2336,10 +2366,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, flush_dcache_page(bh->b_page); set_buffer_uptodate(bh); unlock_buffer(bh); + reiserfs_write_lock(sb); reiserfs_prepare_for_journal(sb, bh, 1); journal_mark_dirty(current->journal_info, sb, bh); if (!journal_quota) reiserfs_add_ordered_list(inode, bh); + reiserfs_write_unlock(sb); brelse(bh); offset = 0; towrite -= tocopy; diff --git a/trunk/fs/ubifs/find.c b/trunk/fs/ubifs/find.c index 28ec13af28d9..2dcf3d473fec 100644 --- a/trunk/fs/ubifs/find.c +++ b/trunk/fs/ubifs/find.c @@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c) if (!lprops) { lprops = ubifs_fast_find_freeable(c); if (!lprops) { - ubifs_assert(c->freeable_cnt == 0); - if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { + /* + * The first condition means the following: go scan the + * LPT if there are uncategorized lprops, which means + * there may be freeable LEBs there (UBIFS does not + * store the information about freeable LEBs in the + * master node). 
+ */ + if (c->in_a_category_cnt != c->main_lebs || + c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { + ubifs_assert(c->freeable_cnt == 0); lprops = scan_for_leb_for_idx(c); if (IS_ERR(lprops)) { err = PTR_ERR(lprops); diff --git a/trunk/fs/ubifs/lprops.c b/trunk/fs/ubifs/lprops.c index e5a2a35a46dc..46190a7c42a6 100644 --- a/trunk/fs/ubifs/lprops.c +++ b/trunk/fs/ubifs/lprops.c @@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, default: ubifs_assert(0); } + lprops->flags &= ~LPROPS_CAT_MASK; lprops->flags |= cat; + c->in_a_category_cnt += 1; + ubifs_assert(c->in_a_category_cnt <= c->main_lebs); } /** @@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c, default: ubifs_assert(0); } + + c->in_a_category_cnt -= 1; + ubifs_assert(c->in_a_category_cnt >= 0); } /** diff --git a/trunk/fs/ubifs/ubifs.h b/trunk/fs/ubifs/ubifs.h index 5486346d0a3f..d133c276fe05 100644 --- a/trunk/fs/ubifs/ubifs.h +++ b/trunk/fs/ubifs/ubifs.h @@ -1183,6 +1183,8 @@ struct ubifs_debug_info; * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size) * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size) * @freeable_cnt: number of freeable LEBs in @freeable_list + * @in_a_category_cnt: count of lprops which are in a certain category, which + * basically meants that they were loaded from the flash * * @ltab_lnum: LEB number of LPT's own lprops table * @ltab_offs: offset of LPT's own lprops table @@ -1412,6 +1414,7 @@ struct ubifs_info { struct list_head freeable_list; struct list_head frdi_idx_list; int freeable_cnt; + int in_a_category_cnt; int ltab_lnum; int ltab_offs; diff --git a/trunk/fs/xfs/xfs_alloc.c b/trunk/fs/xfs/xfs_alloc.c index 4f33c32affe3..335206a9c698 100644 --- a/trunk/fs/xfs/xfs_alloc.c +++ b/trunk/fs/xfs/xfs_alloc.c @@ -1866,6 +1866,7 @@ xfs_alloc_fix_freelist( /* * Initialize the args structure. */ + memset(&targs, 0, sizeof(targs)); targs.tp = tp; targs.mp = mp; targs.agbp = agbp; @@ -2207,7 +2208,7 @@ xfs_alloc_read_agf( * group or loop over the allocation groups to find the result. */ int /* error */ -__xfs_alloc_vextent( +xfs_alloc_vextent( xfs_alloc_arg_t *args) /* allocation argument structure */ { xfs_agblock_t agsize; /* allocation group size */ @@ -2417,46 +2418,6 @@ __xfs_alloc_vextent( return error; } -static void -xfs_alloc_vextent_worker( - struct work_struct *work) -{ - struct xfs_alloc_arg *args = container_of(work, - struct xfs_alloc_arg, work); - unsigned long pflags; - - /* we are in a transaction context here */ - current_set_flags_nested(&pflags, PF_FSTRANS); - - args->result = __xfs_alloc_vextent(args); - complete(args->done); - - current_restore_flags_nested(&pflags, PF_FSTRANS); -} - -/* - * Data allocation requests often come in with little stack to work on. Push - * them off to a worker thread so there is lots of stack to use. Metadata - * requests, OTOH, are generally from low stack usage paths, so avoid the - * context switch overhead here. - */ -int -xfs_alloc_vextent( - struct xfs_alloc_arg *args) -{ - DECLARE_COMPLETION_ONSTACK(done); - - if (!args->userdata) - return __xfs_alloc_vextent(args); - - - args->done = &done; - INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker); - queue_work(xfs_alloc_wq, &args->work); - wait_for_completion(&done); - return args->result; -} - /* * Free an extent. 
* Just break up the extent address and hand off to xfs_free_ag_extent diff --git a/trunk/fs/xfs/xfs_alloc.h b/trunk/fs/xfs/xfs_alloc.h index 93be4a667ca1..feacb061bab7 100644 --- a/trunk/fs/xfs/xfs_alloc.h +++ b/trunk/fs/xfs/xfs_alloc.h @@ -120,9 +120,6 @@ typedef struct xfs_alloc_arg { char isfl; /* set if is freelist blocks - !acctg */ char userdata; /* set if this is user data */ xfs_fsblock_t firstblock; /* io first block allocated */ - struct completion *done; - struct work_struct work; - int result; } xfs_alloc_arg_t; /* diff --git a/trunk/fs/xfs/xfs_alloc_btree.c b/trunk/fs/xfs/xfs_alloc_btree.c index f1647caace8f..f7876c6d6165 100644 --- a/trunk/fs/xfs/xfs_alloc_btree.c +++ b/trunk/fs/xfs/xfs_alloc_btree.c @@ -121,6 +121,8 @@ xfs_allocbt_free_block( xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1, XFS_EXTENT_BUSY_SKIP_DISCARD); xfs_trans_agbtree_delta(cur->bc_tp, -1); + + xfs_trans_binval(cur->bc_tp, bp); return 0; } diff --git a/trunk/fs/xfs/xfs_aops.c b/trunk/fs/xfs/xfs_aops.c index e562dd43f41f..e57e2daa357c 100644 --- a/trunk/fs/xfs/xfs_aops.c +++ b/trunk/fs/xfs/xfs_aops.c @@ -481,11 +481,17 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) * * The fix is two passes across the ioend list - one to start writeback on the * buffer_heads, and then submit them for I/O on the second pass. + * + * If @fail is non-zero, it means that we have a situation where some part of + * the submission process has failed after we have marked paged for writeback + * and unlocked them. In this situation, we need to fail the ioend chain rather + * than submit it to IO. This typically only happens on a filesystem shutdown. */ STATIC void xfs_submit_ioend( struct writeback_control *wbc, - xfs_ioend_t *ioend) + xfs_ioend_t *ioend, + int fail) { xfs_ioend_t *head = ioend; xfs_ioend_t *next; @@ -506,6 +512,18 @@ xfs_submit_ioend( next = ioend->io_list; bio = NULL; + /* + * If we are failing the IO now, just mark the ioend with an + * error and finish it. This will run IO completion immediately + * as there is only one reference to the ioend at this point in + * time. + */ + if (fail) { + ioend->io_error = -fail; + xfs_finish_ioend(ioend); + continue; + } + for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { if (!bio) { @@ -1060,7 +1078,18 @@ xfs_vm_writepage( xfs_start_page_writeback(page, 1, count); - if (ioend && imap_valid) { + /* if there is no IO to be submitted for this page, we are done */ + if (!ioend) + return 0; + + ASSERT(iohead); + + /* + * Any errors from this point onwards need tobe reported through the IO + * completion path as we have marked the initial page as under writeback + * and unlocked it. + */ + if (imap_valid) { xfs_off_t end_index; end_index = imap.br_startoff + imap.br_blockcount; @@ -1079,20 +1108,15 @@ xfs_vm_writepage( wbc, end_index); } - if (iohead) { - /* - * Reserve log space if we might write beyond the on-disk - * inode size. - */ - if (ioend->io_type != XFS_IO_UNWRITTEN && - xfs_ioend_is_append(ioend)) { - err = xfs_setfilesize_trans_alloc(ioend); - if (err) - goto error; - } - xfs_submit_ioend(wbc, iohead); - } + /* + * Reserve log space if we might write beyond the on-disk inode size. 
+ */ + err = 0; + if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend)) + err = xfs_setfilesize_trans_alloc(ioend); + + xfs_submit_ioend(wbc, iohead, err); return 0; diff --git a/trunk/fs/xfs/xfs_attr_leaf.c b/trunk/fs/xfs/xfs_attr_leaf.c index d330111ca738..70eec1829776 100644 --- a/trunk/fs/xfs/xfs_attr_leaf.c +++ b/trunk/fs/xfs/xfs_attr_leaf.c @@ -1291,6 +1291,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, leaf2 = blk2->bp->b_addr; ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + ASSERT(leaf2->hdr.count == 0); args = state->args; trace_xfs_attr_leaf_rebalance(args); @@ -1361,6 +1362,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * I assert that since all callers pass in an empty * second buffer, this code should never execute. */ + ASSERT(0); /* * Figure the total bytes to be added to the destination leaf. @@ -1422,10 +1424,24 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, args->index2 = 0; args->blkno2 = blk2->blkno; } else { + /* + * On a double leaf split, the original attr location + * is already stored in blkno2/index2, so don't + * overwrite it overwise we corrupt the tree. + */ blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); - args->index = args->index2 = blk2->index; - args->blkno = args->blkno2 = blk2->blkno; + args->index = blk2->index; + args->blkno = blk2->blkno; + if (!state->extravalid) { + /* + * set the new attr location to match the old + * one and let the higher level split code + * decide where in the leaf to place it. + */ + args->index2 = blk2->index; + args->blkno2 = blk2->blkno; + } } } else { ASSERT(state->inleaf == 1); diff --git a/trunk/fs/xfs/xfs_bmap.c b/trunk/fs/xfs/xfs_bmap.c index 848ffa77707b..83d0cf3df930 100644 --- a/trunk/fs/xfs/xfs_bmap.c +++ b/trunk/fs/xfs/xfs_bmap.c @@ -2437,6 +2437,7 @@ xfs_bmap_btalloc( * Normal allocation, done through xfs_alloc_vextent. */ tryagain = isaligned = 0; + memset(&args, 0, sizeof(args)); args.tp = ap->tp; args.mp = mp; args.fsbno = ap->blkno; @@ -3082,6 +3083,7 @@ xfs_bmap_extents_to_btree( * Convert to a btree with two levels, one record in root. */ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); + memset(&args, 0, sizeof(args)); args.tp = tp; args.mp = mp; args.firstblock = *firstblock; @@ -3237,6 +3239,7 @@ xfs_bmap_local_to_extents( xfs_buf_t *bp; /* buffer for extent block */ xfs_bmbt_rec_host_t *ep;/* extent record pointer */ + memset(&args, 0, sizeof(args)); args.tp = tp; args.mp = ip->i_mount; args.firstblock = *firstblock; @@ -4616,12 +4619,11 @@ xfs_bmapi_delay( STATIC int -xfs_bmapi_allocate( - struct xfs_bmalloca *bma, - int flags) +__xfs_bmapi_allocate( + struct xfs_bmalloca *bma) { struct xfs_mount *mp = bma->ip->i_mount; - int whichfork = (flags & XFS_BMAPI_ATTRFORK) ? + int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ? XFS_ATTR_FORK : XFS_DATA_FORK; struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); int tmp_logflags = 0; @@ -4654,24 +4656,27 @@ xfs_bmapi_allocate( * Indicate if this is the first user data in the file, or just any * user data. */ - if (!(flags & XFS_BMAPI_METADATA)) { + if (!(bma->flags & XFS_BMAPI_METADATA)) { bma->userdata = (bma->offset == 0) ? XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA; } - bma->minlen = (flags & XFS_BMAPI_CONTIG) ? bma->length : 1; + bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? 
bma->length : 1; /* * Only want to do the alignment at the eof if it is userdata and * allocation length is larger than a stripe unit. */ if (mp->m_dalign && bma->length >= mp->m_dalign && - !(flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { + !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) { error = xfs_bmap_isaeof(bma, whichfork); if (error) return error; } + if (bma->flags & XFS_BMAPI_STACK_SWITCH) + bma->stack_switch = 1; + error = xfs_bmap_alloc(bma); if (error) return error; @@ -4706,7 +4711,7 @@ xfs_bmapi_allocate( * A wasdelay extent has been initialized, so shouldn't be flagged * as unwritten. */ - if (!bma->wasdel && (flags & XFS_BMAPI_PREALLOC) && + if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) && xfs_sb_version_hasextflgbit(&mp->m_sb)) bma->got.br_state = XFS_EXT_UNWRITTEN; @@ -4734,6 +4739,45 @@ xfs_bmapi_allocate( return 0; } +static void +xfs_bmapi_allocate_worker( + struct work_struct *work) +{ + struct xfs_bmalloca *args = container_of(work, + struct xfs_bmalloca, work); + unsigned long pflags; + + /* we are in a transaction context here */ + current_set_flags_nested(&pflags, PF_FSTRANS); + + args->result = __xfs_bmapi_allocate(args); + complete(args->done); + + current_restore_flags_nested(&pflags, PF_FSTRANS); +} + +/* + * Some allocation requests often come in with little stack to work on. Push + * them off to a worker thread so there is lots of stack to use. Otherwise just + * call directly to avoid the context switch overhead here. + */ +int +xfs_bmapi_allocate( + struct xfs_bmalloca *args) +{ + DECLARE_COMPLETION_ONSTACK(done); + + if (!args->stack_switch) + return __xfs_bmapi_allocate(args); + + + args->done = &done; + INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); + queue_work(xfs_alloc_wq, &args->work); + wait_for_completion(&done); + return args->result; +} + STATIC int xfs_bmapi_convert_unwritten( struct xfs_bmalloca *bma, @@ -4919,6 +4963,7 @@ xfs_bmapi_write( bma.conv = !!(flags & XFS_BMAPI_CONVERT); bma.wasdel = wasdelay; bma.offset = bno; + bma.flags = flags; /* * There's a 32/64 bit type mismatch between the @@ -4934,7 +4979,7 @@ xfs_bmapi_write( ASSERT(len > 0); ASSERT(bma.length > 0); - error = xfs_bmapi_allocate(&bma, flags); + error = xfs_bmapi_allocate(&bma); if (error) goto error0; if (bma.blkno == NULLFSBLOCK) diff --git a/trunk/fs/xfs/xfs_bmap.h b/trunk/fs/xfs/xfs_bmap.h index 803b56d7ce16..5f469c3516eb 100644 --- a/trunk/fs/xfs/xfs_bmap.h +++ b/trunk/fs/xfs/xfs_bmap.h @@ -77,6 +77,7 @@ typedef struct xfs_bmap_free * from written to unwritten, otherwise convert from unwritten to written. 
*/ #define XFS_BMAPI_CONVERT 0x040 +#define XFS_BMAPI_STACK_SWITCH 0x080 #define XFS_BMAPI_FLAGS \ { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ @@ -85,7 +86,8 @@ typedef struct xfs_bmap_free { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ { XFS_BMAPI_CONTIG, "CONTIG" }, \ - { XFS_BMAPI_CONVERT, "CONVERT" } + { XFS_BMAPI_CONVERT, "CONVERT" }, \ + { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" } static inline int xfs_bmapi_aflag(int w) @@ -133,6 +135,11 @@ typedef struct xfs_bmalloca { char userdata;/* set if is user data */ char aeof; /* allocated space at eof */ char conv; /* overwriting unwritten extents */ + char stack_switch; + int flags; + struct completion *done; + struct work_struct work; + int result; } xfs_bmalloca_t; /* diff --git a/trunk/fs/xfs/xfs_buf.c b/trunk/fs/xfs/xfs_buf.c index 933b7930b863..4b0b8dd1b7b0 100644 --- a/trunk/fs/xfs/xfs_buf.c +++ b/trunk/fs/xfs/xfs_buf.c @@ -1197,9 +1197,14 @@ xfs_buf_bio_end_io( { xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; - xfs_buf_ioerror(bp, -error); + /* + * don't overwrite existing errors - otherwise we can lose errors on + * buffers that require multiple bios to complete. + */ + if (!bp->b_error) + xfs_buf_ioerror(bp, -error); - if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) + if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); _xfs_buf_ioend(bp, 1); @@ -1279,6 +1284,11 @@ xfs_buf_ioapply_map( if (size) goto next_chunk; } else { + /* + * This is guaranteed not to be the last io reference count + * because the caller (xfs_buf_iorequest) holds a count itself. + */ + atomic_dec(&bp->b_io_remaining); xfs_buf_ioerror(bp, EIO); bio_put(bio); } diff --git a/trunk/fs/xfs/xfs_buf_item.c b/trunk/fs/xfs/xfs_buf_item.c index a8d0ed911196..becf4a97efc6 100644 --- a/trunk/fs/xfs/xfs_buf_item.c +++ b/trunk/fs/xfs/xfs_buf_item.c @@ -526,7 +526,25 @@ xfs_buf_item_unpin( } xfs_buf_relse(bp); } else if (freed && remove) { + /* + * There are currently two references to the buffer - the active + * LRU reference and the buf log item. What we are about to do + * here - simulate a failed IO completion - requires 3 + * references. + * + * The LRU reference is removed by the xfs_buf_stale() call. The + * buf item reference is removed by the xfs_buf_iodone() + * callback that is run by xfs_buf_do_callbacks() during ioend + * processing (via the bp->b_iodone callback), and then finally + * the ioend processing will drop the IO reference if the buffer + * is marked XBF_ASYNC. + * + * Hence we need to take an additional reference here so that IO + * completion processing doesn't free the buffer prematurely. + */ xfs_buf_lock(bp); + xfs_buf_hold(bp); + bp->b_flags |= XBF_ASYNC; xfs_buf_ioerror(bp, EIO); XFS_BUF_UNDONE(bp); xfs_buf_stale(bp); diff --git a/trunk/fs/xfs/xfs_fsops.c b/trunk/fs/xfs/xfs_fsops.c index c25b094efbf7..4beaede43277 100644 --- a/trunk/fs/xfs/xfs_fsops.c +++ b/trunk/fs/xfs/xfs_fsops.c @@ -399,9 +399,26 @@ xfs_growfs_data_private( /* update secondary superblocks. */ for (agno = 1; agno < nagcount; agno++) { - error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, + error = 0; + /* + * new secondary superblocks need to be zeroed, not read from + * disk as the contents of the new area we are growing into is + * completely unknown. 
+ */ + if (agno < oagcount) { + error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), XFS_FSS_TO_BB(mp, 1), 0, &bp); + } else { + bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp, + XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), + XFS_FSS_TO_BB(mp, 1), 0); + if (bp) + xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); + else + error = ENOMEM; + } + if (error) { xfs_warn(mp, "error %d reading secondary superblock for ag %d", @@ -423,7 +440,7 @@ xfs_growfs_data_private( break; /* no point in continuing */ } } - return 0; + return error; error0: xfs_trans_cancel(tp, XFS_TRANS_ABORT); diff --git a/trunk/fs/xfs/xfs_ialloc.c b/trunk/fs/xfs/xfs_ialloc.c index 445bf1aef31c..c5c4ef4f2bdb 100644 --- a/trunk/fs/xfs/xfs_ialloc.c +++ b/trunk/fs/xfs/xfs_ialloc.c @@ -250,6 +250,7 @@ xfs_ialloc_ag_alloc( /* boundary */ struct xfs_perag *pag; + memset(&args, 0, sizeof(args)); args.tp = tp; args.mp = tp->t_mountp; diff --git a/trunk/fs/xfs/xfs_inode.c b/trunk/fs/xfs/xfs_inode.c index 2778258fcfa2..1938b41ee9f5 100644 --- a/trunk/fs/xfs/xfs_inode.c +++ b/trunk/fs/xfs/xfs_inode.c @@ -1509,7 +1509,8 @@ xfs_ifree_cluster( * to mark all the active inodes on the buffer stale. */ bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, - mp->m_bsize * blks_per_cluster, 0); + mp->m_bsize * blks_per_cluster, + XBF_UNMAPPED); if (!bp) return ENOMEM; diff --git a/trunk/fs/xfs/xfs_ioctl.c b/trunk/fs/xfs/xfs_ioctl.c index 8305f2ac6773..c1df3c623de2 100644 --- a/trunk/fs/xfs/xfs_ioctl.c +++ b/trunk/fs/xfs/xfs_ioctl.c @@ -70,7 +70,7 @@ xfs_find_handle( int hsize; xfs_handle_t handle; struct inode *inode; - struct fd f; + struct fd f = {0}; struct path path; int error; struct xfs_inode *ip; diff --git a/trunk/fs/xfs/xfs_iomap.c b/trunk/fs/xfs/xfs_iomap.c index 973dff6ad935..7f537663365b 100644 --- a/trunk/fs/xfs/xfs_iomap.c +++ b/trunk/fs/xfs/xfs_iomap.c @@ -584,7 +584,9 @@ xfs_iomap_write_allocate( * pointer that the caller gave to us. */ error = xfs_bmapi_write(tp, ip, map_start_fsb, - count_fsb, 0, &first_block, 1, + count_fsb, + XFS_BMAPI_STACK_SWITCH, + &first_block, 1, imap, &nimaps, &free_list); if (error) goto trans_cancel; diff --git a/trunk/fs/xfs/xfs_log.c b/trunk/fs/xfs/xfs_log.c index 7f4f9370d0e7..4dad756962d0 100644 --- a/trunk/fs/xfs/xfs_log.c +++ b/trunk/fs/xfs/xfs_log.c @@ -2387,14 +2387,27 @@ xlog_state_do_callback( /* - * update the last_sync_lsn before we drop the + * Completion of a iclog IO does not imply that + * a transaction has completed, as transactions + * can be large enough to span many iclogs. We + * cannot change the tail of the log half way + * through a transaction as this may be the only + * transaction in the log and moving th etail to + * point to the middle of it will prevent + * recovery from finding the start of the + * transaction. Hence we should only update the + * last_sync_lsn if this iclog contains + * transaction completion callbacks on it. + * + * We have to do this before we drop the * icloglock to ensure we are the only one that * can update it. 
*/ ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); - atomic64_set(&log->l_last_sync_lsn, - be64_to_cpu(iclog->ic_header.h_lsn)); + if (iclog->ic_callback) + atomic64_set(&log->l_last_sync_lsn, + be64_to_cpu(iclog->ic_header.h_lsn)); } else ioerrors++; diff --git a/trunk/fs/xfs/xfs_log_recover.c b/trunk/fs/xfs/xfs_log_recover.c index 5da3ace352bf..d308749fabf1 100644 --- a/trunk/fs/xfs/xfs_log_recover.c +++ b/trunk/fs/xfs/xfs_log_recover.c @@ -3541,7 +3541,7 @@ xlog_do_recovery_pass( * - order is important. */ error = xlog_bread_offset(log, 0, - bblks - split_bblks, hbp, + bblks - split_bblks, dbp, offset + BBTOB(split_bblks)); if (error) goto bread_err2; diff --git a/trunk/include/drm/drmP.h b/trunk/include/drm/drmP.h index fad21c927a38..3fd82809b2d4 100644 --- a/trunk/include/drm/drmP.h +++ b/trunk/include/drm/drmP.h @@ -1431,8 +1431,6 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern u32 drm_vblank_count(struct drm_device *dev, int crtc); extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, struct timeval *vblanktime); -extern void drm_send_vblank_event(struct drm_device *dev, int crtc, - struct drm_pending_vblank_event *e); extern bool drm_handle_vblank(struct drm_device *dev, int crtc); extern int drm_vblank_get(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc); @@ -1505,7 +1503,6 @@ extern unsigned int drm_debug; extern unsigned int drm_vblank_offdelay; extern unsigned int drm_timestamp_precision; -extern unsigned int drm_timestamp_monotonic; extern struct class *drm_class; extern struct proc_dir_entry *drm_proc_root; diff --git a/trunk/include/drm/drm_crtc.h b/trunk/include/drm/drm_crtc.h index ee9b0b59237f..3fa18b7e9497 100644 --- a/trunk/include/drm/drm_crtc.h +++ b/trunk/include/drm/drm_crtc.h @@ -792,7 +792,6 @@ struct drm_mode_config { /* output poll support */ bool poll_enabled; - bool poll_running; struct delayed_work output_poll_work; /* pointers to standard properties */ @@ -888,14 +887,14 @@ extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_ extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); -extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); +extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode); extern void drm_mode_config_init(struct drm_device *dev); extern void drm_mode_config_reset(struct drm_device *dev); extern void drm_mode_config_cleanup(struct drm_device *dev); extern void drm_mode_set_name(struct drm_display_mode *mode); -extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); -extern int drm_mode_width(const struct drm_display_mode *mode); -extern int drm_mode_height(const struct drm_display_mode *mode); +extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2); +extern int drm_mode_width(struct drm_display_mode *mode); +extern int drm_mode_height(struct drm_display_mode *mode); /* for us by fb module */ extern int drm_mode_attachmode_crtc(struct drm_device *dev, @@ -920,6 +919,12 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, extern void drm_mode_connector_list_update(struct drm_connector *connector); extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, 
struct edid *edid); +extern int drm_connector_property_set_value(struct drm_connector *connector, + struct drm_property *property, + uint64_t value); +extern int drm_connector_property_get_value(struct drm_connector *connector, + struct drm_property *property, + uint64_t *value); extern int drm_object_property_set_value(struct drm_mode_object *obj, struct drm_property *property, uint64_t val); @@ -941,6 +946,8 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY); extern bool drm_crtc_in_use(struct drm_crtc *crtc); +extern void drm_connector_attach_property(struct drm_connector *connector, + struct drm_property *property, uint64_t init_val); extern void drm_object_attach_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t init_val); @@ -1030,7 +1037,6 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern u8 *drm_find_cea_extension(struct edid *edid); -extern u8 drm_match_cea_mode(struct drm_display_mode *to_match); extern bool drm_detect_hdmi_monitor(struct edid *edid); extern bool drm_detect_monitor_audio(struct edid *edid); extern int drm_mode_page_flip_ioctl(struct drm_device *dev, diff --git a/trunk/include/drm/drm_crtc_helper.h b/trunk/include/drm/drm_crtc_helper.h index f43d556bf40b..e01cc80c9c30 100644 --- a/trunk/include/drm/drm_crtc_helper.h +++ b/trunk/include/drm/drm_crtc_helper.h @@ -137,8 +137,6 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); -extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); - extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, struct drm_mode_fb_cmd2 *mode_cmd); @@ -164,7 +162,6 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev); extern void drm_kms_helper_poll_init(struct drm_device *dev); extern void drm_kms_helper_poll_fini(struct drm_device *dev); extern void drm_helper_hpd_irq_event(struct drm_device *dev); -extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); diff --git a/trunk/include/drm/drm_dp_helper.h b/trunk/include/drm/drm_dp_helper.h index e8e1417af3d9..fe061489f91f 100644 --- a/trunk/include/drm/drm_dp_helper.h +++ b/trunk/include/drm/drm_dp_helper.h @@ -25,7 +25,6 @@ #include #include -#include /* * Unless otherwise noted, all values are from the DP 1.1a spec. 
Note that @@ -312,14 +311,6 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 -/** - * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp - * aux algorithm - * @running: set by the algo indicating whether an i2c is ongoing or whether - * the i2c bus is quiescent - * @address: i2c target address for the currently ongoing transfer - * @aux_ch: driver callback to transfer a single byte of the i2c payload - */ struct i2c_algo_dp_aux_data { bool running; u16 address; @@ -331,34 +322,4 @@ struct i2c_algo_dp_aux_data { int i2c_dp_aux_add_bus(struct i2c_adapter *adapter); - -#define DP_LINK_STATUS_SIZE 6 -bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count); -bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count); -u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], - int lane); -u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], - int lane); - -#define DP_RECEIVER_CAP_SIZE 0xf -void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); -void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); - -u8 drm_dp_link_rate_to_bw_code(int link_rate); -int drm_dp_bw_code_to_link_rate(u8 link_bw); - -static inline int -drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); -} - -static inline u8 -drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; -} - #endif /* _DRM_DP_HELPER_H_ */ diff --git a/trunk/include/drm/drm_hashtab.h b/trunk/include/drm/drm_hashtab.h index fce2ef3fdfff..3650d5d011ee 100644 --- a/trunk/include/drm/drm_hashtab.h +++ b/trunk/include/drm/drm_hashtab.h @@ -61,19 +61,5 @@ extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); extern void drm_ht_remove(struct drm_open_hash *ht); -/* - * RCU-safe interface - * - * The user of this API needs to make sure that two or more instances of the - * hash table manipulation functions are never run simultaneously. - * The lookup function drm_ht_find_item_rcu may, however, run simultaneously - * with any of the manipulation functions as long as it's called from within - * an RCU read-locked section. 
- */ -#define drm_ht_insert_item_rcu drm_ht_insert_item -#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please -#define drm_ht_remove_key_rcu drm_ht_remove_key -#define drm_ht_remove_item_rcu drm_ht_remove_item -#define drm_ht_find_item_rcu drm_ht_find_item #endif diff --git a/trunk/include/drm/drm_pciids.h b/trunk/include/drm/drm_pciids.h index af1cbaf535ed..c5c35e629426 100644 --- a/trunk/include/drm/drm_pciids.h +++ b/trunk/include/drm/drm_pciids.h @@ -210,6 +210,7 @@ {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/trunk/include/drm/intel-gtt.h b/trunk/include/drm/intel-gtt.h index 6eb76a1f11ab..2e37e9f02e71 100644 --- a/trunk/include/drm/intel-gtt.h +++ b/trunk/include/drm/intel-gtt.h @@ -3,7 +3,7 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -struct intel_gtt { +const struct intel_gtt { /* Size of memory reserved for graphics by the BIOS */ unsigned int stolen_size; /* Total number of gtt entries. */ @@ -17,7 +17,6 @@ struct intel_gtt { unsigned int do_idle_maps : 1; /* Share the scratch page dma with ppgtts. */ dma_addr_t scratch_page_dma; - struct page *scratch_page; /* for ppgtt PDE access */ u32 __iomem *gtt; /* needed for ioremap in drm/i915 */ @@ -40,6 +39,10 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); #define AGP_DCACHE_MEMORY 1 #define AGP_PHYS_MEMORY 2 +/* New caching attributes for gen6/sandybridge */ +#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) +#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) + /* flag for GFDT type */ #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) diff --git a/trunk/include/drm/ttm/ttm_bo_api.h b/trunk/include/drm/ttm/ttm_bo_api.h index c6cae733ddef..e8028ade567f 100644 --- a/trunk/include/drm/ttm/ttm_bo_api.h +++ b/trunk/include/drm/ttm/ttm_bo_api.h @@ -141,6 +141,8 @@ struct ttm_tt; * struct ttm_buffer_object * * @bdev: Pointer to the buffer object device structure. + * @buffer_start: The virtual user-space start address of ttm_bo_type_user + * buffers. * @type: The bo type. * @destroy: Destruction function. If NULL, kfree is used. * @num_pages: Actual number of pages. @@ -170,6 +172,7 @@ struct ttm_tt; * @seq_valid: The value of @val_seq is valid. This value is protected by * the bo_device::lru_lock. * @reserved: Deadlock-free lock used for synchronization state transitions. + * @sync_obj_arg: Opaque argument to synchronization object function. * @sync_obj: Pointer to a synchronization object. * @priv_flags: Flags describing buffer object internal state. * @vm_rb: Rb node for the vm rb tree. @@ -197,6 +200,7 @@ struct ttm_buffer_object { struct ttm_bo_global *glob; struct ttm_bo_device *bdev; + unsigned long buffer_start; enum ttm_bo_type type; void (*destroy) (struct ttm_buffer_object *); unsigned long num_pages; @@ -251,6 +255,7 @@ struct ttm_buffer_object { * checking NULL while reserved but not holding the mentioned lock. 
*/ + void *sync_obj_arg; void *sync_obj; unsigned long priv_flags; @@ -424,9 +429,8 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, * @no_wait: Return immediately if buffer is busy. * * Synchronizes a buffer object for CPU RW access. This means - * command submission that affects the buffer will return -EBUSY - * until ttm_bo_synccpu_write_release is called. - * + * blocking command submission that affects the buffer and + * waiting for buffer idle. This lock is recursive. * Returns * -EBUSY if the buffer is busy and no_wait is true. * -ERESTARTSYS if interrupted by a signal. @@ -468,6 +472,8 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * @type: Requested type of buffer object. * @flags: Initial placement flags. * @page_alignment: Data alignment in pages. + * @buffer_start: Virtual address of user space data backing a + * user buffer object. * @interruptible: If needing to sleep to wait for GPU resources, * sleep interruptible. * @persistent_swap_storage: Usually the swap storage is deleted for buffers @@ -499,6 +505,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, + unsigned long buffer_start, bool interrubtible, struct file *persistent_swap_storage, size_t acc_size, @@ -514,6 +521,8 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev, * @type: Requested type of buffer object. * @flags: Initial placement flags. * @page_alignment: Data alignment in pages. + * @buffer_start: Virtual address of user space data backing a + * user buffer object. * @interruptible: If needing to sleep while waiting for GPU resources, * sleep interruptible. * @persistent_swap_storage: Usually the swap storage is deleted for buffers @@ -536,6 +545,7 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, + unsigned long buffer_start, bool interruptible, struct file *persistent_swap_storage, struct ttm_buffer_object **p_bo); @@ -726,18 +736,4 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); -/** - * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved - * - * @bo: The buffer object to check. - * - * This function returns an indication if a bo is reserved or not, and should - * only be used to print an error when it is not from incorrect api usage, since - * there's no guarantee that it is the caller that is holding the reservation. - */ -static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo) -{ - return atomic_read(&bo->reserved); -} - #endif diff --git a/trunk/include/drm/ttm/ttm_bo_driver.h b/trunk/include/drm/ttm/ttm_bo_driver.h index dd96442cdc2a..d803b92b0324 100644 --- a/trunk/include/drm/ttm/ttm_bo_driver.h +++ b/trunk/include/drm/ttm/ttm_bo_driver.h @@ -422,10 +422,10 @@ struct ttm_bo_driver { * documentation. */ - bool (*sync_obj_signaled) (void *sync_obj); - int (*sync_obj_wait) (void *sync_obj, + bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg); + int (*sync_obj_wait) (void *sync_obj, void *sync_arg, bool lazy, bool interruptible); - int (*sync_obj_flush) (void *sync_obj); + int (*sync_obj_flush) (void *sync_obj, void *sync_arg); void (*sync_obj_unref) (void **sync_obj); void *(*sync_obj_ref) (void *sync_obj); @@ -521,6 +521,8 @@ struct ttm_bo_global { * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. 
* @val_seq: Current validation sequence. + * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. + * If a GPU lockup has been detected, this is forced to 0. * @dev_mapping: A pointer to the struct address_space representing the * device address space. * @wq: Work queue structure for the delayed delete workqueue. @@ -554,6 +556,7 @@ struct ttm_bo_device { * Protected by load / firstopen / lastclose /unload sync. */ + bool nice_mode; struct address_space *dev_mapping; /* @@ -726,6 +729,20 @@ extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); +/** + * ttm_bo_wait_for_cpu + * + * @bo: Pointer to a struct ttm_buffer_object. + * @no_wait: Don't sleep while waiting. + * + * Wait until a buffer object is no longer sync'ed for CPU access. + * Returns: + * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1). + * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. + */ + +extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); + extern void ttm_bo_global_release(struct drm_global_reference *ref); extern int ttm_bo_global_init(struct drm_global_reference *ref); @@ -956,6 +973,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); * * @bo: A pointer to a struct ttm_buffer_object. * @sync_obj: A sync object that signals when moving is complete. + * @sync_obj_arg: An argument to pass to the sync object idle / wait + * functions. * @evict: This is an evict move. Don't return until the buffer is idle. * @no_wait_reserve: Return immediately if other buffers are busy. * @no_wait_gpu: Return immediately if the GPU is busy. @@ -971,6 +990,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, void *sync_obj, + void *sync_obj_arg, bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem); diff --git a/trunk/include/drm/ttm/ttm_execbuf_util.h b/trunk/include/drm/ttm/ttm_execbuf_util.h index 547e19f06e57..1926cae373ba 100644 --- a/trunk/include/drm/ttm/ttm_execbuf_util.h +++ b/trunk/include/drm/ttm/ttm_execbuf_util.h @@ -39,6 +39,8 @@ * * @head: list head for thread-private list. * @bo: refcounted buffer object pointer. + * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once + * adding a new sync object. * @reserved: Indicates whether @bo has been reserved for validation. * @removed: Indicates whether @bo has been removed from lru lists. * @put_count: Number of outstanding references on bo::list_kref. @@ -48,6 +50,7 @@ struct ttm_validate_buffer { struct list_head head; struct ttm_buffer_object *bo; + void *new_sync_obj_arg; bool reserved; bool removed; int put_count; diff --git a/trunk/include/drm/ttm/ttm_memory.h b/trunk/include/drm/ttm/ttm_memory.h index 72dcbe81dd07..d6d1da468c97 100644 --- a/trunk/include/drm/ttm/ttm_memory.h +++ b/trunk/include/drm/ttm/ttm_memory.h @@ -60,6 +60,7 @@ struct ttm_mem_shrink { * for the GPU, and this will otherwise block other workqueue tasks(?) * At this point we use only a single-threaded workqueue. * @work: The workqueue callback for the shrink queue. + * @queue: Wait queue for processes suspended waiting for memory. * @lock: Lock to protect the @shrink - and the memory accounting members, * that is, essentially the whole structure with some exceptions. * @zones: Array of pointers to accounting zones. 
@@ -79,6 +80,7 @@ struct ttm_mem_global { struct ttm_mem_shrink *shrink; struct workqueue_struct *swap_queue; struct work_struct work; + wait_queue_head_t queue; spinlock_t lock; struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; unsigned int num_zones; diff --git a/trunk/include/drm/ttm/ttm_object.h b/trunk/include/drm/ttm/ttm_object.h index fc0cf0649901..b01c563b2751 100644 --- a/trunk/include/drm/ttm/ttm_object.h +++ b/trunk/include/drm/ttm/ttm_object.h @@ -40,7 +40,6 @@ #include #include #include -#include #include /** @@ -121,7 +120,6 @@ struct ttm_object_device; */ struct ttm_base_object { - struct rcu_head rhead; struct drm_hash_item hash; enum ttm_object_type object_type; bool shareable; @@ -270,6 +268,4 @@ extern struct ttm_object_device *ttm_object_device_init extern void ttm_object_device_release(struct ttm_object_device **p_tdev); -#define ttm_base_object_kfree(__object, __base)\ - kfree_rcu(__object, __base.rhead) #endif diff --git a/trunk/include/linux/clk-provider.h b/trunk/include/linux/clk-provider.h index c12731582920..f9f5e9eeb9dd 100644 --- a/trunk/include/linux/clk-provider.h +++ b/trunk/include/linux/clk-provider.h @@ -335,8 +335,8 @@ const char *__clk_get_name(struct clk *clk); struct clk_hw *__clk_get_hw(struct clk *clk); u8 __clk_get_num_parents(struct clk *clk); struct clk *__clk_get_parent(struct clk *clk); -inline int __clk_get_enable_count(struct clk *clk); -inline int __clk_get_prepare_count(struct clk *clk); +int __clk_get_enable_count(struct clk *clk); +int __clk_get_prepare_count(struct clk *clk); unsigned long __clk_get_rate(struct clk *clk); unsigned long __clk_get_flags(struct clk *clk); int __clk_is_enabled(struct clk *clk); diff --git a/trunk/include/linux/dma-attrs.h b/trunk/include/linux/dma-attrs.h index c8e1831d7572..f83f793223ff 100644 --- a/trunk/include/linux/dma-attrs.h +++ b/trunk/include/linux/dma-attrs.h @@ -17,7 +17,6 @@ enum dma_attr { DMA_ATTR_NON_CONSISTENT, DMA_ATTR_NO_KERNEL_MAPPING, DMA_ATTR_SKIP_CPU_SYNC, - DMA_ATTR_FORCE_CONTIGUOUS, DMA_ATTR_MAX, }; diff --git a/trunk/include/linux/i2c-omap.h b/trunk/include/linux/i2c-omap.h index df804ba73e0b..92a0dc75bc74 100644 --- a/trunk/include/linux/i2c-omap.h +++ b/trunk/include/linux/i2c-omap.h @@ -34,6 +34,7 @@ struct omap_i2c_bus_platform_data { u32 clkrate; u32 rev; u32 flags; + void (*set_mpu_wkup_lat)(struct device *dev, long set); }; #endif diff --git a/trunk/include/linux/kref.h b/trunk/include/linux/kref.h index 4972e6e9ca93..65af6887872f 100644 --- a/trunk/include/linux/kref.h +++ b/trunk/include/linux/kref.h @@ -111,25 +111,4 @@ static inline int kref_put_mutex(struct kref *kref, } return 0; } - -/** - * kref_get_unless_zero - Increment refcount for object unless it is zero. - * @kref: object. - * - * Return non-zero if the increment succeeded. Otherwise return 0. - * - * This function is intended to simplify locking around refcounting for - * objects that can be looked up from a lookup structure, and which are - * removed from that lookup structure in the object destructor. - * Operations on such objects require at least a read lock around - * lookup + kref_get, and a write lock around kref_put + remove from lookup - * structure. Furthermore, RCU implementations become extremely tricky. - * With a lookup followed by a kref_get_unless_zero *with return value check* - * locking in the kref_put path can be deferred to the actual removal from - * the lookup structure and RCU lookups become trivial. 
- */ -static inline int __must_check kref_get_unless_zero(struct kref *kref) -{ - return atomic_add_unless(&kref->refcount, 1, 0); -} #endif /* _KREF_H_ */ diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h index fa0680402738..bcaab4e6fe91 100644 --- a/trunk/include/linux/mm.h +++ b/trunk/include/linux/mm.h @@ -1684,9 +1684,5 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; } static inline bool page_is_guard(struct page *page) { return false; } #endif /* CONFIG_DEBUG_PAGEALLOC */ -extern void reset_zone_present_pages(void); -extern void fixup_zone_present_pages(int nid, unsigned long start_pfn, - unsigned long end_pfn); - #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/trunk/include/linux/mmc/dw_mmc.h b/trunk/include/linux/mmc/dw_mmc.h index 7c6a1139d8fa..96531664a061 100644 --- a/trunk/include/linux/mmc/dw_mmc.h +++ b/trunk/include/linux/mmc/dw_mmc.h @@ -137,7 +137,7 @@ struct dw_mci { dma_addr_t sg_dma; void *sg_cpu; - struct dw_mci_dma_ops *dma_ops; + const struct dw_mci_dma_ops *dma_ops; #ifdef CONFIG_MMC_DW_IDMAC unsigned int ring_size; #else @@ -162,7 +162,7 @@ struct dw_mci { u16 data_offset; struct device *dev; struct dw_mci_board *pdata; - struct dw_mci_drv_data *drv_data; + const struct dw_mci_drv_data *drv_data; void *priv; struct clk *biu_clk; struct clk *ciu_clk; @@ -186,7 +186,7 @@ struct dw_mci { struct regulator *vmmc; /* Power regulator */ unsigned long irq_flags; /* IRQ flags */ - unsigned int irq; + int irq; }; /* DMA ops for Internal/External DMAC interface */ diff --git a/trunk/include/linux/mmc/sdhci.h b/trunk/include/linux/mmc/sdhci.h index fa8529a859b8..1edcb4dad8c4 100644 --- a/trunk/include/linux/mmc/sdhci.h +++ b/trunk/include/linux/mmc/sdhci.h @@ -91,6 +91,7 @@ struct sdhci_host { unsigned int quirks2; /* More deviations from spec. 
*/ #define SDHCI_QUIRK2_HOST_OFF_CARD_ON (1<<0) +#define SDHCI_QUIRK2_HOST_NO_CMD23 (1<<1) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ diff --git a/trunk/include/linux/mmzone.h b/trunk/include/linux/mmzone.h index 50aaca81f63d..a23923ba8263 100644 --- a/trunk/include/linux/mmzone.h +++ b/trunk/include/linux/mmzone.h @@ -752,7 +752,7 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size, enum memmap_context context); -extern void lruvec_init(struct lruvec *lruvec, struct zone *zone); +extern void lruvec_init(struct lruvec *lruvec); static inline struct zone *lruvec_zone(struct lruvec *lruvec) { diff --git a/trunk/include/linux/of_address.h b/trunk/include/linux/of_address.h index a1984dd037da..0506eb53519b 100644 --- a/trunk/include/linux/of_address.h +++ b/trunk/include/linux/of_address.h @@ -28,11 +28,13 @@ static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } #endif #else /* CONFIG_OF_ADDRESS */ +#ifndef of_address_to_resource static inline int of_address_to_resource(struct device_node *dev, int index, struct resource *r) { return -EINVAL; } +#endif static inline struct device_node *of_find_matching_node_by_address( struct device_node *from, const struct of_device_id *matches, @@ -40,10 +42,12 @@ static inline struct device_node *of_find_matching_node_by_address( { return NULL; } +#ifndef of_iomap static inline void __iomem *of_iomap(struct device_node *device, int index) { return NULL; } +#endif static inline const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags) { diff --git a/trunk/include/linux/platform_data/omap_ocp2scp.h b/trunk/include/linux/platform_data/omap_ocp2scp.h new file mode 100644 index 000000000000..5c6c3939355f --- /dev/null +++ b/trunk/include/linux/platform_data/omap_ocp2scp.h @@ -0,0 +1,31 @@ +/* + * omap_ocp2scp.h -- ocp2scp header file + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Author: Kishon Vijay Abraham I + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __DRIVERS_OMAP_OCP2SCP_H +#define __DRIVERS_OMAP_OCP2SCP_H + +struct omap_ocp2scp_dev { + const char *drv_name; + struct resource *res; +}; + +struct omap_ocp2scp_platform_data { + int dev_cnt; + struct omap_ocp2scp_dev **devices; +}; +#endif /* __DRIVERS_OMAP_OCP2SCP_H */ diff --git a/trunk/include/linux/ptp_clock_kernel.h b/trunk/include/linux/ptp_clock_kernel.h index f2dc6d8fc680..38a993508327 100644 --- a/trunk/include/linux/ptp_clock_kernel.h +++ b/trunk/include/linux/ptp_clock_kernel.h @@ -54,7 +54,8 @@ struct ptp_clock_request { * clock operations * * @adjfreq: Adjusts the frequency of the hardware clock. - * parameter delta: Desired period change in parts per billion. + * parameter delta: Desired frequency offset from nominal frequency + * in parts per billion * * @adjtime: Shifts the time of the hardware clock. * parameter delta: Desired change in nanoseconds. 
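/*
 * Editorial sketch, not part of the patch: the reworded @adjfreq comment
 * above clarifies that drivers receive a signed offset from the nominal
 * frequency in parts per billion. A PTP driver typically scales its clock
 * increment by that offset along the following lines; example_scaled_increment
 * is a hypothetical helper, not a real kernel function.
 */
#include <linux/math64.h>
#include <linux/types.h>

static u64 example_scaled_increment(u64 nominal_incr, s32 ppb)
{
	u64 adj;
	bool neg = ppb < 0;

	if (neg)
		ppb = -ppb;
	/* adj = nominal_incr * |ppb| / 1e9 (assumes the product fits in 64 bits) */
	adj = div_u64(nominal_incr * ppb, 1000000000);
	return neg ? nominal_incr - adj : nominal_incr + adj;
}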
diff --git a/trunk/include/linux/rio.h b/trunk/include/linux/rio.h index 4187da511006..a3e784278667 100644 --- a/trunk/include/linux/rio.h +++ b/trunk/include/linux/rio.h @@ -275,9 +275,11 @@ struct rio_id_table { * struct rio_net - RIO network info * @node: Node in global list of RIO networks * @devices: List of devices in this network + * @switches: List of switches in this netowrk * @mports: List of master ports accessing this network * @hport: Default port for accessing this network * @id: RIO network ID + * @destid_table: destID allocation table */ struct rio_net { struct list_head node; /* node in list of networks */ diff --git a/trunk/include/linux/spi/ads7846.h b/trunk/include/linux/spi/ads7846.h index c64de9dd7631..2f694f3846a9 100644 --- a/trunk/include/linux/spi/ads7846.h +++ b/trunk/include/linux/spi/ads7846.h @@ -46,8 +46,9 @@ struct ads7846_platform_data { u16 debounce_rep; /* additional consecutive good readings * required after the first two */ int gpio_pendown; /* the GPIO used to decide the pendown - * state if get_pendown_state == NULL - */ + * state if get_pendown_state == NULL */ + int gpio_pendown_debounce; /* platform specific debounce time for + * the gpio_pendown */ int (*get_pendown_state)(void); int (*filter_init) (const struct ads7846_platform_data *pdata, void **filter_data); diff --git a/trunk/include/net/xfrm.h b/trunk/include/net/xfrm.h index 6f0ba01afe73..63445ede48bb 100644 --- a/trunk/include/net/xfrm.h +++ b/trunk/include/net/xfrm.h @@ -1351,7 +1351,7 @@ struct xfrm6_tunnel { }; extern void xfrm_init(void); -extern void xfrm4_init(int rt_hash_size); +extern void xfrm4_init(void); extern int xfrm_state_init(struct net *net); extern void xfrm_state_fini(struct net *net); extern void xfrm4_state_init(void); diff --git a/trunk/include/scsi/scsi_device.h b/trunk/include/scsi/scsi_device.h index 88fae8d20154..55367b04dc94 100644 --- a/trunk/include/scsi/scsi_device.h +++ b/trunk/include/scsi/scsi_device.h @@ -135,6 +135,8 @@ struct scsi_device { * because we did a bus reset. 
*/ unsigned use_10_for_rw:1; /* first try 10-byte read / write */ unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ + unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ + unsigned no_write_same:1; /* no WRITE SAME command */ unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ unsigned skip_vpd_pages:1; /* do not read VPD pages */ @@ -362,6 +364,8 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, struct scsi_sense_hdr *sshdr); extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, int buf_len); +extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, + unsigned int len, unsigned char opcode); extern int scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state); extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, diff --git a/trunk/include/uapi/drm/drm.h b/trunk/include/uapi/drm/drm.h index 8d1e2bbee83a..1e3481edf062 100644 --- a/trunk/include/uapi/drm/drm.h +++ b/trunk/include/uapi/drm/drm.h @@ -778,7 +778,6 @@ struct drm_event_vblank { #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4 #define DRM_CAP_PRIME 0x5 -#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 #define DRM_PRIME_CAP_IMPORT 0x1 #define DRM_PRIME_CAP_EXPORT 0x2 diff --git a/trunk/include/uapi/drm/exynos_drm.h b/trunk/include/uapi/drm/exynos_drm.h index 49f010f2b27f..c0494d586e23 100644 --- a/trunk/include/uapi/drm/exynos_drm.h +++ b/trunk/include/uapi/drm/exynos_drm.h @@ -133,26 +133,17 @@ struct drm_exynos_g2d_cmd { __u32 data; }; -enum drm_exynos_g2d_buf_type { - G2D_BUF_USERPTR = 1 << 31, -}; - enum drm_exynos_g2d_event_type { G2D_EVENT_NOT, G2D_EVENT_NONSTOP, G2D_EVENT_STOP, /* not yet */ }; -struct drm_exynos_g2d_userptr { - unsigned long userptr; - unsigned long size; -}; - struct drm_exynos_g2d_set_cmdlist { __u64 cmd; - __u64 cmd_buf; + __u64 cmd_gem; __u32 cmd_nr; - __u32 cmd_buf_nr; + __u32 cmd_gem_nr; /* for g2d event */ __u64 event_type; diff --git a/trunk/include/uapi/drm/i915_drm.h b/trunk/include/uapi/drm/i915_drm.h index b746a3cf5fa9..4322b1e7d2ed 100644 --- a/trunk/include/uapi/drm/i915_drm.h +++ b/trunk/include/uapi/drm/i915_drm.h @@ -306,7 +306,6 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_SEMAPHORES 20 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 #define I915_PARAM_RSVD_FOR_FUTURE_USE 22 -#define I915_PARAM_HAS_SECURE_BATCHES 23 typedef struct drm_i915_getparam { int param; @@ -672,11 +671,6 @@ struct drm_i915_gem_execbuffer2 { /** Resets the SO write offset registers for transform feedback on gen7. */ #define I915_EXEC_GEN7_SOL_RESET (1<<8) -/** Request a privileged ("secure") batch buffer. Note only available for - * DRM_ROOT_ONLY | DRM_MASTER processes. 
- */ -#define I915_EXEC_SECURE (1<<9) - #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK diff --git a/trunk/include/uapi/linux/eventpoll.h b/trunk/include/uapi/linux/eventpoll.h index 8c99ce7202c5..2c267bcbb85c 100644 --- a/trunk/include/uapi/linux/eventpoll.h +++ b/trunk/include/uapi/linux/eventpoll.h @@ -25,7 +25,6 @@ #define EPOLL_CTL_ADD 1 #define EPOLL_CTL_DEL 2 #define EPOLL_CTL_MOD 3 -#define EPOLL_CTL_DISABLE 4 /* * Request the handling of system wakeup events so as to prevent system suspends diff --git a/trunk/include/uapi/linux/oom.h b/trunk/include/uapi/linux/oom.h index a49c4afc7060..b29272d621ce 100644 --- a/trunk/include/uapi/linux/oom.h +++ b/trunk/include/uapi/linux/oom.h @@ -8,4 +8,13 @@ #define OOM_SCORE_ADJ_MIN (-1000) #define OOM_SCORE_ADJ_MAX 1000 +/* + * /proc//oom_adj set to -17 protects from the oom killer for legacy + * purposes. + */ +#define OOM_DISABLE (-17) +/* inclusive */ +#define OOM_ADJUST_MIN (-16) +#define OOM_ADJUST_MAX 15 + #endif /* _UAPI__INCLUDE_LINUX_OOM_H */ diff --git a/trunk/include/xen/hvm.h b/trunk/include/xen/hvm.h index b193fa2f9fdd..13e43e41637d 100644 --- a/trunk/include/xen/hvm.h +++ b/trunk/include/xen/hvm.h @@ -5,6 +5,36 @@ #include #include +static const char *param_name(int op) +{ +#define PARAM(x) [HVM_PARAM_##x] = #x + static const char *const names[] = { + PARAM(CALLBACK_IRQ), + PARAM(STORE_PFN), + PARAM(STORE_EVTCHN), + PARAM(PAE_ENABLED), + PARAM(IOREQ_PFN), + PARAM(BUFIOREQ_PFN), + PARAM(TIMER_MODE), + PARAM(HPET_ENABLED), + PARAM(IDENT_PT), + PARAM(DM_DOMAIN), + PARAM(ACPI_S_STATE), + PARAM(VM86_TSS), + PARAM(VPT_ALIGN), + PARAM(CONSOLE_PFN), + PARAM(CONSOLE_EVTCHN), + }; +#undef PARAM + + if (op >= ARRAY_SIZE(names)) + return "unknown"; + + if (!names[op]) + return "reserved"; + + return names[op]; +} static inline int hvm_get_parameter(int idx, uint64_t *value) { struct xen_hvm_param xhv; @@ -14,8 +44,8 @@ static inline int hvm_get_parameter(int idx, uint64_t *value) xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { - printk(KERN_ERR "Cannot get hvm parameter %d: %d!\n", - idx, r); + printk(KERN_ERR "Cannot get hvm parameter %s (%d): %d!\n", + param_name(idx), idx, r); return r; } *value = xhv.value; diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c index 3717e7b306e0..20ef219bbe9b 100644 --- a/trunk/kernel/futex.c +++ b/trunk/kernel/futex.c @@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, struct futex_pi_state **ps, struct task_struct *task, int set_waiters) { - int lock_taken, ret, ownerdied = 0; + int lock_taken, ret, force_take = 0; u32 uval, newval, curval, vpid = task_pid_vnr(task); retry: @@ -755,17 +755,15 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, newval = curval | FUTEX_WAITERS; /* - * There are two cases, where a futex might have no owner (the - * owner TID is 0): OWNER_DIED. We take over the futex in this - * case. We also do an unconditional take over, when the owner - * of the futex died. - * - * This is safe as we are protected by the hash bucket lock ! + * Should we force take the futex? See below. */ - if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { - /* Keep the OWNER_DIED bit */ + if (unlikely(force_take)) { + /* + * Keep the OWNER_DIED and the WAITERS bit and set the + * new TID value. 
+ */ newval = (curval & ~FUTEX_TID_MASK) | vpid; - ownerdied = 0; + force_take = 0; lock_taken = 1; } @@ -775,7 +773,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, goto retry; /* - * We took the lock due to owner died take over. + * We took the lock due to forced take over. */ if (unlikely(lock_taken)) return 1; @@ -790,20 +788,25 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, switch (ret) { case -ESRCH: /* - * No owner found for this futex. Check if the - * OWNER_DIED bit is set to figure out whether - * this is a robust futex or not. + * We failed to find an owner for this + * futex. So we have no pi_state to block + * on. This can happen in two cases: + * + * 1) The owner died + * 2) A stale FUTEX_WAITERS bit + * + * Re-read the futex value. */ if (get_futex_value_locked(&curval, uaddr)) return -EFAULT; /* - * We simply start over in case of a robust - * futex. The code above will take the futex - * and return happy. + * If the owner died or we have a stale + * WAITERS bit the owner TID in the user space + * futex is 0. */ - if (curval & FUTEX_OWNER_DIED) { - ownerdied = 1; + if (!(curval & FUTEX_TID_MASK)) { + force_take = 1; goto retry; } default: diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c index 6085f5ef88ea..6e48c3a43599 100644 --- a/trunk/kernel/module.c +++ b/trunk/kernel/module.c @@ -2293,12 +2293,17 @@ static void layout_symtab(struct module *mod, struct load_info *info) src = (void *)info->hdr + symsect->sh_offset; nsrc = symsect->sh_size / sizeof(*src); + /* strtab always starts with a nul, so offset 0 is the empty string. */ + strtab_size = 1; + /* Compute total space required for the core symbols' strtab. */ - for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src) - if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) { - strtab_size += strlen(&info->strtab[src->st_name]) + 1; + for (ndst = i = 0; i < nsrc; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + strtab_size += strlen(&info->strtab[src[i].st_name])+1; ndst++; } + } /* Append room for core symbols at end of core part. 
*/ info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); @@ -2332,15 +2337,15 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) mod->core_symtab = dst = mod->module_core + info->symoffs; mod->core_strtab = s = mod->module_core + info->stroffs; src = mod->symtab; - *dst = *src; *s++ = 0; - for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { - if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) - continue; - - dst[ndst] = *src; - dst[ndst++].st_name = s - mod->core_strtab; - s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1; + for (ndst = i = 0; i < mod->num_symtab; i++) { + if (i == 0 || + is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { + dst[ndst] = src[i]; + dst[ndst++].st_name = s - mod->core_strtab; + s += strlcpy(s, &mod->strtab[src[i].st_name], + KSYM_NAME_LEN) + 1; + } } mod->core_num_syms = ndst; } diff --git a/trunk/lib/mpi/longlong.h b/trunk/lib/mpi/longlong.h index 678ce4f1e124..095ab157a521 100644 --- a/trunk/lib/mpi/longlong.h +++ b/trunk/lib/mpi/longlong.h @@ -641,7 +641,14 @@ do { \ ************** MIPS ***************** ***************************************/ #if defined(__mips__) && W_TYPE_SIZE == 32 -#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 +#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#define umul_ppmm(w1, w0, u, v) \ +do { \ + UDItype __ll = (UDItype)(u) * (v); \ + w1 = __ll >> 32; \ + w0 = __ll; \ +} while (0) +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 #define umul_ppmm(w1, w0, u, v) \ __asm__ ("multu %2,%3" \ : "=l" ((USItype)(w0)), \ @@ -666,7 +673,15 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 +#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#define umul_ppmm(w1, w0, u, v) \ +do { \ + typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ + __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \ + w1 = __ll >> 64; \ + w0 = __ll; \ +} while (0) +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 #define umul_ppmm(w1, w0, u, v) \ __asm__ ("dmultu %2,%3" \ : "=l" ((UDItype)(w0)), \ diff --git a/trunk/mm/bootmem.c b/trunk/mm/bootmem.c index 434be4ae7a04..f468185b3b28 100644 --- a/trunk/mm/bootmem.c +++ b/trunk/mm/bootmem.c @@ -198,8 +198,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) int order = ilog2(BITS_PER_LONG); __free_pages_bootmem(pfn_to_page(start), order); - fixup_zone_present_pages(page_to_nid(pfn_to_page(start)), - start, start + BITS_PER_LONG); count += BITS_PER_LONG; start += BITS_PER_LONG; } else { @@ -210,9 +208,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) if (vec & 1) { page = pfn_to_page(start + off); __free_pages_bootmem(page, 0); - fixup_zone_present_pages( - page_to_nid(page), - start + off, start + off + 1); count++; } vec >>= 1; @@ -226,11 +221,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) pages = bdata->node_low_pfn - bdata->node_min_pfn; pages = bootmem_bootmap_pages(pages); count += pages; - while (pages--) { - fixup_zone_present_pages(page_to_nid(page), - page_to_pfn(page), page_to_pfn(page) + 1); + while (pages--) __free_pages_bootmem(page++, 0); - } bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); diff --git a/trunk/mm/highmem.c b/trunk/mm/highmem.c index d517cd16a6eb..2da13a5c50e2 100644 --- a/trunk/mm/highmem.c +++ b/trunk/mm/highmem.c @@ -98,7 +98,7 @@ struct page *kmap_to_page(void *vaddr) { unsigned long addr = (unsigned 
long)vaddr; - if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) { + if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) { int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT; return pte_page(pkmap_page_table[i]); } diff --git a/trunk/mm/memcontrol.c b/trunk/mm/memcontrol.c index 7acf43bf04a2..dd39ba000b31 100644 --- a/trunk/mm/memcontrol.c +++ b/trunk/mm/memcontrol.c @@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, struct mem_cgroup *memcg) { struct mem_cgroup_per_zone *mz; + struct lruvec *lruvec; - if (mem_cgroup_disabled()) - return &zone->lruvec; + if (mem_cgroup_disabled()) { + lruvec = &zone->lruvec; + goto out; + } mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone)); - return &mz->lruvec; + lruvec = &mz->lruvec; +out: + /* + * Since a node can be onlined after the mem_cgroup was created, + * we have to be prepared to initialize lruvec->zone here; + * and if offlined then reonlined, we need to reinitialize it. + */ + if (unlikely(lruvec->zone != zone)) + lruvec->zone = zone; + return lruvec; } /* @@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) struct mem_cgroup_per_zone *mz; struct mem_cgroup *memcg; struct page_cgroup *pc; + struct lruvec *lruvec; - if (mem_cgroup_disabled()) - return &zone->lruvec; + if (mem_cgroup_disabled()) { + lruvec = &zone->lruvec; + goto out; + } pc = lookup_page_cgroup(page); memcg = pc->mem_cgroup; @@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) pc->mem_cgroup = memcg = root_mem_cgroup; mz = page_cgroup_zoneinfo(memcg, page); - return &mz->lruvec; + lruvec = &mz->lruvec; +out: + /* + * Since a node can be onlined after the mem_cgroup was created, + * we have to be prepared to initialize lruvec->zone here; + * and if offlined then reonlined, we need to reinitialize it. + */ + if (unlikely(lruvec->zone != zone)) + lruvec->zone = zone; + return lruvec; } /** @@ -1452,17 +1476,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg) static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) { u64 limit; - u64 memsw; limit = res_counter_read_u64(&memcg->res, RES_LIMIT); - limit += total_swap_pages << PAGE_SHIFT; - memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); /* - * If memsw is finite and limits the amount of swap space available - * to this memcg, return that limit. + * Do not consider swap space if we cannot swap due to swappiness */ - return min(limit, memsw); + if (mem_cgroup_swappiness(memcg)) { + u64 memsw; + + limit += total_swap_pages << PAGE_SHIFT; + memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); + + /* + * If memsw is finite and limits the amount of swap space + * available to this memcg, return that limit. 
+ */ + limit = min(limit, memsw); + } + + return limit; } void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, @@ -3688,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg, int node, int zid, enum lru_list lru) { - struct mem_cgroup_per_zone *mz; + struct lruvec *lruvec; unsigned long flags, loop; struct list_head *list; struct page *busy; struct zone *zone; zone = &NODE_DATA(node)->node_zones[zid]; - mz = mem_cgroup_zoneinfo(memcg, node, zid); - list = &mz->lruvec.lists[lru]; + lruvec = mem_cgroup_zone_lruvec(zone, memcg); + list = &lruvec->lists[lru]; - loop = mz->lru_size[lru]; + loop = mem_cgroup_get_lru_size(lruvec, lru); /* give some margin against EBUSY etc...*/ loop += 256; busy = NULL; @@ -4736,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) for (zone = 0; zone < MAX_NR_ZONES; zone++) { mz = &pn->zoneinfo[zone]; - lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]); + lruvec_init(&mz->lruvec); mz->usage_in_excess = 0; mz->on_tree = false; mz->memcg = memcg; diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c index fb135ba4aba9..221fc9ffcab1 100644 --- a/trunk/mm/memory.c +++ b/trunk/mm/memory.c @@ -2527,9 +2527,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, int ret = 0; int page_mkwrite = 0; struct page *dirty_page = NULL; - unsigned long mmun_start; /* For mmu_notifiers */ - unsigned long mmun_end; /* For mmu_notifiers */ - bool mmun_called = false; /* For mmu_notifiers */ + unsigned long mmun_start = 0; /* For mmu_notifiers */ + unsigned long mmun_end = 0; /* For mmu_notifiers */ old_page = vm_normal_page(vma, address, orig_pte); if (!old_page) { @@ -2708,8 +2707,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, goto oom_free_new; mmun_start = address & PAGE_MASK; - mmun_end = (address & PAGE_MASK) + PAGE_SIZE; - mmun_called = true; + mmun_end = mmun_start + PAGE_SIZE; mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); /* @@ -2778,7 +2776,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, page_cache_release(new_page); unlock: pte_unmap_unlock(page_table, ptl); - if (mmun_called) + if (mmun_end > mmun_start) mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); if (old_page) { /* diff --git a/trunk/mm/memory_hotplug.c b/trunk/mm/memory_hotplug.c index 56b758ae57d2..e4eeacae2b91 100644 --- a/trunk/mm/memory_hotplug.c +++ b/trunk/mm/memory_hotplug.c @@ -106,7 +106,6 @@ static void get_page_bootmem(unsigned long info, struct page *page, void __ref put_page_bootmem(struct page *page) { unsigned long type; - struct zone *zone; type = (unsigned long) page->lru.next; BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || @@ -117,12 +116,6 @@ void __ref put_page_bootmem(struct page *page) set_page_private(page, 0); INIT_LIST_HEAD(&page->lru); __free_pages_bootmem(page, 0); - - zone = page_zone(page); - zone_span_writelock(zone); - zone->present_pages++; - zone_span_writeunlock(zone); - totalram_pages++; } } diff --git a/trunk/mm/mmap.c b/trunk/mm/mmap.c index 2d942353d681..9a796c41e7d9 100644 --- a/trunk/mm/mmap.c +++ b/trunk/mm/mmap.c @@ -334,8 +334,10 @@ void validate_mm(struct mm_struct *mm) struct vm_area_struct *vma = mm->mmap; while (vma) { struct anon_vma_chain *avc; + vma_lock_anon_vma(vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); + vma_unlock_anon_vma(vma); vma = 
vma->vm_next; i++; } diff --git a/trunk/mm/mmzone.c b/trunk/mm/mmzone.c index 3cef80f6ac79..4596d81b89b1 100644 --- a/trunk/mm/mmzone.c +++ b/trunk/mm/mmzone.c @@ -87,7 +87,7 @@ int memmap_valid_within(unsigned long pfn, } #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ -void lruvec_init(struct lruvec *lruvec, struct zone *zone) +void lruvec_init(struct lruvec *lruvec) { enum lru_list lru; @@ -95,8 +95,4 @@ void lruvec_init(struct lruvec *lruvec, struct zone *zone) for_each_lru(lru) INIT_LIST_HEAD(&lruvec->lists[lru]); - -#ifdef CONFIG_MEMCG - lruvec->zone = zone; -#endif } diff --git a/trunk/mm/nobootmem.c b/trunk/mm/nobootmem.c index 714d5d650470..bd82f6b31411 100644 --- a/trunk/mm/nobootmem.c +++ b/trunk/mm/nobootmem.c @@ -116,8 +116,6 @@ static unsigned long __init __free_memory_core(phys_addr_t start, return 0; __free_pages_memory(start_pfn, end_pfn); - fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT), - start_pfn, end_pfn); return end_pfn - start_pfn; } @@ -128,7 +126,6 @@ unsigned long __init free_low_memory_core_early(int nodeid) phys_addr_t start, end, size; u64 i; - reset_zone_present_pages(); for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) count += __free_memory_core(start, end); diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index 5b74de6702e0..bcb72c6e2b2d 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) mt = get_pageblock_migratetype(page); if (unlikely(mt != MIGRATE_ISOLATE)) - __mod_zone_freepage_state(zone, -(1UL << order), mt); + __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt); if (alloc_order != order) expand(zone, page, alloc_order, order, @@ -4505,7 +4505,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, zone->zone_pgdat = pgdat; zone_pcp_init(zone); - lruvec_init(&zone->lruvec, zone); + lruvec_init(&zone->lruvec); if (!size) continue; @@ -6098,37 +6098,3 @@ void dump_page(struct page *page) dump_page_flags(page->flags); mem_cgroup_print_bad_page(page); } - -/* reset zone->present_pages */ -void reset_zone_present_pages(void) -{ - struct zone *z; - int i, nid; - - for_each_node_state(nid, N_HIGH_MEMORY) { - for (i = 0; i < MAX_NR_ZONES; i++) { - z = NODE_DATA(nid)->node_zones + i; - z->present_pages = 0; - } - } -} - -/* calculate zone's present pages in buddy system */ -void fixup_zone_present_pages(int nid, unsigned long start_pfn, - unsigned long end_pfn) -{ - struct zone *z; - unsigned long zone_start_pfn, zone_end_pfn; - int i; - - for (i = 0; i < MAX_NR_ZONES; i++) { - z = NODE_DATA(nid)->node_zones + i; - zone_start_pfn = z->zone_start_pfn; - zone_end_pfn = zone_start_pfn + z->spanned_pages; - - /* if the two regions intersect */ - if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn)) - z->present_pages += min(end_pfn, zone_end_pfn) - - max(start_pfn, zone_start_pfn); - } -} diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c index 67afba5117f2..89341b658bd0 100644 --- a/trunk/mm/shmem.c +++ b/trunk/mm/shmem.c @@ -643,7 +643,7 @@ static void shmem_evict_inode(struct inode *inode) kfree(info->symlink); simple_xattrs_free(&info->xattrs); - BUG_ON(inode->i_blocks); + WARN_ON(inode->i_blocks); shmem_free_inode(inode->i_sb); clear_inode(inode); } @@ -1145,8 +1145,20 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, if (!error) { error = shmem_add_to_page_cache(page, mapping, index, gfp, swp_to_radix_entry(swap)); - /* We already confirmed swap, and make no 
allocation */ - VM_BUG_ON(error); + /* + * We already confirmed swap under page lock, and make + * no memory allocation here, so usually no possibility + * of error; but free_swap_and_cache() only trylocks a + * page, so it is just possible that the entry has been + * truncated or holepunched since swap was confirmed. + * shmem_undo_range() will have done some of the + * unaccounting, now delete_from_swap_cache() will do + * the rest (including mem_cgroup_uncharge_swapcache). + * Reset swap.val? No, leave it so "failed" goes back to + * "repeat": reading a hole and writing should succeed. + */ + if (error) + delete_from_swap_cache(page); } if (error) goto failed; diff --git a/trunk/mm/swapfile.c b/trunk/mm/swapfile.c index 71cd288b2001..f91a25547ffe 100644 --- a/trunk/mm/swapfile.c +++ b/trunk/mm/swapfile.c @@ -1494,9 +1494,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) BUG_ON(!current->mm); pathname = getname(specialfile); - err = PTR_ERR(pathname); if (IS_ERR(pathname)) - goto out; + return PTR_ERR(pathname); victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); err = PTR_ERR(victim); @@ -1608,6 +1607,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) out_dput: filp_close(victim, NULL); out: + putname(pathname); return err; } diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c index 2624edcfb420..48550c66f1f2 100644 --- a/trunk/mm/vmscan.c +++ b/trunk/mm/vmscan.c @@ -1760,28 +1760,6 @@ static bool in_reclaim_compaction(struct scan_control *sc) return false; } -#ifdef CONFIG_COMPACTION -/* - * If compaction is deferred for sc->order then scale the number of pages - * reclaimed based on the number of consecutive allocation failures - */ -static unsigned long scale_for_compaction(unsigned long pages_for_compaction, - struct lruvec *lruvec, struct scan_control *sc) -{ - struct zone *zone = lruvec_zone(lruvec); - - if (zone->compact_order_failed <= sc->order) - pages_for_compaction <<= zone->compact_defer_shift; - return pages_for_compaction; -} -#else -static unsigned long scale_for_compaction(unsigned long pages_for_compaction, - struct lruvec *lruvec, struct scan_control *sc) -{ - return pages_for_compaction; -} -#endif - /* * Reclaim/compaction is used for high-order allocation requests. It reclaims * order-0 pages before compacting the zone. should_continue_reclaim() returns @@ -1829,9 +1807,6 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec, * inactive lists are large enough, continue reclaiming */ pages_for_compaction = (2UL << sc->order); - - pages_for_compaction = scale_for_compaction(pages_for_compaction, - lruvec, sc); inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE); if (nr_swap_pages > 0) inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON); @@ -3017,6 +2992,8 @@ static int kswapd(void *p) &balanced_classzone_idx); } } + + current->reclaim_state = NULL; return 0; } diff --git a/trunk/net/batman-adv/soft-interface.c b/trunk/net/batman-adv/soft-interface.c index b9a28d2dd3e8..ce0684a1fc83 100644 --- a/trunk/net/batman-adv/soft-interface.c +++ b/trunk/net/batman-adv/soft-interface.c @@ -325,6 +325,12 @@ void batadv_interface_rx(struct net_device *soft_iface, soft_iface->last_rx = jiffies; + /* Let the bridge loop avoidance check the packet. If will + * not handle it, we can safely push it up. 
+ */ + if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) + goto out; + if (orig_node) batadv_tt_add_temporary_global_entry(bat_priv, orig_node, ethhdr->h_source); @@ -332,12 +338,6 @@ void batadv_interface_rx(struct net_device *soft_iface, if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) goto dropped; - /* Let the bridge loop avoidance check the packet. If will - * not handle it, we can safely push it up. - */ - if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) - goto out; - netif_rx(skb); goto out; diff --git a/trunk/net/batman-adv/translation-table.c b/trunk/net/batman-adv/translation-table.c index 112edd371b2f..baae71585804 100644 --- a/trunk/net/batman-adv/translation-table.c +++ b/trunk/net/batman-adv/translation-table.c @@ -769,6 +769,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, */ tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP; + /* the change can carry possible "attribute" flags like the + * TT_CLIENT_WIFI, therefore they have to be copied in the + * client entry + */ + tt_global_entry->common.flags |= flags; + /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only * one originator left in the list and we previously received a * delete + roaming change for this originator. @@ -1496,7 +1502,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, memcpy(tt_change->addr, tt_common_entry->addr, ETH_ALEN); - tt_change->flags = BATADV_NO_FLAGS; + tt_change->flags = tt_common_entry->flags; tt_count++; tt_change++; @@ -2450,6 +2456,13 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, { bool ret = false; + /* if the originator is a backbone node (meaning it belongs to the same + * LAN of this node) the temporary client must not be added because to + * reach such destination the node must use the LAN instead of the mesh + */ + if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) + goto out; + if (!batadv_tt_global_add(bat_priv, orig_node, addr, BATADV_TT_CLIENT_TEMP, atomic_read(&orig_node->last_ttvn))) diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c index 8a0ce706aebd..a0a2f97b9c62 100644 --- a/trunk/net/bluetooth/hci_core.c +++ b/trunk/net/bluetooth/hci_core.c @@ -1754,11 +1754,11 @@ int hci_register_dev(struct hci_dev *hdev) if (hdev->dev_type != HCI_AMP) set_bit(HCI_AUTO_OFF, &hdev->dev_flags); - schedule_work(&hdev->power_on); - hci_notify(hdev, HCI_DEV_REG); hci_dev_hold(hdev); + schedule_work(&hdev->power_on); + return id; err_wqueue: diff --git a/trunk/net/bluetooth/mgmt.c b/trunk/net/bluetooth/mgmt.c index aa2ea0a8142c..91de4239da66 100644 --- a/trunk/net/bluetooth/mgmt.c +++ b/trunk/net/bluetooth/mgmt.c @@ -326,7 +326,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, struct hci_dev *d; size_t rp_len; u16 count; - int i, err; + int err; BT_DBG("sock %p", sk); @@ -347,9 +347,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, return -ENOMEM; } - rp->num_controllers = cpu_to_le16(count); - - i = 0; + count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (test_bit(HCI_SETUP, &d->dev_flags)) continue; @@ -357,10 +355,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, if (!mgmt_valid_hdev(d)) continue; - rp->index[i++] = cpu_to_le16(d->id); + rp->index[count++] = cpu_to_le16(d->id); BT_DBG("Added hci%u", d->id); } + rp->num_controllers = cpu_to_le16(count); + rp_len = sizeof(*rp) + (2 * count); + read_unlock(&hci_dev_list_lock); err = cmd_complete(sk, 
MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp, @@ -1366,6 +1367,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, continue; list_del(&match->list); + kfree(match); found++; } diff --git a/trunk/net/bluetooth/smp.c b/trunk/net/bluetooth/smp.c index 2ac8d50861e0..a5923378bdf0 100644 --- a/trunk/net/bluetooth/smp.c +++ b/trunk/net/bluetooth/smp.c @@ -267,7 +267,7 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags); mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, - hcon->dst_type, reason); + hcon->dst_type, HCI_ERROR_AUTH_FAILURE); cancel_delayed_work_sync(&conn->security_timer); diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index 09cb3f6dc40c..c0946cb2b354 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -1666,7 +1666,7 @@ static inline int deliver_skb(struct sk_buff *skb, static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) { - if (ptype->af_packet_priv == NULL) + if (!ptype->af_packet_priv || !skb->sk) return false; if (ptype->id_match) @@ -2818,8 +2818,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, if (unlikely(tcpu != next_cpu) && (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - - rflow->last_qtail)) >= 0)) + rflow->last_qtail)) >= 0)) { + tcpu = next_cpu; rflow = set_rps_cpu(dev, skb, rflow, next_cpu); + } if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { *rflowp = rflow; diff --git a/trunk/net/core/dev_addr_lists.c b/trunk/net/core/dev_addr_lists.c index 87cc17db2d56..b079c7bbc157 100644 --- a/trunk/net/core/dev_addr_lists.c +++ b/trunk/net/core/dev_addr_lists.c @@ -319,7 +319,8 @@ int dev_addr_del(struct net_device *dev, const unsigned char *addr, */ ha = list_first_entry(&dev->dev_addrs.list, struct netdev_hw_addr, list); - if (ha->addr == dev->dev_addr && ha->refcount == 1) + if (!memcmp(ha->addr, addr, dev->addr_len) && + ha->type == addr_type && ha->refcount == 1) return -ENOENT; err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, diff --git a/trunk/net/core/net-sysfs.c b/trunk/net/core/net-sysfs.c index bcf02f608cbf..017a8bacfb27 100644 --- a/trunk/net/core/net-sysfs.c +++ b/trunk/net/core/net-sysfs.c @@ -429,6 +429,17 @@ static struct attribute_group netstat_group = { .name = "statistics", .attrs = netstat_attrs, }; + +#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) +static struct attribute *wireless_attrs[] = { + NULL +}; + +static struct attribute_group wireless_group = { + .name = "wireless", + .attrs = wireless_attrs, +}; +#endif #endif /* CONFIG_SYSFS */ #ifdef CONFIG_RPS @@ -1409,6 +1420,15 @@ int netdev_register_kobject(struct net_device *net) groups++; *groups++ = &netstat_group; + +#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) + if (net->ieee80211_ptr) + *groups++ = &wireless_group; +#if IS_ENABLED(CONFIG_WIRELESS_EXT) + else if (net->wireless_handlers) + *groups++ = &wireless_group; +#endif +#endif #endif /* CONFIG_SYSFS */ error = device_add(dev); diff --git a/trunk/net/core/rtnetlink.c b/trunk/net/core/rtnetlink.c index 76d4c2c3c89b..fad649ae4dec 100644 --- a/trunk/net/core/rtnetlink.c +++ b/trunk/net/core/rtnetlink.c @@ -2192,7 +2192,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb, goto skip; err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, - portid, seq, 0, NTF_SELF); + portid, seq, + RTM_NEWNEIGH, NTF_SELF); if (err < 0) return err; skip: diff --git a/trunk/net/ipv4/inet_diag.c 
b/trunk/net/ipv4/inet_diag.c index 535584c00f91..0c34bfabc11f 100644 --- a/trunk/net/ipv4/inet_diag.c +++ b/trunk/net/ipv4/inet_diag.c @@ -892,13 +892,16 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc) { const struct inet_diag_handler *handler; + int err = 0; handler = inet_diag_lock_handler(r->sdiag_protocol); if (!IS_ERR(handler)) handler->dump(skb, cb, r, bc); + else + err = PTR_ERR(handler); inet_diag_unlock_handler(handler); - return skb->len; + return err ? : skb->len; } static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) diff --git a/trunk/net/ipv4/ip_sockglue.c b/trunk/net/ipv4/ip_sockglue.c index 5eea4a811042..14bbfcf717ac 100644 --- a/trunk/net/ipv4/ip_sockglue.c +++ b/trunk/net/ipv4/ip_sockglue.c @@ -457,19 +457,28 @@ static int do_ip_setsockopt(struct sock *sk, int level, struct inet_sock *inet = inet_sk(sk); int val = 0, err; - if (((1<= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; diff --git a/trunk/net/ipv4/ip_vti.c b/trunk/net/ipv4/ip_vti.c index 1831092f999f..858fddf6482a 100644 --- a/trunk/net/ipv4/ip_vti.c +++ b/trunk/net/ipv4/ip_vti.c @@ -338,12 +338,17 @@ static int vti_rcv(struct sk_buff *skb) if (tunnel != NULL) { struct pcpu_tstats *tstats; + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) + return -1; + tstats = this_cpu_ptr(tunnel->dev->tstats); u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += skb->len; u64_stats_update_end(&tstats->syncp); + skb->mark = 0; + secpath_reset(skb); skb->dev = tunnel->dev; return 1; } diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index a8c651216fa6..df251424d816 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -1785,6 +1785,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, if (dev_out->flags & IFF_LOOPBACK) flags |= RTCF_LOCAL; + do_cache = true; if (type == RTN_BROADCAST) { flags |= RTCF_BROADCAST | RTCF_LOCAL; fi = NULL; @@ -1793,6 +1794,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res, if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, fl4->flowi4_proto)) flags &= ~RTCF_LOCAL; + else + do_cache = false; /* If multicast route do not exist use * default one, but do not gateway in this case. * Yes, it is hack. 
@@ -1802,8 +1805,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res, } fnhe = NULL; - do_cache = fi != NULL; - if (fi) { + do_cache &= fi != NULL; + if (do_cache) { struct rtable __rcu **prth; struct fib_nh *nh = &FIB_RES_NH(*res); @@ -2597,7 +2600,7 @@ int __init ip_rt_init(void) pr_err("Unable to create route proc files\n"); #ifdef CONFIG_XFRM xfrm_init(); - xfrm4_init(ip_rt_max_size); + xfrm4_init(); #endif rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL); diff --git a/trunk/net/ipv4/tcp.c b/trunk/net/ipv4/tcp.c index 197c0008503c..083092e3aed6 100644 --- a/trunk/net/ipv4/tcp.c +++ b/trunk/net/ipv4/tcp.c @@ -1212,7 +1212,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: - if (copied && likely(!tp->repair)) + if (copied) tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) @@ -1223,7 +1223,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } out: - if (copied && likely(!tp->repair)) + if (copied) tcp_push(sk, flags, mss_now, tp->nonagle); release_sock(sk); return copied + copied_syn; diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index 2c2b13a999ea..609ff98aeb47 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -5313,11 +5313,6 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, goto discard; } - /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - /* step 3: check security and precedence [ignored] */ /* step 4: Check for a SYN @@ -5552,6 +5547,11 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ @@ -6130,6 +6130,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } else goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. 
+ */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + /* step 6: check the URG bit */ tcp_urg(sk, skb, th); diff --git a/trunk/net/ipv4/tcp_metrics.c b/trunk/net/ipv4/tcp_metrics.c index 53bc5847bfa8..f696d7c2e9fa 100644 --- a/trunk/net/ipv4/tcp_metrics.c +++ b/trunk/net/ipv4/tcp_metrics.c @@ -1,7 +1,6 @@ #include #include #include -#include #include #include #include @@ -9,6 +8,7 @@ #include #include #include +#include #include #include @@ -1034,7 +1034,10 @@ static int __net_init tcp_net_metrics_init(struct net *net) net->ipv4.tcp_metrics_hash_log = order_base_2(slots); size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log; - net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL); + net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!net->ipv4.tcp_metrics_hash) + net->ipv4.tcp_metrics_hash = vzalloc(size); + if (!net->ipv4.tcp_metrics_hash) return -ENOMEM; @@ -1055,7 +1058,10 @@ static void __net_exit tcp_net_metrics_exit(struct net *net) tm = next; } } - kfree(net->ipv4.tcp_metrics_hash); + if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash)) + vfree(net->ipv4.tcp_metrics_hash); + else + kfree(net->ipv4.tcp_metrics_hash); } static __net_initdata struct pernet_operations tcp_net_metrics_ops = { diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c index cfe6ffe1c177..2798706cb063 100644 --- a/trunk/net/ipv4/tcp_output.c +++ b/trunk/net/ipv4/tcp_output.c @@ -1986,6 +1986,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, tso_segs = tcp_init_tso_segs(sk, skb, mss_now); BUG_ON(!tso_segs); + if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) + goto repair; /* Skip network transmission */ + cwnd_quota = tcp_cwnd_test(tp, skb); if (!cwnd_quota) break; @@ -2026,6 +2029,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) break; +repair: /* Advance the send_head. This one is sent out. * This call will increment packets_out. */ diff --git a/trunk/net/ipv4/xfrm4_policy.c b/trunk/net/ipv4/xfrm4_policy.c index 05c5ab8d983c..3be0ac2c1920 100644 --- a/trunk/net/ipv4/xfrm4_policy.c +++ b/trunk/net/ipv4/xfrm4_policy.c @@ -279,19 +279,8 @@ static void __exit xfrm4_policy_fini(void) xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo); } -void __init xfrm4_init(int rt_max_size) +void __init xfrm4_init(void) { - /* - * Select a default value for the gc_thresh based on the main route - * table hash size. It seems to me the worst case scenario is when - * we have ipsec operating in transport mode, in which we create a - * dst_entry per socket. The xfrm gc algorithm starts trying to remove - * entries at gc_thresh, and prevents new allocations as 2*gc_thresh - * so lets set an initial xfrm gc_thresh value at the rt_max_size/2. - * That will let us store an ipsec connection per route table entry, - * and start cleaning when were 1/2 full - */ - xfrm4_dst_ops.gc_thresh = rt_max_size/2; dst_entries_init(&xfrm4_dst_ops); xfrm4_state_init(); diff --git a/trunk/net/ipv6/inet6_connection_sock.c b/trunk/net/ipv6/inet6_connection_sock.c index c4f934176cab..30647857a375 100644 --- a/trunk/net/ipv6/inet6_connection_sock.c +++ b/trunk/net/ipv6/inet6_connection_sock.c @@ -252,6 +252,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu) return NULL; dst->ops->update_pmtu(dst, sk, NULL, mtu); - return inet6_csk_route_socket(sk, &fl6); + dst = inet6_csk_route_socket(sk, &fl6); + return IS_ERR(dst) ? 
NULL : dst; } EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu); diff --git a/trunk/net/ipv6/ip6_gre.c b/trunk/net/ipv6/ip6_gre.c index 0185679c5f53..d5cb3c4e66f8 100644 --- a/trunk/net/ipv6/ip6_gre.c +++ b/trunk/net/ipv6/ip6_gre.c @@ -1633,9 +1633,9 @@ static size_t ip6gre_get_size(const struct net_device *dev) /* IFLA_GRE_OKEY */ nla_total_size(4) + /* IFLA_GRE_LOCAL */ - nla_total_size(4) + + nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GRE_REMOTE */ - nla_total_size(4) + + nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GRE_TTL */ nla_total_size(1) + /* IFLA_GRE_TOS */ @@ -1659,8 +1659,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) || nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || - nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) || - nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) || + nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) || + nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) || nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) || /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/ nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || diff --git a/trunk/net/ipv6/ipv6_sockglue.c b/trunk/net/ipv6/ipv6_sockglue.c index ba6d13d1f1e1..e02faed6d17e 100644 --- a/trunk/net/ipv6/ipv6_sockglue.c +++ b/trunk/net/ipv6/ipv6_sockglue.c @@ -827,6 +827,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (val < 0 || val > 255) goto e_inval; np->min_hopcount = val; + retv = 0; break; case IPV6_DONTFRAG: np->dontfrag = valbool; diff --git a/trunk/net/ipv6/ndisc.c b/trunk/net/ipv6/ndisc.c index ff36194a71aa..2edce30ef733 100644 --- a/trunk/net/ipv6/ndisc.c +++ b/trunk/net/ipv6/ndisc.c @@ -535,7 +535,7 @@ static void ndisc_send_unsol_na(struct net_device *dev) { struct inet6_dev *idev; struct inet6_ifaddr *ifa; - struct in6_addr mcaddr; + struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT; idev = in6_dev_get(dev); if (!idev) @@ -543,7 +543,6 @@ static void ndisc_send_unsol_na(struct net_device *dev) read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { - addrconf_addr_solict_mult(&ifa->addr, &mcaddr); ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr, /*router=*/ !!idev->cnf.forwarding, /*solicited=*/ false, /*override=*/ true, diff --git a/trunk/net/mac80211/cfg.c b/trunk/net/mac80211/cfg.c index 05f3a313db88..7371f676cf41 100644 --- a/trunk/net/mac80211/cfg.c +++ b/trunk/net/mac80211/cfg.c @@ -2594,6 +2594,9 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, else local->probe_req_reg--; + if (!local->open_count) + break; + ieee80211_queue_work(&local->hw, &local->reconfig_filter); break; default: diff --git a/trunk/net/mac80211/ibss.c b/trunk/net/mac80211/ibss.c index bf87c70ac6c5..c21e33d1abd0 100644 --- a/trunk/net/mac80211/ibss.c +++ b/trunk/net/mac80211/ibss.c @@ -1151,10 +1151,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) mutex_lock(&sdata->u.ibss.mtx); - sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; - memset(sdata->u.ibss.bssid, 0, ETH_ALEN); - sdata->u.ibss.ssid_len = 0; - active_ibss = ieee80211_sta_active_ibss(sdata); if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { @@ -1175,6 +1171,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) } } + ifibss->state = IEEE80211_IBSS_MLME_SEARCH; + memset(ifibss->bssid, 0, ETH_ALEN); + ifibss->ssid_len = 0; + sta_info_flush(sdata->local, 
sdata); spin_lock_bh(&ifibss->incomplete_lock); diff --git a/trunk/net/mac80211/ieee80211_i.h b/trunk/net/mac80211/ieee80211_i.h index 8c804550465b..156e5835e37f 100644 --- a/trunk/net/mac80211/ieee80211_i.h +++ b/trunk/net/mac80211/ieee80211_i.h @@ -1314,6 +1314,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs); /* HT */ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, diff --git a/trunk/net/mac80211/main.c b/trunk/net/mac80211/main.c index c80c4490351c..f57f597972f8 100644 --- a/trunk/net/mac80211/main.c +++ b/trunk/net/mac80211/main.c @@ -871,8 +871,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->hw.wiphy->cipher_suites, sizeof(u32) * local->hw.wiphy->n_cipher_suites, GFP_KERNEL); - if (!suites) - return -ENOMEM; + if (!suites) { + result = -ENOMEM; + goto fail_wiphy_register; + } for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { u32 suite = local->hw.wiphy->cipher_suites[r]; if (suite == WLAN_CIPHER_SUITE_WEP40 || diff --git a/trunk/net/mac80211/scan.c b/trunk/net/mac80211/scan.c index c4cdbde24fd3..43e60b5a7546 100644 --- a/trunk/net/mac80211/scan.c +++ b/trunk/net/mac80211/scan.c @@ -917,7 +917,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req) { struct ieee80211_local *local = sdata->local; - struct ieee80211_sched_scan_ies sched_scan_ies; + struct ieee80211_sched_scan_ies sched_scan_ies = {}; int ret, i; mutex_lock(&local->mtx); diff --git a/trunk/net/mac80211/sta_info.c b/trunk/net/mac80211/sta_info.c index 0a4e4c04db89..d2eb64e12353 100644 --- a/trunk/net/mac80211/sta_info.c +++ b/trunk/net/mac80211/sta_info.c @@ -117,8 +117,8 @@ static void free_sta_work(struct work_struct *wk) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); - __skb_queue_purge(&sta->ps_tx_buf[ac]); - __skb_queue_purge(&sta->tx_filtered[ac]); + ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); + ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); } #ifdef CONFIG_MAC80211_MESH @@ -141,7 +141,7 @@ static void free_sta_work(struct work_struct *wk) tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); if (!tid_tx) continue; - __skb_queue_purge(&tid_tx->pending); + ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); kfree(tid_tx); } @@ -961,6 +961,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) struct ieee80211_local *local = sdata->local; struct sk_buff_head pending; int filtered = 0, buffered = 0, ac; + unsigned long flags; clear_sta_flag(sta, WLAN_STA_SP); @@ -976,12 +977,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { int count = skb_queue_len(&pending), tmp; + spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); + spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); tmp = skb_queue_len(&pending); filtered += tmp - count; count = tmp; + spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); + spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); tmp = skb_queue_len(&pending); buffered += tmp - count; } diff --git a/trunk/net/mac80211/status.c b/trunk/net/mac80211/status.c index 
3af0cc4130f1..101eb88a2b78 100644 --- a/trunk/net/mac80211/status.c +++ b/trunk/net/mac80211/status.c @@ -668,3 +668,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb_any(skb); } EXPORT_SYMBOL(ieee80211_free_txskb); + +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(skbs))) + ieee80211_free_txskb(hw, skb); +} diff --git a/trunk/net/mac80211/tx.c b/trunk/net/mac80211/tx.c index c9bf83f36657..b858ebe41fda 100644 --- a/trunk/net/mac80211/tx.c +++ b/trunk/net/mac80211/tx.c @@ -1358,7 +1358,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) if (tx->skb) ieee80211_free_txskb(&tx->local->hw, tx->skb); else - __skb_queue_purge(&tx->skbs); + ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); @@ -2120,10 +2120,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, */ void ieee80211_clear_tx_pending(struct ieee80211_local *local) { + struct sk_buff *skb; int i; - for (i = 0; i < local->hw.queues; i++) - skb_queue_purge(&local->pending[i]); + for (i = 0; i < local->hw.queues; i++) { + while ((skb = skb_dequeue(&local->pending[i])) != NULL) + ieee80211_free_txskb(&local->hw, skb); + } } /* diff --git a/trunk/net/mac80211/util.c b/trunk/net/mac80211/util.c index 239391807ca9..0151ae33c4cd 100644 --- a/trunk/net/mac80211/util.c +++ b/trunk/net/mac80211/util.c @@ -1491,6 +1491,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; + if (!sdata->u.mgd.associated) + continue; ieee80211_send_nullfunc(local, sdata, 0); } diff --git a/trunk/net/netfilter/ipset/ip_set_hash_ip.c b/trunk/net/netfilter/ipset/ip_set_hash_ip.c index ec3dba5dcd62..5c0b78528e55 100644 --- a/trunk/net/netfilter/ipset/ip_set_hash_ip.c +++ b/trunk/net/netfilter/ipset/ip_set_hash_ip.c @@ -173,6 +173,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], return adtfn(set, &nip, timeout, flags); } + ip_to = ip; if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) @@ -185,8 +186,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], if (!cidr || cidr > 32) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); - } else - ip_to = ip; + } hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); diff --git a/trunk/net/netfilter/ipset/ip_set_hash_ipport.c b/trunk/net/netfilter/ipset/ip_set_hash_ipport.c index 0171f7502fa5..6283351f4eeb 100644 --- a/trunk/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/trunk/net/netfilter/ipset/ip_set_hash_ipport.c @@ -162,7 +162,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipport4_elem data = { }; - u32 ip, ip_to = 0, p = 0, port, port_to; + u32 ip, ip_to, p = 0, port, port_to; u32 timeout = h->timeout; bool with_ports = false; int ret; @@ -210,7 +210,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], return ip_set_eexist(ret, flags) ? 
0 : ret; } - ip = ntohl(data.ip); + ip_to = ip = ntohl(data.ip); if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) @@ -223,8 +223,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], if (!cidr || cidr > 32) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); - } else - ip_to = ip; + } port_to = port = ntohs(data.port); if (with_ports && tb[IPSET_ATTR_PORT_TO]) { diff --git a/trunk/net/netfilter/ipset/ip_set_hash_ipportip.c b/trunk/net/netfilter/ipset/ip_set_hash_ipportip.c index 6344ef551ec8..6a21271c8d5a 100644 --- a/trunk/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/trunk/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -166,7 +166,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip4_elem data = { }; - u32 ip, ip_to = 0, p = 0, port, port_to; + u32 ip, ip_to, p = 0, port, port_to; u32 timeout = h->timeout; bool with_ports = false; int ret; @@ -218,7 +218,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], return ip_set_eexist(ret, flags) ? 0 : ret; } - ip = ntohl(data.ip); + ip_to = ip = ntohl(data.ip); if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) @@ -231,8 +231,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], if (!cidr || cidr > 32) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); - } else - ip_to = ip; + } port_to = port = ntohs(data.port); if (with_ports && tb[IPSET_ATTR_PORT_TO]) { diff --git a/trunk/net/netfilter/ipset/ip_set_hash_ipportnet.c b/trunk/net/netfilter/ipset/ip_set_hash_ipportnet.c index cb71f9a774e7..2d5cd4ee30eb 100644 --- a/trunk/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/trunk/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -215,8 +215,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 }; - u32 ip, ip_to = 0, p = 0, port, port_to; - u32 ip2_from = 0, ip2_to, ip2_last, ip2; + u32 ip, ip_to, p = 0, port, port_to; + u32 ip2_from, ip2_to, ip2_last, ip2; u32 timeout = h->timeout; bool with_ports = false; u8 cidr; @@ -286,6 +286,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], return ip_set_eexist(ret, flags) ? 
0 : ret; } + ip_to = ip; if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) @@ -306,6 +307,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], if (port > port_to) swap(port, port_to); } + + ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); if (ret) diff --git a/trunk/net/netfilter/nfnetlink_cttimeout.c b/trunk/net/netfilter/nfnetlink_cttimeout.c index 8847b4d8be06..701c88a20fea 100644 --- a/trunk/net/netfilter/nfnetlink_cttimeout.c +++ b/trunk/net/netfilter/nfnetlink_cttimeout.c @@ -41,7 +41,8 @@ MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tu static LIST_HEAD(cttimeout_list); static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = { - [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING }, + [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING, + .len = CTNL_TIMEOUT_NAME_MAX - 1}, [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 }, [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 }, [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED }, diff --git a/trunk/net/nfc/llcp/llcp.c b/trunk/net/nfc/llcp/llcp.c index cc10d073c338..9e8f4b2801f6 100644 --- a/trunk/net/nfc/llcp/llcp.c +++ b/trunk/net/nfc/llcp/llcp.c @@ -1210,7 +1210,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) local->remote_miu = LLCP_DEFAULT_MIU; local->remote_lto = LLCP_DEFAULT_LTO; - list_add(&llcp_devices, &local->list); + list_add(&local->list, &llcp_devices); return 0; } diff --git a/trunk/net/sched/sch_qfq.c b/trunk/net/sched/sch_qfq.c index f0dd83cff906..9687fa1c2275 100644 --- a/trunk/net/sched/sch_qfq.c +++ b/trunk/net/sched/sch_qfq.c @@ -84,18 +84,19 @@ * grp->index is the index of the group; and grp->slot_shift * is the shift for the corresponding (scaled) sigma_i. 
*/ -#define QFQ_MAX_INDEX 19 -#define QFQ_MAX_WSHIFT 16 +#define QFQ_MAX_INDEX 24 +#define QFQ_MAX_WSHIFT 12 #define QFQ_MAX_WEIGHT (1<wsum += delta_w; } +static void qfq_update_reactivate_class(struct qfq_sched *q, + struct qfq_class *cl, + u32 inv_w, u32 lmax, int delta_w) +{ + bool need_reactivation = false; + int i = qfq_calc_index(inv_w, lmax); + + if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { + /* + * shift cl->F back, to not charge the + * class for the not-yet-served head + * packet + */ + cl->F = cl->S; + /* remove class from its slot in the old group */ + qfq_deactivate_class(q, cl); + need_reactivation = true; + } + + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); + + if (need_reactivation) /* activate in new group */ + qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); +} + + static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { @@ -238,7 +265,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct qfq_class *cl = (struct qfq_class *)*arg; struct nlattr *tb[TCA_QFQ_MAX + 1]; u32 weight, lmax, inv_w; - int i, err; + int err; int delta_w; if (tca[TCA_OPTIONS] == NULL) { @@ -270,16 +297,14 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (tb[TCA_QFQ_LMAX]) { lmax = nla_get_u32(tb[TCA_QFQ_LMAX]); - if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) { + if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) { pr_notice("qfq: invalid max length %u\n", lmax); return -EINVAL; } } else - lmax = 1UL << QFQ_MTU_SHIFT; + lmax = psched_mtu(qdisc_dev(sch)); if (cl != NULL) { - bool need_reactivation = false; - if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), @@ -291,24 +316,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (lmax == cl->lmax && inv_w == cl->inv_w) return 0; /* nothing to update */ - i = qfq_calc_index(inv_w, lmax); sch_tree_lock(sch); - if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { - /* - * shift cl->F back, to not charge the - * class for the not-yet-served head - * packet - */ - cl->F = cl->S; - /* remove class from its slot in the old group */ - qfq_deactivate_class(q, cl); - need_reactivation = true; - } - - qfq_update_class_params(q, cl, lmax, inv_w, delta_w); - - if (need_reactivation) /* activate in new group */ - qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); + qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w); sch_tree_unlock(sch); return 0; @@ -663,15 +672,48 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V) /* - * XXX we should make sure that slot becomes less than 32. - * This is guaranteed by the input values. - * roundedS is always cl->S rounded on grp->slot_shift bits. 
+ * If the weight and lmax (max_pkt_size) of the classes do not change, + * then QFQ guarantees that the slot index is never higher than + * 2 + ((1<S) >> grp->slot_shift; - unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS; + unsigned int i; /* slot index in the bucket list */ + + if (unlikely(slot > QFQ_MAX_SLOTS - 2)) { + u64 deltaS = roundedS - grp->S - + ((u64)(QFQ_MAX_SLOTS - 2)<slot_shift); + cl->S -= deltaS; + cl->F -= deltaS; + slot = QFQ_MAX_SLOTS - 2; + } + + i = (grp->front + slot) % QFQ_MAX_SLOTS; hlist_add_head(&cl->next, &grp->slots[i]); __set_bit(slot, &grp->full_slots); @@ -892,6 +934,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) } pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); + if (unlikely(cl->lmax < qdisc_pkt_len(skb))) { + pr_debug("qfq: increasing maxpkt from %u to %u for class %u", + cl->lmax, qdisc_pkt_len(skb), cl->common.classid); + qfq_update_reactivate_class(q, cl, cl->inv_w, + qdisc_pkt_len(skb), 0); + } + err = qdisc_enqueue(skb, cl->qdisc); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); diff --git a/trunk/net/sctp/proc.c b/trunk/net/sctp/proc.c index c3bea269faf4..9966e7b16451 100644 --- a/trunk/net/sctp/proc.c +++ b/trunk/net/sctp/proc.c @@ -102,7 +102,7 @@ static const struct file_operations sctp_snmp_seq_fops = { .open = sctp_snmp_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = single_release, + .release = single_release_net, }; /* Set up the proc fs entry for 'snmp' object. */ @@ -251,7 +251,7 @@ static const struct file_operations sctp_eps_seq_fops = { .open = sctp_eps_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; /* Set up the proc fs entry for 'eps' object. */ @@ -372,7 +372,7 @@ static const struct file_operations sctp_assocs_seq_fops = { .open = sctp_assocs_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; /* Set up the proc fs entry for 'assocs' object. */ @@ -517,7 +517,7 @@ static const struct file_operations sctp_remaddr_seq_fops = { .open = sctp_remaddr_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; int __net_init sctp_remaddr_proc_init(struct net *net) diff --git a/trunk/net/tipc/handler.c b/trunk/net/tipc/handler.c index 111ff8300ae5..b36f0fcd9bdf 100644 --- a/trunk/net/tipc/handler.c +++ b/trunk/net/tipc/handler.c @@ -116,7 +116,6 @@ void tipc_handler_stop(void) return; handler_enabled = 0; - tasklet_disable(&tipc_tasklet); tasklet_kill(&tipc_tasklet); spin_lock_bh(&qitem_lock); diff --git a/trunk/net/wireless/reg.c b/trunk/net/wireless/reg.c index bcc7d7ee5a51..b75756b05af7 100644 --- a/trunk/net/wireless/reg.c +++ b/trunk/net/wireless/reg.c @@ -141,9 +141,8 @@ static const struct ieee80211_regdomain world_regdom = { .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), - /* IEEE 802.11b/g, channels 12..13. No HT40 - * channel fits here. */ - REG_RULE(2467-10, 2472+10, 20, 6, 20, + /* IEEE 802.11b/g, channels 12..13. 
*/ + REG_RULE(2467-10, 2472+10, 40, 6, 20, NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS), /* IEEE 802.11 channel 14 - Only JP enables diff --git a/trunk/scripts/Makefile.modinst b/trunk/scripts/Makefile.modinst index dda4b2b61927..ecbb44797e28 100644 --- a/trunk/scripts/Makefile.modinst +++ b/trunk/scripts/Makefile.modinst @@ -16,8 +16,9 @@ PHONY += $(modules) __modinst: $(modules) @: +# Don't stop modules_install if we can't sign external modules. quiet_cmd_modules_install = INSTALL $@ - cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) ; $(mod_sign_cmd) $(2)/$(notdir $@) + cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) ; $(mod_sign_cmd) $(2)/$(notdir $@) $(patsubst %,|| true,$(KBUILD_EXTMOD)) # Modules built outside the kernel source tree go into extra by default INSTALL_MOD_DIR ?= extra diff --git a/trunk/scripts/checkpatch.pl b/trunk/scripts/checkpatch.pl index 21a9f5de0a21..f18750e3bd6c 100755 --- a/trunk/scripts/checkpatch.pl +++ b/trunk/scripts/checkpatch.pl @@ -1890,8 +1890,10 @@ sub process { } if ($realfile =~ m@^(drivers/net/|net/)@ && - $rawline !~ m@^\+[ \t]*(\/\*|\*\/)@ && - $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) { + $rawline !~ m@^\+[ \t]*\*/[ \t]*$@ && #trailing */ + $rawline !~ m@^\+.*/\*.*\*/[ \t]*$@ && #inline /*...*/ + $rawline !~ m@^\+.*\*{2,}/[ \t]*$@ && #trailing **/ + $rawline =~ m@^\+[ \t]*.+\*\/[ \t]*$@) { #non blank */ WARN("NETWORKING_BLOCK_COMMENT_STYLE", "networking block comments put the trailing */ on a separate line\n" . $herecurr); } diff --git a/trunk/scripts/kconfig/expr.h b/trunk/scripts/kconfig/expr.h index bd2e09895553..cdd48600e02a 100644 --- a/trunk/scripts/kconfig/expr.h +++ b/trunk/scripts/kconfig/expr.h @@ -12,7 +12,7 @@ extern "C" { #include #include -#include +#include "list.h" #ifndef __cplusplus #include #endif @@ -175,12 +175,11 @@ struct menu { #define MENU_ROOT 0x0002 struct jump_key { - CIRCLEQ_ENTRY(jump_key) entries; + struct list_head entries; size_t offset; struct menu *target; int index; }; -CIRCLEQ_HEAD(jk_head, jump_key); #define JUMP_NB 9 diff --git a/trunk/scripts/kconfig/list.h b/trunk/scripts/kconfig/list.h new file mode 100644 index 000000000000..0ae730be5f49 --- /dev/null +++ b/trunk/scripts/kconfig/list.h @@ -0,0 +1,91 @@ +#ifndef LIST_H +#define LIST_H + +/* + * Copied from include/linux/... + */ + +#undef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + + +struct list_head { + struct list_head *next, *prev; +}; + + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. 
+ * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add(struct list_head *_new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = _new; + _new->next = next; + _new->prev = prev; + prev->next = _new; +} + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *_new, struct list_head *head) +{ + __list_add(_new, head->prev, head); +} + +#endif diff --git a/trunk/scripts/kconfig/lkc_proto.h b/trunk/scripts/kconfig/lkc_proto.h index 1d1c08537f1e..ef1a7381f956 100644 --- a/trunk/scripts/kconfig/lkc_proto.h +++ b/trunk/scripts/kconfig/lkc_proto.h @@ -21,9 +21,9 @@ P(menu_get_root_menu,struct menu *,(struct menu *menu)); P(menu_get_parent_menu,struct menu *,(struct menu *menu)); P(menu_has_help,bool,(struct menu *menu)); P(menu_get_help,const char *,(struct menu *menu)); -P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct jk_head +P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct list_head *head)); -P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct jk_head +P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct list_head *head)); P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help)); diff --git a/trunk/scripts/kconfig/mconf.c b/trunk/scripts/kconfig/mconf.c index 48f67448af7b..53975cf87608 100644 --- a/trunk/scripts/kconfig/mconf.c +++ b/trunk/scripts/kconfig/mconf.c @@ -312,7 +312,7 @@ static void set_config_filename(const char *config_filename) struct search_data { - struct jk_head *head; + struct list_head *head; struct menu **targets; int *keys; }; @@ -323,7 +323,7 @@ static void update_text(char *buf, size_t start, size_t end, void *_data) struct jump_key *pos; int k = 0; - CIRCLEQ_FOREACH(pos, data->head, entries) { + list_for_each_entry(pos, data->head, entries) { if (pos->offset >= start && pos->offset < end) { char header[4]; @@ -375,7 +375,7 @@ static void search_conf(void) sym_arr = sym_re_search(dialog_input); do { - struct jk_head head = CIRCLEQ_HEAD_INITIALIZER(head); + LIST_HEAD(head); struct menu *targets[JUMP_NB]; int keys[JUMP_NB + 1], i; struct search_data data = { diff --git a/trunk/scripts/kconfig/menu.c b/trunk/scripts/kconfig/menu.c index a3cade659f89..e98a05c8e508 100644 --- a/trunk/scripts/kconfig/menu.c +++ b/trunk/scripts/kconfig/menu.c @@ -508,7 +508,7 @@ const char *menu_get_help(struct menu *menu) } static void get_prompt_str(struct gstr *r, struct property *prop, - struct jk_head *head) + struct list_head *head) { int i, j; struct menu *submenu[8], *menu, *location = NULL; @@ -544,12 +544,13 @@ static void get_prompt_str(struct gstr *r, struct property *prop, } else jump->target = location; - if (CIRCLEQ_EMPTY(head)) + if (list_empty(head)) jump->index = 0; else - jump->index = 
CIRCLEQ_LAST(head)->index + 1; + jump->index = list_entry(head->prev, struct jump_key, + entries)->index + 1; - CIRCLEQ_INSERT_TAIL(head, jump, entries); + list_add_tail(&jump->entries, head); } if (i > 0) { @@ -573,7 +574,8 @@ static void get_prompt_str(struct gstr *r, struct property *prop, /* * head is optional and may be NULL */ -void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head) +void get_symbol_str(struct gstr *r, struct symbol *sym, + struct list_head *head) { bool hit; struct property *prop; @@ -612,7 +614,7 @@ void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head) str_append(r, "\n\n"); } -struct gstr get_relations_str(struct symbol **sym_arr, struct jk_head *head) +struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head) { struct symbol *sym; struct gstr res = str_new(); diff --git a/trunk/scripts/sign-file b/trunk/scripts/sign-file index 87ca59d36e7e..974a20b661b7 100755 --- a/trunk/scripts/sign-file +++ b/trunk/scripts/sign-file @@ -156,12 +156,12 @@ sub asn1_extract($$@) if ($l == 0x1) { $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)); - } elsif ($l = 0x2) { + } elsif ($l == 0x2) { $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2)); - } elsif ($l = 0x3) { + } elsif ($l == 0x3) { $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16; $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2)); - } elsif ($l = 0x4) { + } elsif ($l == 0x4) { $len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4)); } else { die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n"; diff --git a/trunk/security/device_cgroup.c b/trunk/security/device_cgroup.c index 842c254396db..b08d20c66c2e 100644 --- a/trunk/security/device_cgroup.c +++ b/trunk/security/device_cgroup.c @@ -164,8 +164,8 @@ static void dev_exception_clean(struct dev_cgroup *dev_cgroup) struct dev_exception_item *ex, *tmp; list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) { - list_del(&ex->list); - kfree(ex); + list_del_rcu(&ex->list); + kfree_rcu(ex, rcu); } } @@ -298,7 +298,7 @@ static int may_access(struct dev_cgroup *dev_cgroup, struct dev_exception_item *ex; bool match = false; - list_for_each_entry(ex, &dev_cgroup->exceptions, list) { + list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) { if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) continue; if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR)) @@ -352,6 +352,8 @@ static int parent_has_perm(struct dev_cgroup *childcg, */ static inline int may_allow_all(struct dev_cgroup *parent) { + if (!parent) + return 1; return parent->behavior == DEVCG_DEFAULT_ALLOW; } @@ -376,11 +378,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, int count, rc; struct dev_exception_item ex; struct cgroup *p = devcgroup->css.cgroup; - struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent); + struct dev_cgroup *parent = NULL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (p->parent) + parent = cgroup_to_devcgroup(p->parent); + memset(&ex, 0, sizeof(ex)); b = buffer; @@ -391,11 +396,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, if (!may_allow_all(parent)) return -EPERM; dev_exception_clean(devcgroup); + devcgroup->behavior = DEVCG_DEFAULT_ALLOW; + if (!parent) + break; + rc = dev_exceptions_copy(&devcgroup->exceptions, &parent->exceptions); if (rc) return rc; - devcgroup->behavior = DEVCG_DEFAULT_ALLOW; break; case DEVCG_DENY: dev_exception_clean(devcgroup); diff --git 
a/trunk/security/selinux/netnode.c b/trunk/security/selinux/netnode.c
index 28f911cdd7c7..c5454c0477c3 100644
--- a/trunk/security/selinux/netnode.c
+++ b/trunk/security/selinux/netnode.c
@@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node)
        if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
                struct sel_netnode *tail;
                tail = list_entry(
-                       rcu_dereference(sel_netnode_hash[idx].list.prev),
+                       rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
+                                                 lockdep_is_held(&sel_netnode_lock)),
                        struct sel_netnode, list);
                list_del_rcu(&tail->list);
                kfree_rcu(tail, rcu);
diff --git a/trunk/sound/core/oss/mixer_oss.c b/trunk/sound/core/oss/mixer_oss.c
index a9a2e63c0222..e8a1d18774b2 100644
--- a/trunk/sound/core/oss/mixer_oss.c
+++ b/trunk/sound/core/oss/mixer_oss.c
@@ -76,6 +76,7 @@ static int snd_mixer_oss_open(struct inode *inode, struct file *file)
                snd_card_unref(card);
                return -EFAULT;
        }
+       snd_card_unref(card);
        return 0;
 }
 
diff --git a/trunk/sound/core/oss/pcm_oss.c b/trunk/sound/core/oss/pcm_oss.c
index f337b66a020b..4c1cc51772e6 100644
--- a/trunk/sound/core/oss/pcm_oss.c
+++ b/trunk/sound/core/oss/pcm_oss.c
@@ -2454,6 +2454,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
        mutex_unlock(&pcm->open_mutex);
        if (err < 0)
                goto __error;
+       snd_card_unref(pcm->card);
        return err;
 
       __error:
diff --git a/trunk/sound/core/pcm_native.c b/trunk/sound/core/pcm_native.c
index 6e8872de5ba0..f9ddecf2f4cd 100644
--- a/trunk/sound/core/pcm_native.c
+++ b/trunk/sound/core/pcm_native.c
@@ -2122,7 +2122,8 @@ static int snd_pcm_playback_open(struct inode *inode, struct file *file)
        pcm = snd_lookup_minor_data(iminor(inode),
                                    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
        err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
-       snd_card_unref(pcm->card);
+       if (pcm)
+               snd_card_unref(pcm->card);
        return err;
 }
 
@@ -2135,7 +2136,8 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file)
        pcm = snd_lookup_minor_data(iminor(inode),
                                    SNDRV_DEVICE_TYPE_PCM_CAPTURE);
        err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
-       snd_card_unref(pcm->card);
+       if (pcm)
+               snd_card_unref(pcm->card);
        return err;
 }
 
diff --git a/trunk/sound/core/sound.c b/trunk/sound/core/sound.c
index 89780c323f19..70ccdab74153 100644
--- a/trunk/sound/core/sound.c
+++ b/trunk/sound/core/sound.c
@@ -114,7 +114,7 @@ void *snd_lookup_minor_data(unsigned int minor, int type)
        mreg = snd_minors[minor];
        if (mreg && mreg->type == type) {
                private_data = mreg->private_data;
-               if (mreg->card_ptr)
+               if (private_data && mreg->card_ptr)
                        atomic_inc(&mreg->card_ptr->refcount);
        } else
                private_data = NULL;
diff --git a/trunk/sound/core/sound_oss.c b/trunk/sound/core/sound_oss.c
index e1d79ee35906..726a49ac9725 100644
--- a/trunk/sound/core/sound_oss.c
+++ b/trunk/sound/core/sound_oss.c
@@ -54,7 +54,7 @@ void *snd_lookup_oss_minor_data(unsigned int minor, int type)
        mreg = snd_oss_minors[minor];
        if (mreg && mreg->type == type) {
                private_data = mreg->private_data;
-               if (mreg->card_ptr)
+               if (private_data && mreg->card_ptr)
                        atomic_inc(&mreg->card_ptr->refcount);
        } else
                private_data = NULL;
diff --git a/trunk/sound/i2c/other/ak4113.c b/trunk/sound/i2c/other/ak4113.c
index ef68d710d08c..e04e750a77ed 100644
--- a/trunk/sound/i2c/other/ak4113.c
+++ b/trunk/sound/i2c/other/ak4113.c
@@ -426,7 +426,7 @@ static struct snd_kcontrol_new snd_ak4113_iec958_controls[] = {
        },
        {
                .iface = SNDRV_CTL_ELEM_IFACE_PCM,
-               .name = "IEC958 Preample Capture Default",
+               .name = "IEC958 Preamble Capture Default",
                .access =
SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = snd_ak4113_spdif_pinfo, diff --git a/trunk/sound/i2c/other/ak4114.c b/trunk/sound/i2c/other/ak4114.c index 816e7d225fb0..5bf4fca19e48 100644 --- a/trunk/sound/i2c/other/ak4114.c +++ b/trunk/sound/i2c/other/ak4114.c @@ -401,7 +401,7 @@ static struct snd_kcontrol_new snd_ak4114_iec958_controls[] = { }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, - .name = "IEC958 Preample Capture Default", + .name = "IEC958 Preamble Capture Default", .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = snd_ak4114_spdif_pinfo, .get = snd_ak4114_spdif_pget, diff --git a/trunk/sound/i2c/other/ak4117.c b/trunk/sound/i2c/other/ak4117.c index b4b2a51fc117..40e33c9f2b09 100644 --- a/trunk/sound/i2c/other/ak4117.c +++ b/trunk/sound/i2c/other/ak4117.c @@ -380,7 +380,7 @@ static struct snd_kcontrol_new snd_ak4117_iec958_controls[] = { }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, - .name = "IEC958 Preample Capture Default", + .name = "IEC958 Preamble Capture Default", .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = snd_ak4117_spdif_pinfo, .get = snd_ak4117_spdif_pget, diff --git a/trunk/sound/pci/es1968.c b/trunk/sound/pci/es1968.c index 5d0e568fdea1..7266020c16cb 100644 --- a/trunk/sound/pci/es1968.c +++ b/trunk/sound/pci/es1968.c @@ -2581,9 +2581,14 @@ static u8 snd_es1968_tea575x_get_pins(struct snd_tea575x *tea) struct es1968 *chip = tea->private_data; unsigned long io = chip->io_port + GPIO_DATA; u16 val = inw(io); + u8 ret; - return (val & STR_DATA) ? TEA575X_DATA : 0 | - (val & STR_MOST) ? TEA575X_MOST : 0; + ret = 0; + if (val & STR_DATA) + ret |= TEA575X_DATA; + if (val & STR_MOST) + ret |= TEA575X_MOST; + return ret; } static void snd_es1968_tea575x_set_direction(struct snd_tea575x *tea, bool output) @@ -2655,6 +2660,8 @@ static struct ess_device_list pm_whitelist[] __devinitdata = { { TYPE_MAESTRO2E, 0x1179 }, { TYPE_MAESTRO2E, 0x14c0 }, /* HP omnibook 4150 */ { TYPE_MAESTRO2E, 0x1558 }, + { TYPE_MAESTRO2E, 0x125d }, /* a PCI card, e.g. Terratec DMX */ + { TYPE_MAESTRO2, 0x125d }, /* a PCI card, e.g. SF64-PCE2 */ }; static struct ess_device_list mpu_blacklist[] __devinitdata = { diff --git a/trunk/sound/pci/fm801.c b/trunk/sound/pci/fm801.c index cc2e91d15538..c5806f89be1e 100644 --- a/trunk/sound/pci/fm801.c +++ b/trunk/sound/pci/fm801.c @@ -767,9 +767,14 @@ static u8 snd_fm801_tea575x_get_pins(struct snd_tea575x *tea) struct fm801 *chip = tea->private_data; unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL)); struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip); - - return (reg & FM801_GPIO_GP(gpio.data)) ? TEA575X_DATA : 0 | - (reg & FM801_GPIO_GP(gpio.most)) ? 
TEA575X_MOST : 0; + u8 ret; + + ret = 0; + if (reg & FM801_GPIO_GP(gpio.data)) + ret |= TEA575X_DATA; + if (reg & FM801_GPIO_GP(gpio.most)) + ret |= TEA575X_MOST; + return ret; } static void snd_fm801_tea575x_set_direction(struct snd_tea575x *tea, bool output) diff --git a/trunk/sound/pci/hda/hda_codec.c b/trunk/sound/pci/hda/hda_codec.c index 70d4848b5cd0..d010de12335e 100644 --- a/trunk/sound/pci/hda/hda_codec.c +++ b/trunk/sound/pci/hda/hda_codec.c @@ -95,6 +95,7 @@ int snd_hda_delete_codec_preset(struct hda_codec_preset_list *preset) EXPORT_SYMBOL_HDA(snd_hda_delete_codec_preset); #ifdef CONFIG_PM +#define codec_in_pm(codec) ((codec)->in_pm) static void hda_power_work(struct work_struct *work); static void hda_keep_power_on(struct hda_codec *codec); #define hda_codec_is_power_on(codec) ((codec)->power_on) @@ -104,6 +105,7 @@ static inline void hda_call_pm_notify(struct hda_bus *bus, bool power_up) bus->ops.pm_notify(bus, power_up); } #else +#define codec_in_pm(codec) 0 static inline void hda_keep_power_on(struct hda_codec *codec) {} #define hda_codec_is_power_on(codec) 1 #define hda_call_pm_notify(bus, state) {} @@ -228,7 +230,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd, } mutex_unlock(&bus->cmd_mutex); snd_hda_power_down(codec); - if (res && *res == -1 && bus->rirb_error) { + if (!codec_in_pm(codec) && res && *res == -1 && bus->rirb_error) { if (bus->response_reset) { snd_printd("hda_codec: resetting BUS due to " "fatal communication error\n"); @@ -238,7 +240,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd, goto again; } /* clear reset-flag when the communication gets recovered */ - if (!err) + if (!err || codec_in_pm(codec)) bus->response_reset = 0; return err; } @@ -3616,6 +3618,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) { unsigned int state; + codec->in_pm = 1; + if (codec->patch_ops.suspend) codec->patch_ops.suspend(codec); hda_cleanup_all_streams(codec); @@ -3630,6 +3634,7 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) codec->power_transition = 0; codec->power_jiffies = jiffies; spin_unlock(&codec->power_lock); + codec->in_pm = 0; return state; } @@ -3638,6 +3643,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) */ static void hda_call_codec_resume(struct hda_codec *codec) { + codec->in_pm = 1; + /* set as if powered on for avoiding re-entering the resume * in the resume / power-save sequence */ @@ -3656,6 +3663,8 @@ static void hda_call_codec_resume(struct hda_codec *codec) snd_hda_codec_resume_cache(codec); } snd_hda_jack_report_sync(codec); + + codec->in_pm = 0; snd_hda_power_down(codec); /* flag down before returning */ } #endif /* CONFIG_PM */ diff --git a/trunk/sound/pci/hda/hda_codec.h b/trunk/sound/pci/hda/hda_codec.h index 507fe8a917b6..4f4e545c0f4b 100644 --- a/trunk/sound/pci/hda/hda_codec.h +++ b/trunk/sound/pci/hda/hda_codec.h @@ -869,6 +869,7 @@ struct hda_codec { unsigned int power_on :1; /* current (global) power-state */ unsigned int d3_stop_clk:1; /* support D3 operation without BCLK */ unsigned int pm_down_notified:1; /* PM notified to controller */ + unsigned int in_pm:1; /* suspend/resume being performed */ int power_transition; /* power-state in transition */ int power_count; /* current (global) power refcount */ struct delayed_work power_work; /* delayed task for powerdown */ diff --git a/trunk/sound/pci/hda/hda_intel.c b/trunk/sound/pci/hda/hda_intel.c index 72b085ae7d46..f9d870e554d9 
100644 --- a/trunk/sound/pci/hda/hda_intel.c +++ b/trunk/sound/pci/hda/hda_intel.c @@ -556,6 +556,12 @@ enum { #define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */ #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ +#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ + +/* quirks for Intel PCH */ +#define AZX_DCAPS_INTEL_PCH \ + (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \ + AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME) /* quirks for ATI SB / AMD Hudson */ #define AZX_DCAPS_PRESET_ATI_SB \ @@ -2433,6 +2439,9 @@ static void azx_power_notify(struct hda_bus *bus, bool power_up) { struct azx *chip = bus->private_data; + if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) + return; + if (power_up) pm_runtime_get_sync(&chip->pci->dev); else @@ -2548,7 +2557,8 @@ static int azx_runtime_suspend(struct device *dev) struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; - if (!power_save_controller) + if (!power_save_controller || + !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) return -EAGAIN; azx_stop_chip(chip); @@ -3429,39 +3439,30 @@ static void __devexit azx_remove(struct pci_dev *pci) static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { /* CPT */ { PCI_DEVICE(0x8086, 0x1c20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* PBG */ { PCI_DEVICE(0x8086, 0x1d20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE}, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Panther Point */ { PCI_DEVICE(0x8086, 0x1e20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Lynx Point */ { PCI_DEVICE(0x8086, 0x8c20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Lynx Point-LP */ { PCI_DEVICE(0x8086, 0x9c20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Lynx Point-LP */ { PCI_DEVICE(0x8086, 0x9c21), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Haswell */ { PCI_DEVICE(0x8086, 0x0c0c), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, { PCI_DEVICE(0x8086, 0x0d0c), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, /* 5 Series/3400 */ { PCI_DEVICE(0x8086, 0x3b56), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, /* SCH */ { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | @@ -3563,6 +3564,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { /* Teradici */ { PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, + { PCI_DEVICE(0x6549, 0x2200), + .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, /* Creative X-Fi (CA0110-IBG) */ /* CTHDA chips */ { 
PCI_DEVICE(0x1102, 0x0010), diff --git a/trunk/sound/pci/hda/patch_analog.c b/trunk/sound/pci/hda/patch_analog.c index cdd43eadbc67..1eeba7386666 100644 --- a/trunk/sound/pci/hda/patch_analog.c +++ b/trunk/sound/pci/hda/patch_analog.c @@ -545,6 +545,7 @@ static int ad198x_build_pcms(struct hda_codec *codec) if (spec->multiout.dig_out_nid) { info++; codec->num_pcms++; + codec->spdif_status_reset = 1; info->name = "AD198x Digital"; info->pcm_type = HDA_PCM_TYPE_SPDIF; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback; diff --git a/trunk/sound/pci/hda/patch_cirrus.c b/trunk/sound/pci/hda/patch_cirrus.c index 61a71131711c..3bcb67172358 100644 --- a/trunk/sound/pci/hda/patch_cirrus.c +++ b/trunk/sound/pci/hda/patch_cirrus.c @@ -101,8 +101,8 @@ enum { #define CS420X_VENDOR_NID 0x11 #define CS_DIG_OUT1_PIN_NID 0x10 #define CS_DIG_OUT2_PIN_NID 0x15 -#define CS_DMIC1_PIN_NID 0x12 -#define CS_DMIC2_PIN_NID 0x0e +#define CS_DMIC1_PIN_NID 0x0e +#define CS_DMIC2_PIN_NID 0x12 /* coef indices */ #define IDX_SPDIF_STAT 0x0000 @@ -466,6 +466,7 @@ static int parse_output(struct hda_codec *codec) memcpy(cfg->speaker_pins, cfg->line_out_pins, sizeof(cfg->speaker_pins)); cfg->line_outs = 0; + memset(cfg->line_out_pins, 0, sizeof(cfg->line_out_pins)); } return 0; @@ -1079,14 +1080,18 @@ static void init_input(struct hda_codec *codec) cs_automic(codec, NULL); coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */ + cs_vendor_coef_set(codec, IDX_ADC_CFG, coef); + + coef = cs_vendor_coef_get(codec, IDX_BEEP_CFG); if (is_active_pin(codec, CS_DMIC2_PIN_NID)) - coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */ + coef |= 1 << 4; /* DMIC2 2 chan on, GPIO1 off */ if (is_active_pin(codec, CS_DMIC1_PIN_NID)) - coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off + coef |= 1 << 3; /* DMIC1 2 chan on, GPIO0 off * No effect if SPDIF_OUT2 is * selected in IDX_SPDIF_CTL. 
*/ - cs_vendor_coef_set(codec, IDX_ADC_CFG, coef); + + cs_vendor_coef_set(codec, IDX_BEEP_CFG, coef); } else { if (spec->mic_detect) cs_automic(codec, NULL); @@ -1107,7 +1112,7 @@ static const struct hda_verb cs_coef_init_verbs[] = { | 0x0400 /* Disable Coefficient Auto increment */ )}, /* Beep */ - {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG}, + {0x11, AC_VERB_SET_COEF_INDEX, IDX_BEEP_CFG}, {0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */ {} /* terminator */ @@ -1728,8 +1733,7 @@ static int cs421x_mux_enum_put(struct snd_kcontrol *kcontrol, } -static struct snd_kcontrol_new cs421x_capture_source = { - +static const struct snd_kcontrol_new cs421x_capture_source = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, @@ -1946,7 +1950,7 @@ static int cs421x_suspend(struct hda_codec *codec) } #endif -static struct hda_codec_ops cs421x_patch_ops = { +static const struct hda_codec_ops cs421x_patch_ops = { .build_controls = cs421x_build_controls, .build_pcms = cs_build_pcms, .init = cs421x_init, diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c index f7397ad02a0d..ad68d223f8af 100644 --- a/trunk/sound/pci/hda/patch_realtek.c +++ b/trunk/sound/pci/hda/patch_realtek.c @@ -5407,6 +5407,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO), + SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), @@ -5840,7 +5841,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec) return alc_parse_auto_config(codec, alc269_ignore, ssids); } -static void alc269_toggle_power_output(struct hda_codec *codec, int power_up) +static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up) { int val = alc_read_coef_idx(codec, 0x04); if (power_up) @@ -5857,10 +5858,10 @@ static void alc269_shutup(struct hda_codec *codec) if (spec->codec_variant != ALC269_TYPE_ALC269VB) return; - if ((alc_get_coef0(codec) & 0x00ff) == 0x017) - alc269_toggle_power_output(codec, 0); - if ((alc_get_coef0(codec) & 0x00ff) == 0x018) { - alc269_toggle_power_output(codec, 0); + if (spec->codec_variant == ALC269_TYPE_ALC269VB) + alc269vb_toggle_power_output(codec, 0); + if (spec->codec_variant == ALC269_TYPE_ALC269VB && + (alc_get_coef0(codec) & 0x00ff) == 0x018) { msleep(150); } } @@ -5870,24 +5871,22 @@ static int alc269_resume(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; - if (spec->codec_variant == ALC269_TYPE_ALC269VB || + if (spec->codec_variant == ALC269_TYPE_ALC269VB) + alc269vb_toggle_power_output(codec, 0); + if (spec->codec_variant == ALC269_TYPE_ALC269VB && (alc_get_coef0(codec) & 0x00ff) == 0x018) { - alc269_toggle_power_output(codec, 0); msleep(150); } codec->patch_ops.init(codec); - if (spec->codec_variant == ALC269_TYPE_ALC269VB || + if (spec->codec_variant == ALC269_TYPE_ALC269VB) + alc269vb_toggle_power_output(codec, 1); + if (spec->codec_variant == ALC269_TYPE_ALC269VB && (alc_get_coef0(codec) & 0x00ff) == 0x017) { - alc269_toggle_power_output(codec, 1); msleep(200); } - if (spec->codec_variant == ALC269_TYPE_ALC269VB 
|| - (alc_get_coef0(codec) & 0x00ff) == 0x018) - alc269_toggle_power_output(codec, 1); - snd_hda_codec_resume_amp(codec); snd_hda_codec_resume_cache(codec); hda_call_check_power_status(codec, 0x01); @@ -7066,6 +7065,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 }, { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 }, { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, + { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 }, { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", .patch = patch_alc861 }, { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, @@ -7079,6 +7079,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { .patch = patch_alc662 }, { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 }, { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, + { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, @@ -7096,6 +7097,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 }, { .id = 0x10ec0892, .name = "ALC892", .patch = patch_alc662 }, { .id = 0x10ec0899, .name = "ALC898", .patch = patch_alc882 }, + { .id = 0x10ec0900, .name = "ALC1150", .patch = patch_alc882 }, {} /* terminator */ }; diff --git a/trunk/sound/pci/hda/patch_via.c b/trunk/sound/pci/hda/patch_via.c index 72a2f60b087c..019e1a00414a 100644 --- a/trunk/sound/pci/hda/patch_via.c +++ b/trunk/sound/pci/hda/patch_via.c @@ -1809,11 +1809,11 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec) { struct via_spec *spec = codec->spec; const struct auto_pin_cfg *cfg = &spec->autocfg; - int i, dac_num; + int i; hda_nid_t nid; + spec->multiout.num_dacs = 0; spec->multiout.dac_nids = spec->private_dac_nids; - dac_num = 0; for (i = 0; i < cfg->line_outs; i++) { hda_nid_t dac = 0; nid = cfg->line_out_pins[i]; @@ -1824,16 +1824,13 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec) if (!i && parse_output_path(codec, nid, dac, 1, &spec->out_mix_path)) dac = spec->out_mix_path.path[0]; - if (dac) { - spec->private_dac_nids[i] = dac; - dac_num++; - } + if (dac) + spec->private_dac_nids[spec->multiout.num_dacs++] = dac; } if (!spec->out_path[0].depth && spec->out_mix_path.depth) { spec->out_path[0] = spec->out_mix_path; spec->out_mix_path.depth = 0; } - spec->multiout.num_dacs = dac_num; return 0; } @@ -3628,6 +3625,7 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec) */ enum { VIA_FIXUP_INTMIC_BOOST, + VIA_FIXUP_ASUS_G75, }; static void via_fixup_intmic_boost(struct hda_codec *codec, @@ -3642,13 +3640,35 @@ static const struct hda_fixup via_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = via_fixup_intmic_boost, }, + [VIA_FIXUP_ASUS_G75] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + /* set 0x24 and 0x33 as speakers */ + { 0x24, 0x991301f0 }, + { 0x33, 0x991301f1 }, /* subwoofer */ + { } + } + }, }; static const struct snd_pci_quirk vt2002p_fixups[] = { + SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75), SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST), {} }; +/* NIDs 0x24 and 0x33 on VT1802 have connections to non-existing NID 0x3e + * Replace this with mixer NID 0x1c + */ +static void 
fix_vt1802_connections(struct hda_codec *codec) +{ + static hda_nid_t conn_24[] = { 0x14, 0x1c }; + static hda_nid_t conn_33[] = { 0x1c }; + + snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24); + snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33); +} + /* patch for vt2002P */ static int patch_vt2002P(struct hda_codec *codec) { @@ -3663,6 +3683,8 @@ static int patch_vt2002P(struct hda_codec *codec) spec->aa_mix_nid = 0x21; override_mic_boost(codec, 0x2b, 0, 3, 40); override_mic_boost(codec, 0x29, 0, 3, 40); + if (spec->codec_type == VT1802) + fix_vt1802_connections(codec); add_secret_dac_path(codec); snd_hda_pick_fixup(codec, NULL, vt2002p_fixups, via_fixups); diff --git a/trunk/sound/pci/rme9652/hdspm.c b/trunk/sound/pci/rme9652/hdspm.c index f1cd1e387801..748e36c66603 100644 --- a/trunk/sound/pci/rme9652/hdspm.c +++ b/trunk/sound/pci/rme9652/hdspm.c @@ -3979,7 +3979,8 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol, case 8: /* SYNC IN */ val = hdspm_sync_in_sync_check(hdspm); break; default: - val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1); + val = hdspm_s1_sync_check(hdspm, + kcontrol->private_value-1); } break; @@ -4899,7 +4900,7 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry, insel = "Coaxial"; break; default: - insel = "Unkown"; + insel = "Unknown"; } snd_iprintf(buffer, diff --git a/trunk/sound/soc/codecs/arizona.c b/trunk/sound/soc/codecs/arizona.c index c03b65af3059..054967d8bac2 100644 --- a/trunk/sound/soc/codecs/arizona.c +++ b/trunk/sound/soc/codecs/arizona.c @@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(arizona_out_ev); static unsigned int arizona_sysclk_48k_rates[] = { 6144000, 12288000, - 22579200, + 24576000, 49152000, 73728000, 98304000, @@ -278,7 +278,7 @@ static unsigned int arizona_sysclk_48k_rates[] = { static unsigned int arizona_sysclk_44k1_rates[] = { 5644800, 11289600, - 24576000, + 22579200, 45158400, 67737600, 90316800, diff --git a/trunk/sound/soc/codecs/cs4271.c b/trunk/sound/soc/codecs/cs4271.c index f994af34f552..e3f0a7f3131e 100644 --- a/trunk/sound/soc/codecs/cs4271.c +++ b/trunk/sound/soc/codecs/cs4271.c @@ -485,7 +485,7 @@ static int cs4271_probe(struct snd_soc_codec *codec) gpio_nreset = cs4271plat->gpio_nreset; if (gpio_nreset >= 0) - if (gpio_request(gpio_nreset, "CS4271 Reset")) + if (devm_gpio_request(codec->dev, gpio_nreset, "CS4271 Reset")) gpio_nreset = -EINVAL; if (gpio_nreset >= 0) { /* Reset codec */ @@ -535,15 +535,10 @@ static int cs4271_probe(struct snd_soc_codec *codec) static int cs4271_remove(struct snd_soc_codec *codec) { struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); - int gpio_nreset; - gpio_nreset = cs4271->gpio_nreset; - - if (gpio_is_valid(gpio_nreset)) { + if (gpio_is_valid(cs4271->gpio_nreset)) /* Set codec to the reset state */ - gpio_set_value(gpio_nreset, 0); - gpio_free(gpio_nreset); - } + gpio_set_value(cs4271->gpio_nreset, 0); return 0; }; diff --git a/trunk/sound/soc/codecs/cs42l52.c b/trunk/sound/soc/codecs/cs42l52.c index 61599298fb26..97a81051e88d 100644 --- a/trunk/sound/soc/codecs/cs42l52.c +++ b/trunk/sound/soc/codecs/cs42l52.c @@ -763,7 +763,7 @@ static int cs42l52_set_sysclk(struct snd_soc_dai *codec_dai, if ((freq >= CS42L52_MIN_CLK) && (freq <= CS42L52_MAX_CLK)) { cs42l52->sysclk = freq; } else { - dev_err(codec->dev, "Invalid freq paramter\n"); + dev_err(codec->dev, "Invalid freq parameter\n"); return -EINVAL; } return 0; @@ -773,7 +773,6 @@ static int cs42l52_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct 
snd_soc_codec *codec = codec_dai->codec; struct cs42l52_private *cs42l52 = snd_soc_codec_get_drvdata(codec); - int ret = 0; u8 iface = 0; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { @@ -822,7 +821,7 @@ static int cs42l52_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) case SND_SOC_DAIFMT_NB_IF: break; default: - ret = -EINVAL; + return -EINVAL; } cs42l52->config.format = iface; snd_soc_write(codec, CS42L52_IFACE_CTL1, cs42l52->config.format); diff --git a/trunk/sound/soc/codecs/wm5102.c b/trunk/sound/soc/codecs/wm5102.c index 1722b586bdba..7394e73fa43c 100644 --- a/trunk/sound/soc/codecs/wm5102.c +++ b/trunk/sound/soc/codecs/wm5102.c @@ -42,6 +42,556 @@ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0); +static const struct reg_default wm5102_sysclk_reva_patch[] = { + { 0x3000, 0x2225 }, + { 0x3001, 0x3a03 }, + { 0x3002, 0x0225 }, + { 0x3003, 0x0801 }, + { 0x3004, 0x6249 }, + { 0x3005, 0x0c04 }, + { 0x3006, 0x0225 }, + { 0x3007, 0x5901 }, + { 0x3008, 0xe249 }, + { 0x3009, 0x030d }, + { 0x300a, 0x0249 }, + { 0x300b, 0x2c01 }, + { 0x300c, 0xe249 }, + { 0x300d, 0x4342 }, + { 0x300e, 0xe249 }, + { 0x300f, 0x73c0 }, + { 0x3010, 0x4249 }, + { 0x3011, 0x0c00 }, + { 0x3012, 0x0225 }, + { 0x3013, 0x1f01 }, + { 0x3014, 0x0225 }, + { 0x3015, 0x1e01 }, + { 0x3016, 0x0225 }, + { 0x3017, 0xfa00 }, + { 0x3018, 0x0000 }, + { 0x3019, 0xf000 }, + { 0x301a, 0x0000 }, + { 0x301b, 0xf000 }, + { 0x301c, 0x0000 }, + { 0x301d, 0xf000 }, + { 0x301e, 0x0000 }, + { 0x301f, 0xf000 }, + { 0x3020, 0x0000 }, + { 0x3021, 0xf000 }, + { 0x3022, 0x0000 }, + { 0x3023, 0xf000 }, + { 0x3024, 0x0000 }, + { 0x3025, 0xf000 }, + { 0x3026, 0x0000 }, + { 0x3027, 0xf000 }, + { 0x3028, 0x0000 }, + { 0x3029, 0xf000 }, + { 0x302a, 0x0000 }, + { 0x302b, 0xf000 }, + { 0x302c, 0x0000 }, + { 0x302d, 0xf000 }, + { 0x302e, 0x0000 }, + { 0x302f, 0xf000 }, + { 0x3030, 0x0225 }, + { 0x3031, 0x1a01 }, + { 0x3032, 0x0225 }, + { 0x3033, 0x1e00 }, + { 0x3034, 0x0225 }, + { 0x3035, 0x1f00 }, + { 0x3036, 0x6225 }, + { 0x3037, 0xf800 }, + { 0x3038, 0x0000 }, + { 0x3039, 0xf000 }, + { 0x303a, 0x0000 }, + { 0x303b, 0xf000 }, + { 0x303c, 0x0000 }, + { 0x303d, 0xf000 }, + { 0x303e, 0x0000 }, + { 0x303f, 0xf000 }, + { 0x3040, 0x2226 }, + { 0x3041, 0x3a03 }, + { 0x3042, 0x0226 }, + { 0x3043, 0x0801 }, + { 0x3044, 0x6249 }, + { 0x3045, 0x0c06 }, + { 0x3046, 0x0226 }, + { 0x3047, 0x5901 }, + { 0x3048, 0xe249 }, + { 0x3049, 0x030d }, + { 0x304a, 0x0249 }, + { 0x304b, 0x2c01 }, + { 0x304c, 0xe249 }, + { 0x304d, 0x4342 }, + { 0x304e, 0xe249 }, + { 0x304f, 0x73c0 }, + { 0x3050, 0x4249 }, + { 0x3051, 0x0c00 }, + { 0x3052, 0x0226 }, + { 0x3053, 0x1f01 }, + { 0x3054, 0x0226 }, + { 0x3055, 0x1e01 }, + { 0x3056, 0x0226 }, + { 0x3057, 0xfa00 }, + { 0x3058, 0x0000 }, + { 0x3059, 0xf000 }, + { 0x305a, 0x0000 }, + { 0x305b, 0xf000 }, + { 0x305c, 0x0000 }, + { 0x305d, 0xf000 }, + { 0x305e, 0x0000 }, + { 0x305f, 0xf000 }, + { 0x3060, 0x0000 }, + { 0x3061, 0xf000 }, + { 0x3062, 0x0000 }, + { 0x3063, 0xf000 }, + { 0x3064, 0x0000 }, + { 0x3065, 0xf000 }, + { 0x3066, 0x0000 }, + { 0x3067, 0xf000 }, + { 0x3068, 0x0000 }, + { 0x3069, 0xf000 }, + { 0x306a, 0x0000 }, + { 0x306b, 0xf000 }, + { 0x306c, 0x0000 }, + { 0x306d, 0xf000 }, + { 0x306e, 0x0000 }, + { 0x306f, 0xf000 }, + { 0x3070, 0x0226 }, + { 0x3071, 0x1a01 }, + { 0x3072, 0x0226 }, + { 0x3073, 0x1e00 }, + { 0x3074, 0x0226 }, + { 0x3075, 0x1f00 }, + { 0x3076, 0x6226 }, + { 0x3077, 0xf800 }, + { 0x3078, 0x0000 
}, + { 0x3079, 0xf000 }, + { 0x307a, 0x0000 }, + { 0x307b, 0xf000 }, + { 0x307c, 0x0000 }, + { 0x307d, 0xf000 }, + { 0x307e, 0x0000 }, + { 0x307f, 0xf000 }, + { 0x3080, 0x2227 }, + { 0x3081, 0x3a03 }, + { 0x3082, 0x0227 }, + { 0x3083, 0x0801 }, + { 0x3084, 0x6255 }, + { 0x3085, 0x0c04 }, + { 0x3086, 0x0227 }, + { 0x3087, 0x5901 }, + { 0x3088, 0xe255 }, + { 0x3089, 0x030d }, + { 0x308a, 0x0255 }, + { 0x308b, 0x2c01 }, + { 0x308c, 0xe255 }, + { 0x308d, 0x4342 }, + { 0x308e, 0xe255 }, + { 0x308f, 0x73c0 }, + { 0x3090, 0x4255 }, + { 0x3091, 0x0c00 }, + { 0x3092, 0x0227 }, + { 0x3093, 0x1f01 }, + { 0x3094, 0x0227 }, + { 0x3095, 0x1e01 }, + { 0x3096, 0x0227 }, + { 0x3097, 0xfa00 }, + { 0x3098, 0x0000 }, + { 0x3099, 0xf000 }, + { 0x309a, 0x0000 }, + { 0x309b, 0xf000 }, + { 0x309c, 0x0000 }, + { 0x309d, 0xf000 }, + { 0x309e, 0x0000 }, + { 0x309f, 0xf000 }, + { 0x30a0, 0x0000 }, + { 0x30a1, 0xf000 }, + { 0x30a2, 0x0000 }, + { 0x30a3, 0xf000 }, + { 0x30a4, 0x0000 }, + { 0x30a5, 0xf000 }, + { 0x30a6, 0x0000 }, + { 0x30a7, 0xf000 }, + { 0x30a8, 0x0000 }, + { 0x30a9, 0xf000 }, + { 0x30aa, 0x0000 }, + { 0x30ab, 0xf000 }, + { 0x30ac, 0x0000 }, + { 0x30ad, 0xf000 }, + { 0x30ae, 0x0000 }, + { 0x30af, 0xf000 }, + { 0x30b0, 0x0227 }, + { 0x30b1, 0x1a01 }, + { 0x30b2, 0x0227 }, + { 0x30b3, 0x1e00 }, + { 0x30b4, 0x0227 }, + { 0x30b5, 0x1f00 }, + { 0x30b6, 0x6227 }, + { 0x30b7, 0xf800 }, + { 0x30b8, 0x0000 }, + { 0x30b9, 0xf000 }, + { 0x30ba, 0x0000 }, + { 0x30bb, 0xf000 }, + { 0x30bc, 0x0000 }, + { 0x30bd, 0xf000 }, + { 0x30be, 0x0000 }, + { 0x30bf, 0xf000 }, + { 0x30c0, 0x2228 }, + { 0x30c1, 0x3a03 }, + { 0x30c2, 0x0228 }, + { 0x30c3, 0x0801 }, + { 0x30c4, 0x6255 }, + { 0x30c5, 0x0c06 }, + { 0x30c6, 0x0228 }, + { 0x30c7, 0x5901 }, + { 0x30c8, 0xe255 }, + { 0x30c9, 0x030d }, + { 0x30ca, 0x0255 }, + { 0x30cb, 0x2c01 }, + { 0x30cc, 0xe255 }, + { 0x30cd, 0x4342 }, + { 0x30ce, 0xe255 }, + { 0x30cf, 0x73c0 }, + { 0x30d0, 0x4255 }, + { 0x30d1, 0x0c00 }, + { 0x30d2, 0x0228 }, + { 0x30d3, 0x1f01 }, + { 0x30d4, 0x0228 }, + { 0x30d5, 0x1e01 }, + { 0x30d6, 0x0228 }, + { 0x30d7, 0xfa00 }, + { 0x30d8, 0x0000 }, + { 0x30d9, 0xf000 }, + { 0x30da, 0x0000 }, + { 0x30db, 0xf000 }, + { 0x30dc, 0x0000 }, + { 0x30dd, 0xf000 }, + { 0x30de, 0x0000 }, + { 0x30df, 0xf000 }, + { 0x30e0, 0x0000 }, + { 0x30e1, 0xf000 }, + { 0x30e2, 0x0000 }, + { 0x30e3, 0xf000 }, + { 0x30e4, 0x0000 }, + { 0x30e5, 0xf000 }, + { 0x30e6, 0x0000 }, + { 0x30e7, 0xf000 }, + { 0x30e8, 0x0000 }, + { 0x30e9, 0xf000 }, + { 0x30ea, 0x0000 }, + { 0x30eb, 0xf000 }, + { 0x30ec, 0x0000 }, + { 0x30ed, 0xf000 }, + { 0x30ee, 0x0000 }, + { 0x30ef, 0xf000 }, + { 0x30f0, 0x0228 }, + { 0x30f1, 0x1a01 }, + { 0x30f2, 0x0228 }, + { 0x30f3, 0x1e00 }, + { 0x30f4, 0x0228 }, + { 0x30f5, 0x1f00 }, + { 0x30f6, 0x6228 }, + { 0x30f7, 0xf800 }, + { 0x30f8, 0x0000 }, + { 0x30f9, 0xf000 }, + { 0x30fa, 0x0000 }, + { 0x30fb, 0xf000 }, + { 0x30fc, 0x0000 }, + { 0x30fd, 0xf000 }, + { 0x30fe, 0x0000 }, + { 0x30ff, 0xf000 }, + { 0x3100, 0x222b }, + { 0x3101, 0x3a03 }, + { 0x3102, 0x222b }, + { 0x3103, 0x5803 }, + { 0x3104, 0xe26f }, + { 0x3105, 0x030d }, + { 0x3106, 0x626f }, + { 0x3107, 0x2c01 }, + { 0x3108, 0xe26f }, + { 0x3109, 0x4342 }, + { 0x310a, 0xe26f }, + { 0x310b, 0x73c0 }, + { 0x310c, 0x026f }, + { 0x310d, 0x0c00 }, + { 0x310e, 0x022b }, + { 0x310f, 0x1f01 }, + { 0x3110, 0x022b }, + { 0x3111, 0x1e01 }, + { 0x3112, 0x022b }, + { 0x3113, 0xfa00 }, + { 0x3114, 0x0000 }, + { 0x3115, 0xf000 }, + { 0x3116, 0x0000 }, + { 0x3117, 0xf000 }, + { 0x3118, 0x0000 }, + { 0x3119, 0xf000 }, + { 
0x311a, 0x0000 }, + { 0x311b, 0xf000 }, + { 0x311c, 0x0000 }, + { 0x311d, 0xf000 }, + { 0x311e, 0x0000 }, + { 0x311f, 0xf000 }, + { 0x3120, 0x022b }, + { 0x3121, 0x0a01 }, + { 0x3122, 0x022b }, + { 0x3123, 0x1e00 }, + { 0x3124, 0x022b }, + { 0x3125, 0x1f00 }, + { 0x3126, 0x622b }, + { 0x3127, 0xf800 }, + { 0x3128, 0x0000 }, + { 0x3129, 0xf000 }, + { 0x312a, 0x0000 }, + { 0x312b, 0xf000 }, + { 0x312c, 0x0000 }, + { 0x312d, 0xf000 }, + { 0x312e, 0x0000 }, + { 0x312f, 0xf000 }, + { 0x3130, 0x0000 }, + { 0x3131, 0xf000 }, + { 0x3132, 0x0000 }, + { 0x3133, 0xf000 }, + { 0x3134, 0x0000 }, + { 0x3135, 0xf000 }, + { 0x3136, 0x0000 }, + { 0x3137, 0xf000 }, + { 0x3138, 0x0000 }, + { 0x3139, 0xf000 }, + { 0x313a, 0x0000 }, + { 0x313b, 0xf000 }, + { 0x313c, 0x0000 }, + { 0x313d, 0xf000 }, + { 0x313e, 0x0000 }, + { 0x313f, 0xf000 }, + { 0x3140, 0x0000 }, + { 0x3141, 0xf000 }, + { 0x3142, 0x0000 }, + { 0x3143, 0xf000 }, + { 0x3144, 0x0000 }, + { 0x3145, 0xf000 }, + { 0x3146, 0x0000 }, + { 0x3147, 0xf000 }, + { 0x3148, 0x0000 }, + { 0x3149, 0xf000 }, + { 0x314a, 0x0000 }, + { 0x314b, 0xf000 }, + { 0x314c, 0x0000 }, + { 0x314d, 0xf000 }, + { 0x314e, 0x0000 }, + { 0x314f, 0xf000 }, + { 0x3150, 0x0000 }, + { 0x3151, 0xf000 }, + { 0x3152, 0x0000 }, + { 0x3153, 0xf000 }, + { 0x3154, 0x0000 }, + { 0x3155, 0xf000 }, + { 0x3156, 0x0000 }, + { 0x3157, 0xf000 }, + { 0x3158, 0x0000 }, + { 0x3159, 0xf000 }, + { 0x315a, 0x0000 }, + { 0x315b, 0xf000 }, + { 0x315c, 0x0000 }, + { 0x315d, 0xf000 }, + { 0x315e, 0x0000 }, + { 0x315f, 0xf000 }, + { 0x3160, 0x0000 }, + { 0x3161, 0xf000 }, + { 0x3162, 0x0000 }, + { 0x3163, 0xf000 }, + { 0x3164, 0x0000 }, + { 0x3165, 0xf000 }, + { 0x3166, 0x0000 }, + { 0x3167, 0xf000 }, + { 0x3168, 0x0000 }, + { 0x3169, 0xf000 }, + { 0x316a, 0x0000 }, + { 0x316b, 0xf000 }, + { 0x316c, 0x0000 }, + { 0x316d, 0xf000 }, + { 0x316e, 0x0000 }, + { 0x316f, 0xf000 }, + { 0x3170, 0x0000 }, + { 0x3171, 0xf000 }, + { 0x3172, 0x0000 }, + { 0x3173, 0xf000 }, + { 0x3174, 0x0000 }, + { 0x3175, 0xf000 }, + { 0x3176, 0x0000 }, + { 0x3177, 0xf000 }, + { 0x3178, 0x0000 }, + { 0x3179, 0xf000 }, + { 0x317a, 0x0000 }, + { 0x317b, 0xf000 }, + { 0x317c, 0x0000 }, + { 0x317d, 0xf000 }, + { 0x317e, 0x0000 }, + { 0x317f, 0xf000 }, + { 0x3180, 0x2001 }, + { 0x3181, 0xf101 }, + { 0x3182, 0x0000 }, + { 0x3183, 0xf000 }, + { 0x3184, 0x0000 }, + { 0x3185, 0xf000 }, + { 0x3186, 0x0000 }, + { 0x3187, 0xf000 }, + { 0x3188, 0x0000 }, + { 0x3189, 0xf000 }, + { 0x318a, 0x0000 }, + { 0x318b, 0xf000 }, + { 0x318c, 0x0000 }, + { 0x318d, 0xf000 }, + { 0x318e, 0x0000 }, + { 0x318f, 0xf000 }, + { 0x3190, 0x0000 }, + { 0x3191, 0xf000 }, + { 0x3192, 0x0000 }, + { 0x3193, 0xf000 }, + { 0x3194, 0x0000 }, + { 0x3195, 0xf000 }, + { 0x3196, 0x0000 }, + { 0x3197, 0xf000 }, + { 0x3198, 0x0000 }, + { 0x3199, 0xf000 }, + { 0x319a, 0x0000 }, + { 0x319b, 0xf000 }, + { 0x319c, 0x0000 }, + { 0x319d, 0xf000 }, + { 0x319e, 0x0000 }, + { 0x319f, 0xf000 }, + { 0x31a0, 0x0000 }, + { 0x31a1, 0xf000 }, + { 0x31a2, 0x0000 }, + { 0x31a3, 0xf000 }, + { 0x31a4, 0x0000 }, + { 0x31a5, 0xf000 }, + { 0x31a6, 0x0000 }, + { 0x31a7, 0xf000 }, + { 0x31a8, 0x0000 }, + { 0x31a9, 0xf000 }, + { 0x31aa, 0x0000 }, + { 0x31ab, 0xf000 }, + { 0x31ac, 0x0000 }, + { 0x31ad, 0xf000 }, + { 0x31ae, 0x0000 }, + { 0x31af, 0xf000 }, + { 0x31b0, 0x0000 }, + { 0x31b1, 0xf000 }, + { 0x31b2, 0x0000 }, + { 0x31b3, 0xf000 }, + { 0x31b4, 0x0000 }, + { 0x31b5, 0xf000 }, + { 0x31b6, 0x0000 }, + { 0x31b7, 0xf000 }, + { 0x31b8, 0x0000 }, + { 0x31b9, 0xf000 }, + { 0x31ba, 0x0000 }, + { 0x31bb, 
0xf000 }, + { 0x31bc, 0x0000 }, + { 0x31bd, 0xf000 }, + { 0x31be, 0x0000 }, + { 0x31bf, 0xf000 }, + { 0x31c0, 0x0000 }, + { 0x31c1, 0xf000 }, + { 0x31c2, 0x0000 }, + { 0x31c3, 0xf000 }, + { 0x31c4, 0x0000 }, + { 0x31c5, 0xf000 }, + { 0x31c6, 0x0000 }, + { 0x31c7, 0xf000 }, + { 0x31c8, 0x0000 }, + { 0x31c9, 0xf000 }, + { 0x31ca, 0x0000 }, + { 0x31cb, 0xf000 }, + { 0x31cc, 0x0000 }, + { 0x31cd, 0xf000 }, + { 0x31ce, 0x0000 }, + { 0x31cf, 0xf000 }, + { 0x31d0, 0x0000 }, + { 0x31d1, 0xf000 }, + { 0x31d2, 0x0000 }, + { 0x31d3, 0xf000 }, + { 0x31d4, 0x0000 }, + { 0x31d5, 0xf000 }, + { 0x31d6, 0x0000 }, + { 0x31d7, 0xf000 }, + { 0x31d8, 0x0000 }, + { 0x31d9, 0xf000 }, + { 0x31da, 0x0000 }, + { 0x31db, 0xf000 }, + { 0x31dc, 0x0000 }, + { 0x31dd, 0xf000 }, + { 0x31de, 0x0000 }, + { 0x31df, 0xf000 }, + { 0x31e0, 0x0000 }, + { 0x31e1, 0xf000 }, + { 0x31e2, 0x0000 }, + { 0x31e3, 0xf000 }, + { 0x31e4, 0x0000 }, + { 0x31e5, 0xf000 }, + { 0x31e6, 0x0000 }, + { 0x31e7, 0xf000 }, + { 0x31e8, 0x0000 }, + { 0x31e9, 0xf000 }, + { 0x31ea, 0x0000 }, + { 0x31eb, 0xf000 }, + { 0x31ec, 0x0000 }, + { 0x31ed, 0xf000 }, + { 0x31ee, 0x0000 }, + { 0x31ef, 0xf000 }, + { 0x31f0, 0x0000 }, + { 0x31f1, 0xf000 }, + { 0x31f2, 0x0000 }, + { 0x31f3, 0xf000 }, + { 0x31f4, 0x0000 }, + { 0x31f5, 0xf000 }, + { 0x31f6, 0x0000 }, + { 0x31f7, 0xf000 }, + { 0x31f8, 0x0000 }, + { 0x31f9, 0xf000 }, + { 0x31fa, 0x0000 }, + { 0x31fb, 0xf000 }, + { 0x31fc, 0x0000 }, + { 0x31fd, 0xf000 }, + { 0x31fe, 0x0000 }, + { 0x31ff, 0xf000 }, + { 0x024d, 0xff50 }, + { 0x0252, 0xff50 }, + { 0x0259, 0x0112 }, + { 0x025e, 0x0112 }, +}; + +static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct arizona *arizona = dev_get_drvdata(codec->dev); + struct regmap *regmap = codec->control_data; + const struct reg_default *patch = NULL; + int i, patch_size; + + switch (arizona->rev) { + case 0: + patch = wm5102_sysclk_reva_patch; + patch_size = ARRAY_SIZE(wm5102_sysclk_reva_patch); + break; + } + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + if (patch) + for (i = 0; i < patch_size; i++) + regmap_write(regmap, patch[i].reg, + patch[i].def); + break; + + default: + break; + } + + return 0; +} + static const struct snd_kcontrol_new wm5102_snd_controls[] = { SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL, ARIZONA_IN1_OSR_SHIFT, 1, 0), @@ -297,7 +847,7 @@ static const struct snd_kcontrol_new wm5102_aec_loopback_mux = static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT, - 0, NULL, 0), + 0, wm5102_sysclk_ev, SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1, ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK, diff --git a/trunk/sound/soc/codecs/wm8978.c b/trunk/sound/soc/codecs/wm8978.c index 5421fd9fbcb5..4c0a8e496131 100644 --- a/trunk/sound/soc/codecs/wm8978.c +++ b/trunk/sound/soc/codecs/wm8978.c @@ -782,7 +782,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream, wm8978->mclk_idx = -1; f_sel = wm8978->f_mclk; } else { - if (!wm8978->f_pllout) { + if (!wm8978->f_opclk) { /* We only enter here, if OPCLK is not used */ int ret = wm8978_configure_pll(codec); if (ret < 0) diff --git a/trunk/sound/soc/codecs/wm8994.c b/trunk/sound/soc/codecs/wm8994.c index 3fddc7ad1127..b2b2b37131bd 100644 --- a/trunk/sound/soc/codecs/wm8994.c +++ b/trunk/sound/soc/codecs/wm8994.c @@ 
-3722,7 +3722,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
 	} while (count--);
 	if (count == 0)
-		dev_warn(codec->dev, "No impedence range reported for jack\n");
+		dev_warn(codec->dev, "No impedance range reported for jack\n");
 #ifndef CONFIG_SND_SOC_WM8994_MODULE
 	trace_snd_soc_jack_irq(dev_name(codec->dev));
diff --git a/trunk/sound/soc/kirkwood/kirkwood-dma.c b/trunk/sound/soc/kirkwood/kirkwood-dma.c
index b9f16598324c..2ba08148655f 100644
--- a/trunk/sound/soc/kirkwood/kirkwood-dma.c
+++ b/trunk/sound/soc/kirkwood/kirkwood-dma.c
@@ -71,7 +71,6 @@ static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
 		printk(KERN_WARNING "%s: got err interrupt 0x%lx\n",
 			__func__, cause);
 		writel(cause, priv->io + KIRKWOOD_ERR_CAUSE);
-		return IRQ_HANDLED;
 	}
 	/* we've enabled only bytes interrupts ... */
@@ -178,7 +177,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
 	}
 	dram = mv_mbus_dram_info();
-	addr = virt_to_phys(substream->dma_buffer.area);
+	addr = substream->dma_buffer.addr;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		prdata->play_stream = substream;
 		kirkwood_dma_conf_mbus_windows(priv->io,
diff --git a/trunk/sound/soc/kirkwood/kirkwood-i2s.c b/trunk/sound/soc/kirkwood/kirkwood-i2s.c
index 542538d10ab7..1d5db484d2df 100644
--- a/trunk/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/trunk/sound/soc/kirkwood/kirkwood-i2s.c
@@ -95,7 +95,7 @@ static inline void kirkwood_set_dco(void __iomem *io, unsigned long rate)
 	do {
 		cpu_relax();
 		value = readl(io + KIRKWOOD_DCO_SPCR_STATUS);
-		value &= KIRKWOOD_DCO_SPCR_STATUS;
+		value &= KIRKWOOD_DCO_SPCR_STATUS_DCO_LOCK;
 	} while (value == 0);
 }
@@ -180,67 +180,72 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
 				int cmd, struct snd_soc_dai *dai)
 {
 	struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
-	unsigned long value;
-
-	/*
-	 * specs says KIRKWOOD_PLAYCTL must be read 2 times before
-	 * changing it. So read 1 time here and 1 later.
-	 */
-	value = readl(priv->io + KIRKWOOD_PLAYCTL);
+	uint32_t ctl, value;
+
+	ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
+	if (ctl & KIRKWOOD_PLAYCTL_PAUSE) {
+		unsigned timeout = 5000;
+		/*
+		 * The Armada510 spec says that if we enter pause mode, the
+		 * busy bit must be read back as clear _twice_. Make sure
+		 * we respect that otherwise we get DMA underruns.
+		 */
+		do {
+			value = ctl;
+			ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
+			if (!((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY))
+				break;
+			udelay(1);
+		} while (timeout--);
+
+		if ((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY)
+			dev_notice(dai->dev, "timed out waiting for busy to deassert: %08x\n",
+				   ctl);
+	}
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		/* stop audio, enable interrupts */
-		value = readl(priv->io + KIRKWOOD_PLAYCTL);
-		value |= KIRKWOOD_PLAYCTL_PAUSE;
-		writel(value, priv->io + KIRKWOOD_PLAYCTL);
-
 		value = readl(priv->io + KIRKWOOD_INT_MASK);
 		value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES;
 		writel(value, priv->io + KIRKWOOD_INT_MASK);
 		/* configure audio & enable i2s playback */
-		value = readl(priv->io + KIRKWOOD_PLAYCTL);
-		value &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
-		value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
+		ctl &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
+		ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
 				| KIRKWOOD_PLAYCTL_SPDIF_EN);
 		if (priv->burst == 32)
-			value |= KIRKWOOD_PLAYCTL_BURST_32;
+			ctl |= KIRKWOOD_PLAYCTL_BURST_32;
 		else
-			value |= KIRKWOOD_PLAYCTL_BURST_128;
-		value |= KIRKWOOD_PLAYCTL_I2S_EN;
-		writel(value, priv->io + KIRKWOOD_PLAYCTL);
+			ctl |= KIRKWOOD_PLAYCTL_BURST_128;
+		ctl |= KIRKWOOD_PLAYCTL_I2S_EN;
+		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		/* stop audio, disable interrupts */
-		value = readl(priv->io + KIRKWOOD_PLAYCTL);
-		value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
-		writel(value, priv->io + KIRKWOOD_PLAYCTL);
+		ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
 		value = readl(priv->io + KIRKWOOD_INT_MASK);
 		value &= ~KIRKWOOD_INT_CAUSE_PLAY_BYTES;
 		writel(value, priv->io + KIRKWOOD_INT_MASK);
 		/* disable all playbacks */
-		value = readl(priv->io + KIRKWOOD_PLAYCTL);
-		value &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
-		writel(value, priv->io + KIRKWOOD_PLAYCTL);
+		ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
+		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
-		value = readl(priv->io + KIRKWOOD_PLAYCTL);
-		value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
-		writel(value, priv->io + KIRKWOOD_PLAYCTL);
+		ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
 		break;
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		value = readl(priv->io + KIRKWOOD_PLAYCTL);
-		value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
-		writel(value, priv->io + KIRKWOOD_PLAYCTL);
+		ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
+		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
 		break;
 	default:
@@ -260,11 +265,6 @@ static int kirkwood_i2s_rec_trigger(struct snd_pcm_substream *substream,
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		/* stop audio, enable interrupts */
-		value = readl(priv->io + KIRKWOOD_RECCTL);
-		value |= KIRKWOOD_RECCTL_PAUSE;
-		writel(value, priv->io + KIRKWOOD_RECCTL);
-
 		value = readl(priv->io + KIRKWOOD_INT_MASK);
 		value |= KIRKWOOD_INT_CAUSE_REC_BYTES;
 		writel(value, priv->io + KIRKWOOD_INT_MASK);
diff --git a/trunk/sound/soc/mxs/mxs-saif.c b/trunk/sound/soc/mxs/mxs-saif.c
index aa037b292f3d..c294fbb523fc 100644
--- a/trunk/sound/soc/mxs/mxs-saif.c
+++ b/trunk/sound/soc/mxs/mxs-saif.c
@@ -523,16 +523,24 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		/*
-		 * write a data to saif data register to trigger
-		 * the transfer
+		 * write data to saif data register to trigger
+		 * the transfer.
+		 * For 24-bit format the 32-bit FIFO register stores
+		 * only one channel, so we need to write twice.
+		 * This is also safe for the other non 24-bit formats.
 		 */
 		__raw_writel(0, saif->base + SAIF_DATA);
+		__raw_writel(0, saif->base + SAIF_DATA);
 	} else {
 		/*
-		 * read a data from saif data register to trigger
-		 * the receive
+		 * read data from saif data register to trigger
+		 * the receive.
+		 * For 24-bit format the 32-bit FIFO register stores
+		 * only one channel, so we need to read twice.
+		 * This is also safe for the other non 24-bit formats.
 		 */
 		__raw_readl(saif->base + SAIF_DATA);
+		__raw_readl(saif->base + SAIF_DATA);
 	}
 	master_saif->ongoing = 1;
@@ -812,3 +820,4 @@ module_platform_driver(mxs_saif_driver);
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("MXS ASoC SAIF driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-saif");
diff --git a/trunk/sound/soc/samsung/Kconfig b/trunk/sound/soc/samsung/Kconfig
index e7b83179aca2..3c7c3a59ed39 100644
--- a/trunk/sound/soc/samsung/Kconfig
+++ b/trunk/sound/soc/samsung/Kconfig
@@ -207,6 +207,8 @@ config SND_SOC_BELLS
 	select SND_SOC_WM5102
 	select SND_SOC_WM5110
 	select SND_SOC_WM9081
+	select SND_SOC_WM0010
+	select SND_SOC_WM1250_EV1
 config SND_SOC_LOWLAND
 	tristate "Audio support for Wolfson Lowland"
diff --git a/trunk/sound/soc/samsung/bells.c b/trunk/sound/soc/samsung/bells.c
index b0d46d63d55e..a2ca1567b9e4 100644
--- a/trunk/sound/soc/samsung/bells.c
+++ b/trunk/sound/soc/samsung/bells.c
@@ -212,7 +212,7 @@ static struct snd_soc_dai_link bells_dai_wm5102[] = {
 	{
 		.name = "Sub",
 		.stream_name = "Sub",
-		.cpu_dai_name = "wm5110-aif3",
+		.cpu_dai_name = "wm5102-aif3",
 		.codec_dai_name = "wm9081-hifi",
 		.codec_name = "wm9081.1-006c",
 		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
diff --git a/trunk/sound/soc/soc-core.c b/trunk/sound/soc/soc-core.c
index d1198627fc40..10d21be383f6 100644
--- a/trunk/sound/soc/soc-core.c
+++ b/trunk/sound/soc/soc-core.c
@@ -2786,8 +2786,9 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
 	val = (ucontrol->value.integer.value[0] + min) & mask;
 	val = val << shift;
-	if (snd_soc_update_bits_locked(codec, reg, val_mask, val))
-		return err;
+	err = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+	if (err < 0)
+		return err;
 	if (snd_soc_volsw_is_stereo(mc)) {
 		val_mask = mask << rshift;
diff --git a/trunk/sound/soc/soc-dapm.c b/trunk/sound/soc/soc-dapm.c
index d0a4be38dc0f..6e35bcae02df 100644
--- a/trunk/sound/soc/soc-dapm.c
+++ b/trunk/sound/soc/soc-dapm.c
@@ -3745,7 +3745,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card)
 {
 	struct snd_soc_codec *codec;
-	list_for_each_entry(codec, &card->codec_dev_list, list) {
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
 		soc_dapm_shutdown_codec(&codec->dapm);
 		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY)
 			snd_soc_dapm_set_bias_level(&codec->dapm,
diff --git a/trunk/sound/usb/card.c b/trunk/sound/usb/card.c
index 282f0fc9fed1..dbf7999d18b4 100644
--- a/trunk/sound/usb/card.c
+++ b/trunk/sound/usb/card.c
@@ -559,9 +559,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
 		return;
 	card = chip->card;
-	mutex_lock(&register_mutex);
 	down_write(&chip->shutdown_rwsem);
 	chip->shutdown = 1;
+	up_write(&chip->shutdown_rwsem);
+
+	mutex_lock(&register_mutex);
 	chip->num_interfaces--;
 	if (chip->num_interfaces <= 0) {
 		snd_card_disconnect(card);
@@ -582,11 +584,9 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
 			snd_usb_mixer_disconnect(p);
 		}
 		usb_chip[chip->index] = NULL;
-		up_write(&chip->shutdown_rwsem);
 		mutex_unlock(&register_mutex);
 		snd_card_free_when_closed(card);
 	} else {
-		up_write(&chip->shutdown_rwsem);
 		mutex_unlock(&register_mutex);
 	}
 }
diff --git a/trunk/sound/usb/endpoint.c b/trunk/sound/usb/endpoint.c
index 7f78c6d782b0..34de6f2faf61 100644
--- a/trunk/sound/usb/endpoint.c
+++ b/trunk/sound/usb/endpoint.c
@@ -35,6 +35,7 @@
 #define EP_FLAG_ACTIVATED	0
 #define EP_FLAG_RUNNING		1
+#define EP_FLAG_STOPPING	2
 /*
  * snd_usb_endpoint is a model that abstracts everything related to an
@@ -502,10 +503,20 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
 	if (alive)
 		snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n",
 			alive, ep->ep_num);
+	clear_bit(EP_FLAG_STOPPING, &ep->flags);
 	return 0;
 }
+/* sync the pending stop operation;
+ * this function itself doesn't trigger the stop operation
+ */
+void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
+{
+	if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags))
+		wait_clear_urbs(ep);
+}
+
 /*
  * unlink active urbs.
 */
@@ -918,6 +929,8 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
 		if (wait)
 			wait_clear_urbs(ep);
+		else
+			set_bit(EP_FLAG_STOPPING, &ep->flags);
 	}
 }
diff --git a/trunk/sound/usb/endpoint.h b/trunk/sound/usb/endpoint.h
index 6376ccf10fd4..3d4c9705041f 100644
--- a/trunk/sound/usb/endpoint.h
+++ b/trunk/sound/usb/endpoint.h
@@ -19,6 +19,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep);
 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
 			   int force, int can_sleep, int wait);
+void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
diff --git a/trunk/sound/usb/midi.c b/trunk/sound/usb/midi.c
index c83f6143c0eb..eeefbce3873c 100644
--- a/trunk/sound/usb/midi.c
+++ b/trunk/sound/usb/midi.c
@@ -148,6 +148,7 @@ struct snd_usb_midi_out_endpoint {
 	struct snd_usb_midi_out_endpoint* ep;
 	struct snd_rawmidi_substream *substream;
 	int active;
+	bool autopm_reference;
 	uint8_t cable;		/* cable number << 4 */
 	uint8_t state;
 #define STATE_UNKNOWN	0
@@ -1076,7 +1077,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
 		return -ENXIO;
 	}
 	err = usb_autopm_get_interface(umidi->iface);
-	if (err < 0)
+	port->autopm_reference = err >= 0;
+	if (err < 0 && err != -EACCES)
 		return -EIO;
 	substream->runtime->private_data = port;
 	port->state = STATE_UNKNOWN;
@@ -1087,9 +1089,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
 static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
 {
 	struct snd_usb_midi* umidi = substream->rmidi->private_data;
+	struct usbmidi_out_port *port = substream->runtime->private_data;
 	substream_open(substream, 0);
-	usb_autopm_put_interface(umidi->iface);
+	if (port->autopm_reference)
+		usb_autopm_put_interface(umidi->iface);
 	return 0;
 }
diff --git a/trunk/sound/usb/pcm.c b/trunk/sound/usb/pcm.c
index 37428f74dbb6..ef6fa24fc473 100644
--- a/trunk/sound/usb/pcm.c
+++ b/trunk/sound/usb/pcm.c
@@ -459,7 +459,7 @@ static int configure_endpoint(struct snd_usb_substream *subs)
 		return ret;
 	if (subs->sync_endpoint)
-		ret = snd_usb_endpoint_set_params(subs->data_endpoint,
+		ret = snd_usb_endpoint_set_params(subs->sync_endpoint,
 						  subs->pcm_format,
 						  subs->channels,
 						  subs->period_bytes,
@@ -568,6 +568,9 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
 		goto unlock;
 	}
+	snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint);
+	snd_usb_endpoint_sync_pending_stop(subs->data_endpoint);
+
 	ret = set_format(subs, subs->cur_audiofmt);
 	if (ret < 0)
 		goto unlock;
diff --git a/trunk/tools/power/x86/turbostat/turbostat.c b/trunk/tools/power/x86/turbostat/turbostat.c
index 2655ae9a3ad8..ea095abbe97e 100644
--- a/trunk/tools/power/x86/turbostat/turbostat.c
+++ b/trunk/tools/power/x86/turbostat/turbostat.c
@@ -206,8 +206,10 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
 	retval = pread(fd, msr, sizeof *msr, offset);
 	close(fd);
-	if (retval != sizeof *msr)
+	if (retval != sizeof *msr) {
+		fprintf(stderr, "%s offset 0x%zx read failed\n", pathname, offset);
 		return -1;
+	}
 	return 0;
 }
@@ -1101,7 +1103,9 @@ void turbostat_loop()
 restart:
 	retval = for_all_cpus(get_counters, EVEN_COUNTERS);
-	if (retval) {
+	if (retval < -1) {
+		exit(retval);
+	} else if (retval == -1) {
 		re_initialize();
 		goto restart;
 	}
@@ -1114,7 +1118,9 @@ void turbostat_loop()
 		}
 		sleep(interval_sec);
 		retval = for_all_cpus(get_counters, ODD_COUNTERS);
-		if (retval) {
+		if (retval < -1) {
+			exit(retval);
+		} else if (retval == -1) {
 			re_initialize();
 			goto restart;
 		}
@@ -1126,7 +1132,9 @@ void turbostat_loop()
 		flush_stdout();
 		sleep(interval_sec);
 		retval = for_all_cpus(get_counters, EVEN_COUNTERS);
-		if (retval) {
+		if (retval < -1) {
+			exit(retval);
+		} else if (retval == -1) {
 			re_initialize();
 			goto restart;
 		}
@@ -1545,8 +1553,11 @@ void turbostat_init()
 int fork_it(char **argv)
 {
 	pid_t child_pid;
+	int status;
-	for_all_cpus(get_counters, EVEN_COUNTERS);
+	status = for_all_cpus(get_counters, EVEN_COUNTERS);
+	if (status)
+		exit(status);
 	/* clear affinity side-effect of get_counters() */
 	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
 	gettimeofday(&tv_even, (struct timezone *)NULL);
@@ -1556,7 +1567,6 @@ int fork_it(char **argv)
 		/* child */
 		execvp(argv[0], argv);
 	} else {
-		int status;
 		/* parent */
 		if (child_pid == -1) {
@@ -1568,7 +1578,7 @@ int fork_it(char **argv)
 		signal(SIGQUIT, SIG_IGN);
 		if (waitpid(child_pid, &status, 0) == -1) {
 			perror("wait");
-			exit(1);
+			exit(status);
 		}
 	}
 	/*
@@ -1585,7 +1595,7 @@ int fork_it(char **argv)
 	fprintf(stderr, "%.6f sec\n",
 		tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
-	return 0;
+	return status;
 }
 void cmdline(int argc, char **argv)
@@ -1594,7 +1604,7 @@ void cmdline(int argc, char **argv)
 	progname = argv[0];
-	while ((opt = getopt(argc, argv, "+pPSvisc:sC:m:M:")) != -1) {
+	while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:")) != -1) {
 		switch (opt) {
 		case 'p':
 			show_core_only++;
diff --git a/trunk/tools/testing/selftests/Makefile b/trunk/tools/testing/selftests/Makefile
index 43480149119e..85baf11e2acd 100644
--- a/trunk/tools/testing/selftests/Makefile
+++ b/trunk/tools/testing/selftests/Makefile
@@ -1,4 +1,4 @@
-TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug epoll
+TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug
 all:
 	for TARGET in $(TARGETS); do \
diff --git a/trunk/tools/testing/selftests/epoll/Makefile b/trunk/tools/testing/selftests/epoll/Makefile
deleted file mode 100644
index 19806ed62f50..000000000000
--- a/trunk/tools/testing/selftests/epoll/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# Makefile for epoll selftests
-
-all: test_epoll
-%: %.c
-	gcc -pthread -g -o $@ $^
-
-run_tests: all
-	./test_epoll
-
-clean:
-	$(RM) test_epoll
diff --git 
a/trunk/tools/testing/selftests/epoll/test_epoll.c b/trunk/tools/testing/selftests/epoll/test_epoll.c deleted file mode 100644 index f7525392ce84..000000000000 --- a/trunk/tools/testing/selftests/epoll/test_epoll.c +++ /dev/null @@ -1,344 +0,0 @@ -/* - * tools/testing/selftests/epoll/test_epoll.c - * - * Copyright 2012 Adobe Systems Incorporated - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * Paton J. Lewis - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * A pointer to an epoll_item_private structure will be stored in the epoll - * item's event structure so that we can get access to the epoll_item_private - * data after calling epoll_wait: - */ -struct epoll_item_private { - int index; /* Position of this struct within the epoll_items array. */ - int fd; - uint32_t events; - pthread_mutex_t mutex; /* Guards the following variables... */ - int stop; - int status; /* Stores any error encountered while handling item. */ - /* The following variable allows us to test whether we have encountered - a problem while attempting to cancel and delete the associated - event. When the test program exits, 'deleted' should be exactly - one. If it is greater than one, then the failed test reflects a real - world situation where we would have tried to access the epoll item's - private data after deleting it: */ - int deleted; -}; - -struct epoll_item_private *epoll_items; - -/* - * Delete the specified item from the epoll set. In a real-world secneario this - * is where we would free the associated data structure, but in this testing - * environment we retain the structure so that we can test for double-deletion: - */ -void delete_item(int index) -{ - __sync_fetch_and_add(&epoll_items[index].deleted, 1); -} - -/* - * A pointer to a read_thread_data structure will be passed as the argument to - * each read thread: - */ -struct read_thread_data { - int stop; - int status; /* Indicates any error encountered by the read thread. */ - int epoll_set; -}; - -/* - * The function executed by the read threads: - */ -void *read_thread_function(void *function_data) -{ - struct read_thread_data *thread_data = - (struct read_thread_data *)function_data; - struct epoll_event event_data; - struct epoll_item_private *item_data; - char socket_data; - - /* Handle events until we encounter an error or this thread's 'stop' - condition is set: */ - while (1) { - int result = epoll_wait(thread_data->epoll_set, - &event_data, - 1, /* Number of desired events */ - 1000); /* Timeout in ms */ - if (result < 0) { - /* Breakpoints signal all threads. 
Ignore that while - debugging: */ - if (errno == EINTR) - continue; - thread_data->status = errno; - return 0; - } else if (thread_data->stop) - return 0; - else if (result == 0) /* Timeout */ - continue; - - /* We need the mutex here because checking for the stop - condition and re-enabling the epoll item need to be done - together as one atomic operation when EPOLL_CTL_DISABLE is - available: */ - item_data = (struct epoll_item_private *)event_data.data.ptr; - pthread_mutex_lock(&item_data->mutex); - - /* Remove the item from the epoll set if we want to stop - handling that event: */ - if (item_data->stop) - delete_item(item_data->index); - else { - /* Clear the data that was written to the other end of - our non-blocking socket: */ - do { - if (read(item_data->fd, &socket_data, 1) < 1) { - if ((errno == EAGAIN) || - (errno == EWOULDBLOCK)) - break; - else - goto error_unlock; - } - } while (item_data->events & EPOLLET); - - /* The item was one-shot, so re-enable it: */ - event_data.events = item_data->events; - if (epoll_ctl(thread_data->epoll_set, - EPOLL_CTL_MOD, - item_data->fd, - &event_data) < 0) - goto error_unlock; - } - - pthread_mutex_unlock(&item_data->mutex); - } - -error_unlock: - thread_data->status = item_data->status = errno; - pthread_mutex_unlock(&item_data->mutex); - return 0; -} - -/* - * A pointer to a write_thread_data structure will be passed as the argument to - * the write thread: - */ -struct write_thread_data { - int stop; - int status; /* Indicates any error encountered by the write thread. */ - int n_fds; - int *fds; -}; - -/* - * The function executed by the write thread. It writes a single byte to each - * socket in turn until the stop condition for this thread is set. If writing to - * a socket would block (i.e. errno was EAGAIN), we leave that socket alone for - * the moment and just move on to the next socket in the list. We don't care - * about the order in which we deliver events to the epoll set. 
In fact we don't - * care about the data we're writing to the pipes at all; we just want to - * trigger epoll events: - */ -void *write_thread_function(void *function_data) -{ - const char data = 'X'; - int index; - struct write_thread_data *thread_data = - (struct write_thread_data *)function_data; - while (!thread_data->stop) - for (index = 0; - !thread_data->stop && (index < thread_data->n_fds); - ++index) - if ((write(thread_data->fds[index], &data, 1) < 1) && - (errno != EAGAIN) && - (errno != EWOULDBLOCK)) { - thread_data->status = errno; - return; - } -} - -/* - * Arguments are currently ignored: - */ -int main(int argc, char **argv) -{ - const int n_read_threads = 100; - const int n_epoll_items = 500; - int index; - int epoll_set = epoll_create1(0); - struct write_thread_data write_thread_data = { - 0, 0, n_epoll_items, malloc(n_epoll_items * sizeof(int)) - }; - struct read_thread_data *read_thread_data = - malloc(n_read_threads * sizeof(struct read_thread_data)); - pthread_t *read_threads = malloc(n_read_threads * sizeof(pthread_t)); - pthread_t write_thread; - - printf("-----------------\n"); - printf("Runing test_epoll\n"); - printf("-----------------\n"); - - epoll_items = malloc(n_epoll_items * sizeof(struct epoll_item_private)); - - if (epoll_set < 0 || epoll_items == 0 || write_thread_data.fds == 0 || - read_thread_data == 0 || read_threads == 0) - goto error; - - if (sysconf(_SC_NPROCESSORS_ONLN) < 2) { - printf("Error: please run this test on a multi-core system.\n"); - goto error; - } - - /* Create the socket pairs and epoll items: */ - for (index = 0; index < n_epoll_items; ++index) { - int socket_pair[2]; - struct epoll_event event_data; - if (socketpair(AF_UNIX, - SOCK_STREAM | SOCK_NONBLOCK, - 0, - socket_pair) < 0) - goto error; - write_thread_data.fds[index] = socket_pair[0]; - epoll_items[index].index = index; - epoll_items[index].fd = socket_pair[1]; - if (pthread_mutex_init(&epoll_items[index].mutex, NULL) != 0) - goto error; - /* We always use EPOLLONESHOT because this test is currently - structured to demonstrate the need for EPOLL_CTL_DISABLE, - which only produces useful information in the EPOLLONESHOT - case (without EPOLLONESHOT, calling epoll_ctl with - EPOLL_CTL_DISABLE will never return EBUSY). If support for - testing events without EPOLLONESHOT is desired, it should - probably be implemented in a separate unit test. 
*/ - epoll_items[index].events = EPOLLIN | EPOLLONESHOT; - if (index < n_epoll_items / 2) - epoll_items[index].events |= EPOLLET; - epoll_items[index].stop = 0; - epoll_items[index].status = 0; - epoll_items[index].deleted = 0; - event_data.events = epoll_items[index].events; - event_data.data.ptr = &epoll_items[index]; - if (epoll_ctl(epoll_set, - EPOLL_CTL_ADD, - epoll_items[index].fd, - &event_data) < 0) - goto error; - } - - /* Create and start the read threads: */ - for (index = 0; index < n_read_threads; ++index) { - read_thread_data[index].stop = 0; - read_thread_data[index].status = 0; - read_thread_data[index].epoll_set = epoll_set; - if (pthread_create(&read_threads[index], - NULL, - read_thread_function, - &read_thread_data[index]) != 0) - goto error; - } - - if (pthread_create(&write_thread, - NULL, - write_thread_function, - &write_thread_data) != 0) - goto error; - - /* Cancel all event pollers: */ -#ifdef EPOLL_CTL_DISABLE - for (index = 0; index < n_epoll_items; ++index) { - pthread_mutex_lock(&epoll_items[index].mutex); - ++epoll_items[index].stop; - if (epoll_ctl(epoll_set, - EPOLL_CTL_DISABLE, - epoll_items[index].fd, - NULL) == 0) - delete_item(index); - else if (errno != EBUSY) { - pthread_mutex_unlock(&epoll_items[index].mutex); - goto error; - } - /* EBUSY means events were being handled; allow the other thread - to delete the item. */ - pthread_mutex_unlock(&epoll_items[index].mutex); - } -#else - for (index = 0; index < n_epoll_items; ++index) { - pthread_mutex_lock(&epoll_items[index].mutex); - ++epoll_items[index].stop; - pthread_mutex_unlock(&epoll_items[index].mutex); - /* Wait in case a thread running read_thread_function is - currently executing code between epoll_wait and - pthread_mutex_lock with this item. Note that a longer delay - would make double-deletion less likely (at the expense of - performance), but there is no guarantee that any delay would - ever be sufficient. Note also that we delete all event - pollers at once for testing purposes, but in a real-world - environment we are likely to want to be able to cancel event - pollers at arbitrary times. Therefore we can't improve this - situation by just splitting this loop into two loops - (i.e. signal 'stop' for all items, sleep, and then delete all - items). 
We also can't fix the problem via EPOLL_CTL_DEL - because that command can't prevent the case where some other - thread is executing read_thread_function within the region - mentioned above: */ - usleep(1); - pthread_mutex_lock(&epoll_items[index].mutex); - if (!epoll_items[index].deleted) - delete_item(index); - pthread_mutex_unlock(&epoll_items[index].mutex); - } -#endif - - /* Shut down the read threads: */ - for (index = 0; index < n_read_threads; ++index) - __sync_fetch_and_add(&read_thread_data[index].stop, 1); - for (index = 0; index < n_read_threads; ++index) { - if (pthread_join(read_threads[index], NULL) != 0) - goto error; - if (read_thread_data[index].status) - goto error; - } - - /* Shut down the write thread: */ - __sync_fetch_and_add(&write_thread_data.stop, 1); - if ((pthread_join(write_thread, NULL) != 0) || write_thread_data.status) - goto error; - - /* Check for final error conditions: */ - for (index = 0; index < n_epoll_items; ++index) { - if (epoll_items[index].status != 0) - goto error; - if (pthread_mutex_destroy(&epoll_items[index].mutex) < 0) - goto error; - } - for (index = 0; index < n_epoll_items; ++index) - if (epoll_items[index].deleted != 1) { - printf("Error: item data deleted %1d times.\n", - epoll_items[index].deleted); - goto error; - } - - printf("[PASS]\n"); - return 0; - - error: - printf("[FAIL]\n"); - return errno; -}