From dced55ac114c6bab126b41bbbbdf42a3e1470feb Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 27 Sep 2012 15:08:35 -0400 Subject: [PATCH] --- yaml --- r: 345571 b: refs/heads/master c: 4d75658bffea78f0c6f82fd46df1ec983ccacdf0 h: refs/heads/master i: 345569: d60eba35a3c453ce10bf4a8d33f81a7312b04474 345567: b13bf095aefdd1854267e05508d726411f851da1 v: v3 --- [refs] | 2 +- trunk/Documentation/DMA-attributes.txt | 9 - trunk/Documentation/DocBook/drm.tmpl | 39 +- .../bindings/gpu/nvidia,tegra20-host1x.txt | 191 -- trunk/Documentation/kref.txt | 88 - trunk/MAINTAINERS | 9 - trunk/arch/arm/mm/dma-mapping.c | 41 +- trunk/drivers/char/agp/intel-agp.h | 91 + trunk/drivers/char/agp/intel-gtt.c | 320 +- trunk/drivers/gpu/drm/Kconfig | 2 - trunk/drivers/gpu/drm/Makefile | 6 +- trunk/drivers/gpu/drm/ast/ast_ttm.c | 12 +- trunk/drivers/gpu/drm/cirrus/cirrus_drv.c | 13 +- trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c | 12 +- trunk/drivers/gpu/drm/drm_crtc.c | 63 +- trunk/drivers/gpu/drm/drm_crtc_helper.c | 161 +- .../{drm_dp_helper.c => drm_dp_i2c_helper.c} | 146 +- trunk/drivers/gpu/drm/drm_edid.c | 29 +- trunk/drivers/gpu/drm/drm_fb_helper.c | 76 +- trunk/drivers/gpu/drm/drm_hashtab.c | 38 +- trunk/drivers/gpu/drm/drm_ioctl.c | 3 - trunk/drivers/gpu/drm/drm_irq.c | 120 +- trunk/drivers/gpu/drm/drm_modes.c | 8 +- trunk/drivers/gpu/drm/drm_pci.c | 2 +- trunk/drivers/gpu/drm/drm_stub.c | 37 +- trunk/drivers/gpu/drm/drm_sysfs.c | 6 +- trunk/drivers/gpu/drm/exynos/Kconfig | 6 - trunk/drivers/gpu/drm/exynos/Makefile | 1 - trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c | 94 +- trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h | 4 +- .../drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5 - .../gpu/drm/exynos/exynos_drm_dmabuf.c | 84 +- trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c | 23 +- trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h | 14 +- .../gpu/drm/exynos/exynos_drm_encoder.c | 33 - .../gpu/drm/exynos/exynos_drm_encoder.h | 1 - trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c | 56 +- .../drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 34 +- .../drivers/gpu/drm/exynos/exynos_drm_fimd.c | 60 +- trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c | 495 +--- trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c | 435 ++- trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h | 56 +- .../drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 15 - .../drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 1 - .../drivers/gpu/drm/exynos/exynos_drm_iommu.c | 150 - .../drivers/gpu/drm/exynos/exynos_drm_iommu.h | 85 - .../drivers/gpu/drm/exynos/exynos_drm_vidi.c | 20 +- trunk/drivers/gpu/drm/exynos/exynos_hdmi.c | 237 +- trunk/drivers/gpu/drm/exynos/exynos_mixer.c | 93 +- trunk/drivers/gpu/drm/exynos/regs-hdmi.h | 17 +- trunk/drivers/gpu/drm/gma500/cdv_device.c | 4 +- trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c | 2 +- trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 6 +- trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c | 10 +- .../drivers/gpu/drm/gma500/mdfld_dsi_output.c | 12 +- .../gpu/drm/gma500/mdfld_intel_display.c | 2 +- trunk/drivers/gpu/drm/gma500/oaktrail.h | 6 - trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c | 10 +- .../drivers/gpu/drm/gma500/oaktrail_device.c | 2 +- trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c | 365 +-- trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c | 8 +- trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c | 10 +- trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c | 24 +- trunk/drivers/gpu/drm/i2c/ch7006_drv.c | 20 +- trunk/drivers/gpu/drm/i915/i915_debugfs.c | 52 +- trunk/drivers/gpu/drm/i915/i915_dma.c | 86 +- trunk/drivers/gpu/drm/i915/i915_drv.c | 131 +- 
trunk/drivers/gpu/drm/i915/i915_drv.h | 454 ++- trunk/drivers/gpu/drm/i915/i915_gem.c | 196 +- trunk/drivers/gpu/drm/i915/i915_gem_context.c | 2 +- .../gpu/drm/i915/i915_gem_execbuffer.c | 36 +- trunk/drivers/gpu/drm/i915/i915_gem_gtt.c | 416 +-- trunk/drivers/gpu/drm/i915/i915_irq.c | 80 +- trunk/drivers/gpu/drm/i915/i915_reg.h | 297 +- trunk/drivers/gpu/drm/i915/i915_suspend.c | 763 +++-- trunk/drivers/gpu/drm/i915/i915_sysfs.c | 43 +- trunk/drivers/gpu/drm/i915/i915_trace.h | 10 +- trunk/drivers/gpu/drm/i915/intel_bios.c | 3 +- trunk/drivers/gpu/drm/i915/intel_crt.c | 49 +- trunk/drivers/gpu/drm/i915/intel_ddi.c | 1074 ++----- trunk/drivers/gpu/drm/i915/intel_display.c | 1660 +++-------- trunk/drivers/gpu/drm/i915/intel_dp.c | 961 +++---- trunk/drivers/gpu/drm/i915/intel_drv.h | 117 +- trunk/drivers/gpu/drm/i915/intel_hdmi.c | 133 +- trunk/drivers/gpu/drm/i915/intel_i2c.c | 9 +- trunk/drivers/gpu/drm/i915/intel_lvds.c | 225 +- trunk/drivers/gpu/drm/i915/intel_modes.c | 11 +- trunk/drivers/gpu/drm/i915/intel_opregion.c | 2 - trunk/drivers/gpu/drm/i915/intel_panel.c | 77 +- trunk/drivers/gpu/drm/i915/intel_pm.c | 489 ++-- trunk/drivers/gpu/drm/i915/intel_ringbuffer.c | 126 +- trunk/drivers/gpu/drm/i915/intel_ringbuffer.h | 6 +- trunk/drivers/gpu/drm/i915/intel_sdvo.c | 97 +- trunk/drivers/gpu/drm/i915/intel_sprite.c | 101 +- trunk/drivers/gpu/drm/i915/intel_tv.c | 21 +- trunk/drivers/gpu/drm/mgag200/mgag200_main.c | 4 +- trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c | 12 +- trunk/drivers/gpu/drm/nouveau/Makefile | 38 +- .../gpu/drm/nouveau/core/core/engctx.c | 15 - .../gpu/drm/nouveau/core/core/falcon.c | 247 -- .../gpu/drm/nouveau/core/core/gpuobj.c | 4 +- trunk/drivers/gpu/drm/nouveau/core/core/mm.c | 17 +- .../gpu/drm/nouveau/core/engine/bsp/nv84.c | 108 +- .../gpu/drm/nouveau/core/engine/bsp/nvc0.c | 110 - .../gpu/drm/nouveau/core/engine/bsp/nve0.c | 110 - .../gpu/drm/nouveau/core/engine/copy/nva3.c | 124 +- .../gpu/drm/nouveau/core/engine/copy/nvc0.c | 167 +- .../gpu/drm/nouveau/core/engine/copy/nve0.c | 54 +- .../gpu/drm/nouveau/core/engine/crypt/nv84.c | 46 +- .../gpu/drm/nouveau/core/engine/crypt/nv98.c | 83 +- .../drm/nouveau/core/engine/disp/dacnv50.c | 88 - .../drm/nouveau/core/engine/disp/hdanva3.c | 48 - .../drm/nouveau/core/engine/disp/hdanvd0.c | 53 - .../drm/nouveau/core/engine/disp/hdminv84.c | 66 - .../drm/nouveau/core/engine/disp/hdminva3.c | 66 - .../drm/nouveau/core/engine/disp/hdminvd0.c | 62 - .../gpu/drm/nouveau/core/engine/disp/nv50.c | 1144 +------- .../gpu/drm/nouveau/core/engine/disp/nv50.h | 142 - .../gpu/drm/nouveau/core/engine/disp/nv84.c | 98 - .../gpu/drm/nouveau/core/engine/disp/nv94.c | 109 - .../gpu/drm/nouveau/core/engine/disp/nva0.c | 88 - .../gpu/drm/nouveau/core/engine/disp/nva3.c | 111 - .../gpu/drm/nouveau/core/engine/disp/nvd0.c | 884 +----- .../gpu/drm/nouveau/core/engine/disp/nve0.c | 94 - .../drm/nouveau/core/engine/disp/sornv50.c | 112 - .../drm/nouveau/core/engine/disp/sornv94.c | 190 -- .../drm/nouveau/core/engine/disp/sornvd0.c | 126 - .../gpu/drm/nouveau/core/engine/dmaobj/base.c | 71 +- .../gpu/drm/nouveau/core/engine/dmaobj/nv04.c | 68 +- .../gpu/drm/nouveau/core/engine/dmaobj/nv50.c | 126 +- .../gpu/drm/nouveau/core/engine/dmaobj/nvc0.c | 104 +- .../gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 122 - .../gpu/drm/nouveau/core/engine/fifo/base.c | 19 +- .../gpu/drm/nouveau/core/engine/fifo/nv04.c | 17 +- .../gpu/drm/nouveau/core/engine/fifo/nv10.c | 6 +- .../gpu/drm/nouveau/core/engine/fifo/nv17.c | 8 +- 
.../gpu/drm/nouveau/core/engine/fifo/nv40.c | 8 +- .../gpu/drm/nouveau/core/engine/fifo/nv50.c | 36 +- .../gpu/drm/nouveau/core/engine/fifo/nv84.c | 60 +- .../gpu/drm/nouveau/core/engine/fifo/nvc0.c | 26 +- .../gpu/drm/nouveau/core/engine/fifo/nve0.c | 21 +- .../gpu/drm/nouveau/core/engine/graph/nv04.c | 184 +- .../gpu/drm/nouveau/core/engine/graph/nv10.c | 10 +- .../gpu/drm/nouveau/core/engine/graph/nv20.c | 6 +- .../gpu/drm/nouveau/core/engine/graph/nv40.c | 38 +- .../gpu/drm/nouveau/core/engine/graph/nv50.c | 83 +- .../gpu/drm/nouveau/core/engine/graph/nvc0.c | 2 +- .../gpu/drm/nouveau/core/engine/graph/regs.h | 5 - .../gpu/drm/nouveau/core/engine/mpeg/nv31.c | 6 +- .../gpu/drm/nouveau/core/engine/mpeg/nv50.c | 1 + .../gpu/drm/nouveau/core/engine/ppp/nv98.c | 107 +- .../gpu/drm/nouveau/core/engine/ppp/nvc0.c | 110 - .../drm/nouveau/core/engine/software/nv04.c | 4 +- .../drm/nouveau/core/engine/software/nv10.c | 2 +- .../drm/nouveau/core/engine/software/nv50.c | 10 +- .../drm/nouveau/core/engine/software/nvc0.c | 10 +- .../gpu/drm/nouveau/core/engine/vp/nv84.c | 108 +- .../gpu/drm/nouveau/core/engine/vp/nvc0.c | 110 - .../gpu/drm/nouveau/core/engine/vp/nve0.c | 110 - .../gpu/drm/nouveau/core/include/core/class.h | 225 -- .../drm/nouveau/core/include/core/engctx.h | 3 - .../drm/nouveau/core/include/core/falcon.h | 81 - .../drm/nouveau/core/include/core/gpuobj.h | 4 +- .../gpu/drm/nouveau/core/include/core/mm.h | 6 - .../drm/nouveau/core/include/core/object.h | 41 +- .../drm/nouveau/core/include/core/parent.h | 2 +- .../gpu/drm/nouveau/core/include/engine/bsp.h | 41 +- .../drm/nouveau/core/include/engine/copy.h | 39 +- .../drm/nouveau/core/include/engine/crypt.h | 39 + .../drm/nouveau/core/include/engine/disp.h | 5 - .../drm/nouveau/core/include/engine/dmaobj.h | 29 +- .../drm/nouveau/core/include/engine/fifo.h | 6 +- .../gpu/drm/nouveau/core/include/engine/ppp.h | 40 +- .../gpu/drm/nouveau/core/include/engine/vp.h | 41 +- .../nouveau/core/include/subdev/bios/dcb.h | 34 +- .../nouveau/core/include/subdev/bios/disp.h | 48 - .../drm/nouveau/core/include/subdev/bios/dp.h | 32 +- .../gpu/drm/nouveau/core/include/subdev/fb.h | 43 +- .../gpu/drm/nouveau/core/subdev/bar/base.c | 4 +- .../gpu/drm/nouveau/core/subdev/bios/base.c | 32 +- .../gpu/drm/nouveau/core/subdev/bios/dcb.c | 63 - .../gpu/drm/nouveau/core/subdev/bios/disp.c | 178 -- .../gpu/drm/nouveau/core/subdev/bios/dp.c | 182 +- .../gpu/drm/nouveau/core/subdev/bios/gpio.c | 4 +- .../gpu/drm/nouveau/core/subdev/bios/init.c | 13 +- .../gpu/drm/nouveau/core/subdev/device/base.c | 28 +- .../gpu/drm/nouveau/core/subdev/device/nv10.c | 4 +- .../gpu/drm/nouveau/core/subdev/device/nv20.c | 6 +- .../gpu/drm/nouveau/core/subdev/device/nv30.c | 6 +- .../gpu/drm/nouveau/core/subdev/device/nv40.c | 28 +- .../gpu/drm/nouveau/core/subdev/device/nv50.c | 26 +- .../gpu/drm/nouveau/core/subdev/device/nvc0.c | 64 +- .../gpu/drm/nouveau/core/subdev/device/nve0.c | 17 +- .../drm/nouveau/core/subdev/devinit/nv50.c | 34 +- .../gpu/drm/nouveau/core/subdev/fb/base.c | 92 +- .../gpu/drm/nouveau/core/subdev/fb/nv04.c | 62 +- .../gpu/drm/nouveau/core/subdev/fb/nv10.c | 52 +- .../gpu/drm/nouveau/core/subdev/fb/nv1a.c | 89 - .../gpu/drm/nouveau/core/subdev/fb/nv20.c | 86 +- .../gpu/drm/nouveau/core/subdev/fb/nv25.c | 81 - .../gpu/drm/nouveau/core/subdev/fb/nv30.c | 51 +- .../gpu/drm/nouveau/core/subdev/fb/nv35.c | 82 - .../gpu/drm/nouveau/core/subdev/fb/nv36.c | 82 - .../gpu/drm/nouveau/core/subdev/fb/nv40.c | 131 +- .../gpu/drm/nouveau/core/subdev/fb/nv41.c | 106 - 
.../gpu/drm/nouveau/core/subdev/fb/nv44.c | 114 - .../gpu/drm/nouveau/core/subdev/fb/nv46.c | 79 - .../gpu/drm/nouveau/core/subdev/fb/nv47.c | 66 - .../gpu/drm/nouveau/core/subdev/fb/nv49.c | 84 - .../gpu/drm/nouveau/core/subdev/fb/nv4e.c | 72 - .../gpu/drm/nouveau/core/subdev/fb/nv50.c | 393 ++- .../gpu/drm/nouveau/core/subdev/fb/nvc0.c | 126 +- .../gpu/drm/nouveau/core/subdev/i2c/aux.c | 2 +- .../drm/nouveau/core/subdev/instmem/nv04.c | 8 +- .../drm/nouveau/core/subdev/instmem/nv40.c | 4 +- .../drm/nouveau/core/subdev/instmem/nv50.c | 4 +- .../gpu/drm/nouveau/core/subdev/mc/base.c | 8 +- .../gpu/drm/nouveau/core/subdev/mc/nv50.c | 1 - .../gpu/drm/nouveau/core/subdev/mc/nv98.c | 1 - .../gpu/drm/nouveau/core/subdev/mc/nvc0.c | 1 - trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c | 27 +- trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c | 30 +- trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h | 4 - trunk/drivers/gpu/drm/nouveau/nouveau_bios.c | 235 +- trunk/drivers/gpu/drm/nouveau/nouveau_bios.h | 9 + trunk/drivers/gpu/drm/nouveau/nouveau_bo.c | 65 +- trunk/drivers/gpu/drm/nouveau/nouveau_bo.h | 2 +- trunk/drivers/gpu/drm/nouveau/nouveau_chan.c | 6 +- .../gpu/drm/nouveau/nouveau_connector.c | 25 +- .../gpu/drm/nouveau/nouveau_connector.h | 16 - trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h | 10 + .../drivers/gpu/drm/nouveau/nouveau_display.c | 25 +- trunk/drivers/gpu/drm/nouveau/nouveau_dp.c | 141 +- trunk/drivers/gpu/drm/nouveau/nouveau_drm.c | 98 +- trunk/drivers/gpu/drm/nouveau/nouveau_drm.h | 4 +- .../drivers/gpu/drm/nouveau/nouveau_encoder.h | 7 +- trunk/drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +- trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c | 261 ++ trunk/drivers/gpu/drm/nouveau/nouveau_irq.c | 12 + trunk/drivers/gpu/drm/nouveau/nouveau_prime.c | 4 + trunk/drivers/gpu/drm/nouveau/nouveau_vga.c | 5 +- trunk/drivers/gpu/drm/nouveau/nv04_crtc.c | 6 +- trunk/drivers/gpu/drm/nouveau/nv04_display.c | 5 + trunk/drivers/gpu/drm/nouveau/nv10_fence.c | 7 +- trunk/drivers/gpu/drm/nouveau/nv17_tv.c | 16 +- trunk/drivers/gpu/drm/nouveau/nv50_crtc.c | 764 +++++ trunk/drivers/gpu/drm/nouveau/nv50_cursor.c | 136 + trunk/drivers/gpu/drm/nouveau/nv50_dac.c | 321 +++ trunk/drivers/gpu/drm/nouveau/nv50_display.c | 2561 +++++------------ trunk/drivers/gpu/drm/nouveau/nv50_display.h | 71 +- trunk/drivers/gpu/drm/nouveau/nv50_evo.c | 403 +++ trunk/drivers/gpu/drm/nouveau/nv50_evo.h | 120 + trunk/drivers/gpu/drm/nouveau/nv50_fence.c | 5 +- trunk/drivers/gpu/drm/nouveau/nv50_pm.c | 2 +- trunk/drivers/gpu/drm/nouveau/nv50_sor.c | 530 ++++ trunk/drivers/gpu/drm/nouveau/nvc0_fence.c | 28 +- trunk/drivers/gpu/drm/nouveau/nvd0_display.c | 2141 ++++++++++++++ trunk/drivers/gpu/drm/radeon/atombios_dp.c | 149 +- trunk/drivers/gpu/drm/radeon/r600.c | 479 ++- trunk/drivers/gpu/drm/radeon/r600_cp.c | 7 +- trunk/drivers/gpu/drm/radeon/r600d.h | 54 +- trunk/drivers/gpu/drm/radeon/radeon.h | 11 +- trunk/drivers/gpu/drm/radeon/radeon_asic.c | 39 +- trunk/drivers/gpu/drm/radeon/radeon_asic.h | 13 + .../gpu/drm/radeon/radeon_connectors.c | 62 +- trunk/drivers/gpu/drm/radeon/radeon_drv.c | 13 +- trunk/drivers/gpu/drm/radeon/radeon_gart.c | 1 + trunk/drivers/gpu/drm/radeon/radeon_mode.h | 2 +- trunk/drivers/gpu/drm/radeon/radeon_object.c | 16 +- trunk/drivers/gpu/drm/radeon/radeon_object.h | 2 +- trunk/drivers/gpu/drm/radeon/radeon_ttm.c | 40 +- trunk/drivers/gpu/drm/radeon/rv770.c | 31 +- trunk/drivers/gpu/drm/radeon/rv770d.h | 23 + .../drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2 +- trunk/drivers/gpu/drm/tegra/Kconfig | 23 - 
trunk/drivers/gpu/drm/tegra/Makefile | 7 - trunk/drivers/gpu/drm/tegra/dc.c | 834 ------ trunk/drivers/gpu/drm/tegra/dc.h | 388 --- trunk/drivers/gpu/drm/tegra/drm.c | 115 - trunk/drivers/gpu/drm/tegra/drm.h | 234 -- trunk/drivers/gpu/drm/tegra/fb.c | 56 - trunk/drivers/gpu/drm/tegra/hdmi.c | 1334 --------- trunk/drivers/gpu/drm/tegra/hdmi.h | 575 ---- trunk/drivers/gpu/drm/tegra/host1x.c | 325 --- trunk/drivers/gpu/drm/tegra/output.c | 272 -- trunk/drivers/gpu/drm/tegra/rgb.c | 228 -- trunk/drivers/gpu/drm/ttm/ttm_bo.c | 321 ++- trunk/drivers/gpu/drm/ttm/ttm_bo_util.c | 8 +- trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c | 4 +- trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c | 10 +- trunk/drivers/gpu/drm/ttm/ttm_memory.c | 1 + trunk/drivers/gpu/drm/ttm/ttm_object.c | 51 +- trunk/drivers/gpu/drm/udl/udl_connector.c | 14 +- trunk/drivers/gpu/drm/vmwgfx/Makefile | 3 +- .../gpu/drm/vmwgfx/svga3d_surfacedefs.h | 909 ------ trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 23 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 274 -- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 22 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 92 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 153 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 917 ++---- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 7 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 21 +- .../drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2109 +++++++++----- .../gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 84 - trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 4 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 893 ------ trunk/drivers/gpu/vga/vga_switcheroo.c | 6 + trunk/include/drm/drmP.h | 3 - trunk/include/drm/drm_crtc.h | 18 +- trunk/include/drm/drm_crtc_helper.h | 3 - trunk/include/drm/drm_dp_helper.h | 39 - trunk/include/drm/drm_hashtab.h | 14 - trunk/include/drm/intel-gtt.h | 7 +- trunk/include/drm/ttm/ttm_bo_api.h | 33 +- trunk/include/drm/ttm/ttm_bo_driver.h | 45 +- trunk/include/drm/ttm/ttm_execbuf_util.h | 3 + trunk/include/drm/ttm/ttm_memory.h | 2 + trunk/include/drm/ttm/ttm_object.h | 4 - trunk/include/linux/dma-attrs.h | 1 - trunk/include/linux/kref.h | 21 - trunk/include/uapi/drm/drm.h | 1 - trunk/include/uapi/drm/exynos_drm.h | 13 +- trunk/include/uapi/drm/i915_drm.h | 6 - 326 files changed, 14869 insertions(+), 26007 deletions(-) delete mode 100644 trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt rename trunk/drivers/gpu/drm/{drm_dp_helper.c => drm_dp_i2c_helper.c} (58%) delete mode 100644 trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c delete mode 100644 trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/core/falcon.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c delete mode 100644 
trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/include/core/falcon.h delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c delete mode 100644 trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_crtc.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_cursor.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_dac.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_evo.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_evo.h create mode 100644 trunk/drivers/gpu/drm/nouveau/nv50_sor.c create mode 100644 trunk/drivers/gpu/drm/nouveau/nvd0_display.c delete mode 100644 trunk/drivers/gpu/drm/tegra/Kconfig delete mode 100644 trunk/drivers/gpu/drm/tegra/Makefile delete mode 100644 trunk/drivers/gpu/drm/tegra/dc.c delete mode 100644 trunk/drivers/gpu/drm/tegra/dc.h delete mode 100644 trunk/drivers/gpu/drm/tegra/drm.c delete mode 100644 trunk/drivers/gpu/drm/tegra/drm.h delete mode 100644 trunk/drivers/gpu/drm/tegra/fb.c delete mode 100644 trunk/drivers/gpu/drm/tegra/hdmi.c delete mode 100644 trunk/drivers/gpu/drm/tegra/hdmi.h delete mode 100644 trunk/drivers/gpu/drm/tegra/host1x.c delete mode 100644 trunk/drivers/gpu/drm/tegra/output.c delete mode 100644 trunk/drivers/gpu/drm/tegra/rgb.c delete mode 100644 trunk/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h delete mode 100644 trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c delete mode 100644 trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h delete mode 100644 trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c diff --git a/[refs] b/[refs] index d9cdc2ad91e8..4a3dbf1f7757 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 97a875cbdf89a4638eea57c2b456c7cc4e3e8b21 +refs/heads/master: 4d75658bffea78f0c6f82fd46df1ec983ccacdf0 diff --git a/trunk/Documentation/DMA-attributes.txt b/trunk/Documentation/DMA-attributes.txt index e59480db9ee0..f50309081ac7 100644 --- a/trunk/Documentation/DMA-attributes.txt 
+++ b/trunk/Documentation/DMA-attributes.txt @@ -91,12 +91,3 @@ transferred to 'device' domain. This attribute can be also used for dma_unmap_{single,page,sg} functions family to force buffer to stay in device domain after releasing a mapping for it. Use this attribute with care! - -DMA_ATTR_FORCE_CONTIGUOUS -------------------------- - -By default DMA-mapping subsystem is allowed to assemble the buffer -allocated by dma_alloc_attrs() function from individual pages if it can -be mapped as contiguous chunk into device dma address space. By -specifing this attribute the allocated buffer is forced to be contiguous -also in physical memory. diff --git a/trunk/Documentation/DocBook/drm.tmpl b/trunk/Documentation/DocBook/drm.tmpl index 4ee2304f82f9..b0300529ab13 100644 --- a/trunk/Documentation/DocBook/drm.tmpl +++ b/trunk/Documentation/DocBook/drm.tmpl @@ -1141,13 +1141,23 @@ int max_width, max_height; the page_flip operation will be called with a non-NULL event argument pointing to a drm_pending_vblank_event instance. Upon page - flip completion the driver must call drm_send_vblank_event - to fill in the event and send to wake up any waiting processes. - This can be performed with + flip completion the driver must fill the + event::event + sequence, tv_sec + and tv_usec fields with the associated + vertical blanking count and timestamp, add the event to the + drm_file list of events to be signaled, and wake + up any waiting process. This can be performed with event.sequence = drm_vblank_count_and_time(..., &now); + event->event.tv_sec = now.tv_sec; + event->event.tv_usec = now.tv_usec; + spin_lock_irqsave(&dev->event_lock, flags); - ... - drm_send_vblank_event(dev, pipe, event); + list_add_tail(&event->base.link, &event->base.file_priv->event_list); + wake_up_interruptible(&event->base.file_priv->event_wait); spin_unlock_irqrestore(&dev->event_lock, flags); ]]> @@ -1611,10 +1621,10 @@ void intel_crt_init(struct drm_device *dev) - + - Mode Setting Helper Functions + Mid-layer Helper Functions The CRTC, encoder and connector functions provided by the drivers implement the DRM API. They're called by the DRM core and ioctl handlers @@ -2096,21 +2106,6 @@ void intel_crt_init(struct drm_device *dev) - - Modeset Helper Functions Reference -!Edrivers/gpu/drm/drm_crtc_helper.c - - - fbdev Helper Functions Reference -!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers -!Edrivers/gpu/drm/drm_fb_helper.c - - - Display Port Helper Functions Reference -!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers -!Iinclude/drm/drm_dp_helper.h -!Edrivers/gpu/drm/drm_dp_helper.c - diff --git a/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt deleted file mode 100644 index b4fa934ae3a2..000000000000 --- a/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt +++ /dev/null @@ -1,191 +0,0 @@ -NVIDIA Tegra host1x - -Required properties: -- compatible: "nvidia,tegra-host1x" -- reg: Physical base address and length of the controller's registers. -- interrupts: The interrupt outputs from the controller. -- #address-cells: The number of cells used to represent physical base addresses - in the host1x address space. Should be 1. -- #size-cells: The number of cells used to represent the size of an address - range in the host1x address space. Should be 1. -- ranges: The mapping of the host1x address space to the CPU address space. 
- -The host1x top-level node defines a number of children, each representing one -of the following host1x client modules: - -- mpe: video encoder - - Required properties: - - compatible: "nvidia,tegra-mpe" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- vi: video input - - Required properties: - - compatible: "nvidia,tegra-vi" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- epp: encoder pre-processor - - Required properties: - - compatible: "nvidia,tegra-epp" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- isp: image signal processor - - Required properties: - - compatible: "nvidia,tegra-isp" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- gr2d: 2D graphics engine - - Required properties: - - compatible: "nvidia,tegra-gr2d" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- gr3d: 3D graphics engine - - Required properties: - - compatible: "nvidia,tegra-gr3d" - - reg: Physical base address and length of the controller's registers. - -- dc: display controller - - Required properties: - - compatible: "nvidia,tegra-dc" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - - Each display controller node has a child node, named "rgb", that represents - the RGB output associated with the controller. It can take the following - optional properties: - - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing - - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection - - nvidia,edid: supplies a binary EDID blob - -- hdmi: High Definition Multimedia Interface - - Required properties: - - compatible: "nvidia,tegra-hdmi" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - - vdd-supply: regulator for supply voltage - - pll-supply: regulator for PLL - - Optional properties: - - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing - - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection - - nvidia,edid: supplies a binary EDID blob - -- tvo: TV encoder output - - Required properties: - - compatible: "nvidia,tegra-tvo" - - reg: Physical base address and length of the controller's registers. - - interrupts: The interrupt outputs from the controller. - -- dsi: display serial interface - - Required properties: - - compatible: "nvidia,tegra-dsi" - - reg: Physical base address and length of the controller's registers. - -Example: - -/ { - ... 
- - host1x { - compatible = "nvidia,tegra20-host1x", "simple-bus"; - reg = <0x50000000 0x00024000>; - interrupts = <0 65 0x04 /* mpcore syncpt */ - 0 67 0x04>; /* mpcore general */ - - #address-cells = <1>; - #size-cells = <1>; - - ranges = <0x54000000 0x54000000 0x04000000>; - - mpe { - compatible = "nvidia,tegra20-mpe"; - reg = <0x54040000 0x00040000>; - interrupts = <0 68 0x04>; - }; - - vi { - compatible = "nvidia,tegra20-vi"; - reg = <0x54080000 0x00040000>; - interrupts = <0 69 0x04>; - }; - - epp { - compatible = "nvidia,tegra20-epp"; - reg = <0x540c0000 0x00040000>; - interrupts = <0 70 0x04>; - }; - - isp { - compatible = "nvidia,tegra20-isp"; - reg = <0x54100000 0x00040000>; - interrupts = <0 71 0x04>; - }; - - gr2d { - compatible = "nvidia,tegra20-gr2d"; - reg = <0x54140000 0x00040000>; - interrupts = <0 72 0x04>; - }; - - gr3d { - compatible = "nvidia,tegra20-gr3d"; - reg = <0x54180000 0x00040000>; - }; - - dc@54200000 { - compatible = "nvidia,tegra20-dc"; - reg = <0x54200000 0x00040000>; - interrupts = <0 73 0x04>; - - rgb { - status = "disabled"; - }; - }; - - dc@54240000 { - compatible = "nvidia,tegra20-dc"; - reg = <0x54240000 0x00040000>; - interrupts = <0 74 0x04>; - - rgb { - status = "disabled"; - }; - }; - - hdmi { - compatible = "nvidia,tegra20-hdmi"; - reg = <0x54280000 0x00040000>; - interrupts = <0 75 0x04>; - status = "disabled"; - }; - - tvo { - compatible = "nvidia,tegra20-tvo"; - reg = <0x542c0000 0x00040000>; - interrupts = <0 76 0x04>; - status = "disabled"; - }; - - dsi { - compatible = "nvidia,tegra20-dsi"; - reg = <0x54300000 0x00040000>; - status = "disabled"; - }; - }; - - ... -}; diff --git a/trunk/Documentation/kref.txt b/trunk/Documentation/kref.txt index ddf85a5dde0c..48ba715d5a63 100644 --- a/trunk/Documentation/kref.txt +++ b/trunk/Documentation/kref.txt @@ -213,91 +213,3 @@ presentation on krefs, which can be found at: and: http://www.kroah.com/linux/talks/ols_2004_kref_talk/ - -The above example could also be optimized using kref_get_unless_zero() in -the following way: - -static struct my_data *get_entry() -{ - struct my_data *entry = NULL; - mutex_lock(&mutex); - if (!list_empty(&q)) { - entry = container_of(q.next, struct my_data, link); - if (!kref_get_unless_zero(&entry->refcount)) - entry = NULL; - } - mutex_unlock(&mutex); - return entry; -} - -static void release_entry(struct kref *ref) -{ - struct my_data *entry = container_of(ref, struct my_data, refcount); - - mutex_lock(&mutex); - list_del(&entry->link); - mutex_unlock(&mutex); - kfree(entry); -} - -static void put_entry(struct my_data *entry) -{ - kref_put(&entry->refcount, release_entry); -} - -Which is useful to remove the mutex lock around kref_put() in put_entry(), but -it's important that kref_get_unless_zero is enclosed in the same critical -section that finds the entry in the lookup table, -otherwise kref_get_unless_zero may reference already freed memory. -Note that it is illegal to use kref_get_unless_zero without checking its -return value. If you are sure (by already having a valid pointer) that -kref_get_unless_zero() will return true, then use kref_get() instead. - -The function kref_get_unless_zero also makes it possible to use rcu -locking for lookups in the above example: - -struct my_data -{ - struct rcu_head rhead; - . - struct kref refcount; - . - . 
-}; - -static struct my_data *get_entry_rcu() -{ - struct my_data *entry = NULL; - rcu_read_lock(); - if (!list_empty(&q)) { - entry = container_of(q.next, struct my_data, link); - if (!kref_get_unless_zero(&entry->refcount)) - entry = NULL; - } - rcu_read_unlock(); - return entry; -} - -static void release_entry_rcu(struct kref *ref) -{ - struct my_data *entry = container_of(ref, struct my_data, refcount); - - mutex_lock(&mutex); - list_del_rcu(&entry->link); - mutex_unlock(&mutex); - kfree_rcu(entry, rhead); -} - -static void put_entry(struct my_data *entry) -{ - kref_put(&entry->refcount, release_entry_rcu); -} - -But note that the struct kref member needs to remain in valid memory for a -rcu grace period after release_entry_rcu was called. That can be accomplished -by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu() -before using kfree, but note that synchronize_rcu() may sleep for a -substantial amount of time. - - -Thomas Hellstrom diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 8e196d7b7c4e..9386a63ea8f6 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -2520,15 +2520,6 @@ S: Supported F: drivers/gpu/drm/exynos F: include/drm/exynos* -DRM DRIVERS FOR NVIDIA TEGRA -M: Thierry Reding -L: dri-devel@lists.freedesktop.org -L: linux-tegra@vger.kernel.org -T: git git://gitorious.org/thierryreding/linux.git -S: Maintained -F: drivers/gpu/drm/tegra/ -F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt - DSCC4 DRIVER M: Francois Romieu L: netdev@vger.kernel.org diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c index f076f209c7a4..58bc3e4d3bd0 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -1036,8 +1036,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping, spin_unlock_irqrestore(&mapping->lock, flags); } -static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, - gfp_t gfp, struct dma_attrs *attrs) +static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) { struct page **pages; int count = size >> PAGE_SHIFT; @@ -1051,23 +1050,6 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, if (!pages) return NULL; - if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) - { - unsigned long order = get_order(size); - struct page *page; - - page = dma_alloc_from_contiguous(dev, count, order); - if (!page) - goto error; - - __dma_clear_buffer(page, size); - - for (i = 0; i < count; i++) - pages[i] = page + i; - - return pages; - } - while (count) { int j, order = __fls(count); @@ -1101,21 +1083,14 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, return NULL; } -static int __iommu_free_buffer(struct device *dev, struct page **pages, - size_t size, struct dma_attrs *attrs) +static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size) { int count = size >> PAGE_SHIFT; int array_size = count * sizeof(struct page *); int i; - - if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { - dma_release_from_contiguous(dev, pages[0], count); - } else { - for (i = 0; i < count; i++) - if (pages[i]) - __free_pages(pages[i], 0); - } - + for (i = 0; i < count; i++) + if (pages[i]) + __free_pages(pages[i], 0); if (array_size <= PAGE_SIZE) kfree(pages); else @@ -1277,7 +1252,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, if (gfp & GFP_ATOMIC) return __iommu_alloc_atomic(dev, size, handle); - pages = __iommu_alloc_buffer(dev, size, gfp, 
attrs); + pages = __iommu_alloc_buffer(dev, size, gfp); if (!pages) return NULL; @@ -1298,7 +1273,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, err_mapping: __iommu_remove_mapping(dev, *handle, size); err_buffer: - __iommu_free_buffer(dev, pages, size, attrs); + __iommu_free_buffer(dev, pages, size); return NULL; } @@ -1354,7 +1329,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, } __iommu_remove_mapping(dev, handle, size); - __iommu_free_buffer(dev, pages, size, attrs); + __iommu_free_buffer(dev, pages, size); } static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h index 1042c1b90376..6ec0fff79bc2 100644 --- a/trunk/drivers/char/agp/intel-agp.h +++ b/trunk/drivers/char/agp/intel-agp.h @@ -62,6 +62,12 @@ #define I810_PTE_LOCAL 0x00000002 #define I810_PTE_VALID 0x00000001 #define I830_PTE_SYSTEM_CACHED 0x00000006 +/* GT PTE cache control fields */ +#define GEN6_PTE_UNCACHED 0x00000002 +#define HSW_PTE_UNCACHED 0x00000000 +#define GEN6_PTE_LLC 0x00000004 +#define GEN6_PTE_LLC_MLC 0x00000006 +#define GEN6_PTE_GFDT 0x00000008 #define I810_SMRAM_MISCC 0x70 #define I810_GFX_MEM_WIN_SIZE 0x00010000 @@ -91,6 +97,7 @@ #define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN) #define GFX_FLSH_CNTL 0x2170 /* 915+ */ +#define GFX_FLSH_CNTL_VLV 0x101008 #define I810_DRAM_CTL 0x3000 #define I810_DRAM_ROW_0 0x00000001 @@ -141,6 +148,29 @@ #define INTEL_I7505_AGPCTRL 0x70 #define INTEL_I7505_MCHCFG 0x50 +#define SNB_GMCH_CTRL 0x50 +#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 +#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) +#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) +#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) +#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) +#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) +#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) +#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) +#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) +#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) +#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) +#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) +#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) +#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) +#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) +#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) +#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) +#define SNB_GTT_SIZE_0M (0 << 8) +#define SNB_GTT_SIZE_1M (1 << 8) +#define SNB_GTT_SIZE_2M (2 << 8) +#define SNB_GTT_SIZE_MASK (3 << 8) + /* pci devices ids */ #define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588 #define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a @@ -189,5 +219,66 @@ #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152 +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162 +#define 
PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156 +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166 +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */ +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A +#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A +#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ +#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 +#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a +#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A #endif diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c index dbd901e94ea6..38390f7c6ab6 100644 --- a/trunk/drivers/char/agp/intel-gtt.c +++ b/trunk/drivers/char/agp/intel-gtt.c @@ -367,6 +367,62 @@ static unsigned int intel_gtt_stolen_size(void) stolen_size = 0; break; } + } else if (INTEL_GTT_GEN == 6) { + /* + * SandyBridge has new memory control reg at 0x50.w + */ + u16 snb_gmch_ctl; + pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); + switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { + case SNB_GMCH_GMS_STOLEN_32M: + stolen_size = MB(32); + break; + case SNB_GMCH_GMS_STOLEN_64M: + stolen_size = MB(64); + break; + case SNB_GMCH_GMS_STOLEN_96M: + stolen_size = MB(96); + break; + case SNB_GMCH_GMS_STOLEN_128M: + 
stolen_size = MB(128); + break; + case SNB_GMCH_GMS_STOLEN_160M: + stolen_size = MB(160); + break; + case SNB_GMCH_GMS_STOLEN_192M: + stolen_size = MB(192); + break; + case SNB_GMCH_GMS_STOLEN_224M: + stolen_size = MB(224); + break; + case SNB_GMCH_GMS_STOLEN_256M: + stolen_size = MB(256); + break; + case SNB_GMCH_GMS_STOLEN_288M: + stolen_size = MB(288); + break; + case SNB_GMCH_GMS_STOLEN_320M: + stolen_size = MB(320); + break; + case SNB_GMCH_GMS_STOLEN_352M: + stolen_size = MB(352); + break; + case SNB_GMCH_GMS_STOLEN_384M: + stolen_size = MB(384); + break; + case SNB_GMCH_GMS_STOLEN_416M: + stolen_size = MB(416); + break; + case SNB_GMCH_GMS_STOLEN_448M: + stolen_size = MB(448); + break; + case SNB_GMCH_GMS_STOLEN_480M: + stolen_size = MB(480); + break; + case SNB_GMCH_GMS_STOLEN_512M: + stolen_size = MB(512); + break; + } } else { switch (gmch_ctrl & I855_GMCH_GMS_MASK) { case I855_GMCH_GMS_STOLEN_1M: @@ -500,9 +556,29 @@ static unsigned int i965_gtt_total_entries(void) static unsigned int intel_gtt_total_entries(void) { + int size; + if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) return i965_gtt_total_entries(); - else { + else if (INTEL_GTT_GEN == 6) { + u16 snb_gmch_ctl; + + pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); + switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { + default: + case SNB_GTT_SIZE_0M: + printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); + size = MB(0); + break; + case SNB_GTT_SIZE_1M: + size = MB(1); + break; + case SNB_GTT_SIZE_2M: + size = MB(2); + break; + } + return size/4; + } else { /* On previous hardware, the GTT size was just what was * required to map the aperture. */ @@ -702,6 +778,9 @@ bool intel_enable_gtt(void) { u8 __iomem *reg; + if (INTEL_GTT_GEN >= 6) + return true; + if (INTEL_GTT_GEN == 2) { u16 gmch_ctrl; @@ -1070,6 +1149,85 @@ static void i965_write_entry(dma_addr_t addr, writel(addr | pte_flags, intel_private.gtt + entry); } +static bool gen6_check_flags(unsigned int flags) +{ + return true; +} + +static void haswell_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_MEMORY) + pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } else { /* set 'normal'/'cached' to LLC by default */ + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); +} + +static void gen6_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_MEMORY) + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } else { /* set 'normal'/'cached' to LLC by default */ + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); +} 
+ +static void valleyview_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_MEMORY) + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; + else { + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); + + writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV); +} + +static void gen6_cleanup(void) +{ +} + /* Certain Gen5 chipsets require require idling the GPU before * unmapping anything from the GTT when VT-d is enabled. */ @@ -1091,29 +1249,41 @@ static inline int needs_idle_maps(void) static int i9xx_setup(void) { - u32 reg_addr, gtt_addr; + u32 reg_addr; int size = KB(512); pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr); reg_addr &= 0xfff80000; + if (INTEL_GTT_GEN >= 7) + size = MB(2); + intel_private.registers = ioremap(reg_addr, size); if (!intel_private.registers) return -ENOMEM; - switch (INTEL_GTT_GEN) { - case 3: + if (INTEL_GTT_GEN == 3) { + u32 gtt_addr; + pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, >t_addr); intel_private.gtt_bus_addr = gtt_addr; - break; - case 5: - intel_private.gtt_bus_addr = reg_addr + MB(2); - break; - default: - intel_private.gtt_bus_addr = reg_addr + KB(512); - break; + } else { + u32 gtt_offset; + + switch (INTEL_GTT_GEN) { + case 5: + case 6: + case 7: + gtt_offset = MB(2); + break; + case 4: + default: + gtt_offset = KB(512); + break; + } + intel_private.gtt_bus_addr = reg_addr + gtt_offset; } if (needs_idle_maps()) @@ -1225,6 +1395,32 @@ static const struct intel_gtt_driver ironlake_gtt_driver = { .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, }; +static const struct intel_gtt_driver sandybridge_gtt_driver = { + .gen = 6, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = gen6_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver haswell_gtt_driver = { + .gen = 6, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = haswell_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver valleyview_gtt_driver = { + .gen = 7, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = valleyview_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, +}; /* Table to describe Intel GMCH and AGP/PCIE GART drivers. 
At least one of * driver and gmch_driver must be non-null, and find_gmch will determine @@ -1305,6 +1501,106 @@ static const struct intel_gtt_driver_description { "HD Graphics", &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, "HD Graphics", &ironlake_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, + "Sandybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG, + "Ivybridge", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, + "ValleyView", &valleyview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, 
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, { 0, NULL, NULL } }; @@ -1390,7 +1686,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, } EXPORT_SYMBOL(intel_gmch_probe); -struct intel_gtt *intel_gtt_get(void) +const struct intel_gtt *intel_gtt_get(void) { return &intel_private.base; } diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig index 983201b450f1..18321b68b880 100644 --- a/trunk/drivers/gpu/drm/Kconfig +++ b/trunk/drivers/gpu/drm/Kconfig @@ -210,5 +210,3 @@ source "drivers/gpu/drm/mgag200/Kconfig" source "drivers/gpu/drm/cirrus/Kconfig" source "drivers/gpu/drm/shmobile/Kconfig" - -source "drivers/gpu/drm/tegra/Kconfig" diff --git a/trunk/drivers/gpu/drm/Makefile b/trunk/drivers/gpu/drm/Makefile index 6f58c81cfcbc..2ff5cefe9ead 100644 --- a/trunk/drivers/gpu/drm/Makefile +++ b/trunk/drivers/gpu/drm/Makefile @@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_context.o drm_dma.o \ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ - drm_agpsupport.o drm_scatter.o drm_pci.o \ + drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ drm_crtc.o drm_modes.o drm_edid.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ @@ -16,11 +16,10 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o -drm-$(CONFIG_PCI) += ati_pcigart.o drm-usb-y := drm_usb.o -drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o +drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o @@ -49,5 +48,4 @@ obj-$(CONFIG_DRM_GMA500) += gma500/ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ -obj-$(CONFIG_DRM_TEGRA) += tegra/ obj-y += i2c/ diff --git a/trunk/drivers/gpu/drm/ast/ast_ttm.c b/trunk/drivers/gpu/drm/ast/ast_ttm.c index 3602731a6112..1a026ac2dfb4 100644 --- a/trunk/drivers/gpu/drm/ast/ast_ttm.c +++ b/trunk/drivers/gpu/drm/ast/ast_ttm.c @@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg * static int ast_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, - bool no_wait_gpu, + bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 
return r; } @@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align, ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, ttm_bo_type_device, &astbo->placement, - align >> PAGE_SHIFT, false, NULL, acc_size, + align >> PAGE_SHIFT, 0, false, NULL, acc_size, NULL, ast_bo_ttm_destroy); if (ret) return ret; @@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr) ast_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) return ret; @@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo) for (i = 0; i < bo->placement.num_placement ; i++) bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) return ret; @@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo) for (i = 0; i < bo->placement.num_placement ; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) { DRM_ERROR("pushing to VRAM failed\n"); return ret; diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c index dcd1a8c029eb..101e423c8991 100644 --- a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -35,15 +35,12 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { }; -static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev) +static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev) { struct apertures_struct *ap; bool primary = false; ap = alloc_apertures(1); - if (!ap) - return -ENOMEM; - ap->ranges[0].base = pci_resource_start(pdev, 0); ap->ranges[0].size = pci_resource_len(pdev, 0); @@ -52,18 +49,12 @@ static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev) #endif remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary); kfree(ap); - - return 0; } static int __devinit cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - int ret; - - ret = cirrus_kick_out_firmware_fb(pdev); - if (ret) - return ret; + cirrus_kick_out_firmware_fb(pdev); return drm_get_pci_dev(pdev, ent, &driver); } diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c b/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c index 1413a26e4905..bc83f835c830 100644 --- a/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re static int cirrus_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, - bool no_wait_gpu, + bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); return r; } @@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align, ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size, ttm_bo_type_device, &cirrusbo->placement, - align >> PAGE_SHIFT, false, NULL, acc_size, + align >> PAGE_SHIFT, 0, false, NULL, acc_size, NULL, cirrus_bo_ttm_destroy); if (ret) return ret; @@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr) cirrus_ttm_placement(bo, pl_flag); for 
(i = 0; i < bo->placement.num_placement; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) return ret; @@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo) for (i = 0; i < bo->placement.num_placement ; i++) bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) return ret; @@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo) for (i = 0; i < bo->placement.num_placement ; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) { DRM_ERROR("pushing to VRAM failed\n"); return ret; diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index f2d667b8bee2..ef1b22144d37 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -470,8 +470,10 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - kfree(crtc->gamma_store); - crtc->gamma_store = NULL; + if (crtc->gamma_store) { + kfree(crtc->gamma_store); + crtc->gamma_store = NULL; + } drm_mode_object_put(dev, &crtc->base); list_del(&crtc->head); @@ -553,17 +555,16 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); connector->edid_blob_ptr = NULL; - connector->status = connector_status_unknown; list_add_tail(&connector->head, &dev->mode_config.connector_list); dev->mode_config.num_connector++; if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.edid_property, 0); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.dpms_property, 0); out: @@ -2279,21 +2280,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) for (i = 0; i < num_planes; i++) { unsigned int width = r->width / (i != 0 ? hsub : 1); - unsigned int height = r->height / (i != 0 ? 
vsub : 1); - unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i); if (!r->handles[i]) { DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); return -EINVAL; } - if ((uint64_t) width * cpp > UINT_MAX) - return -ERANGE; - - if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX) - return -ERANGE; - - if (r->pitches[i] < width * cpp) { + if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) { DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); return -EINVAL; } @@ -2330,11 +2323,6 @@ int drm_mode_addfb2(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - if (r->flags & ~DRM_MODE_FB_INTERLACED) { - DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); - return -EINVAL; - } - if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", r->width, config->min_width, config->max_width); @@ -2928,6 +2916,27 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property) } EXPORT_SYMBOL(drm_property_destroy); +void drm_connector_attach_property(struct drm_connector *connector, + struct drm_property *property, uint64_t init_val) +{ + drm_object_attach_property(&connector->base, property, init_val); +} +EXPORT_SYMBOL(drm_connector_attach_property); + +int drm_connector_property_set_value(struct drm_connector *connector, + struct drm_property *property, uint64_t value) +{ + return drm_object_property_set_value(&connector->base, property, value); +} +EXPORT_SYMBOL(drm_connector_property_set_value); + +int drm_connector_property_get_value(struct drm_connector *connector, + struct drm_property *property, uint64_t *val) +{ + return drm_object_property_get_value(&connector->base, property, val); +} +EXPORT_SYMBOL(drm_connector_property_get_value); + void drm_object_attach_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t init_val) @@ -3164,17 +3173,15 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, /* Delete edid, when there is none. 
*/ if (!edid) { connector->edid_blob_ptr = NULL; - ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0); + ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0); return ret; } size = EDID_LENGTH * (1 + edid->extensions); connector->edid_blob_ptr = drm_property_create_blob(connector->dev, size, edid); - if (!connector->edid_blob_ptr) - return -EINVAL; - ret = drm_object_property_set_value(&connector->base, + ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, connector->edid_blob_ptr->base.id); @@ -3197,9 +3204,6 @@ static bool drm_property_change_is_valid(struct drm_property *property, for (i = 0; i < property->num_values; i++) valid_mask |= (1ULL << property->values[i]); return !(value & ~valid_mask); - } else if (property->flags & DRM_MODE_PROP_BLOB) { - /* Only the driver knows */ - return true; } else { int i; for (i = 0; i < property->num_values; i++) @@ -3241,7 +3245,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, /* store the property value if successful */ if (!ret) - drm_object_property_set_value(&connector->base, property, value); + drm_connector_property_set_value(connector, property, value); return ret; } @@ -3652,12 +3656,9 @@ void drm_mode_config_reset(struct drm_device *dev) if (encoder->funcs->reset) encoder->funcs->reset(encoder); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - connector->status = connector_status_unknown; - + list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->funcs->reset) connector->funcs->reset(connector); - } } EXPORT_SYMBOL(drm_mode_config_reset); diff --git a/trunk/drivers/gpu/drm/drm_crtc_helper.c b/trunk/drivers/gpu/drm/drm_crtc_helper.c index 7b2d378b2576..1227adf74dbc 100644 --- a/trunk/drivers/gpu/drm/drm_crtc_helper.c +++ b/trunk/drivers/gpu/drm/drm_crtc_helper.c @@ -39,35 +39,6 @@ #include #include -/** - * drm_helper_move_panel_connectors_to_head() - move panels to the front in the - * connector list - * @dev: drm device to operate on - * - * Some userspace presumes that the first connected connector is the main - * display, where it's supposed to display e.g. the login screen. For - * laptops, this should be the main panel. Use this function to sort all - * (eDP/LVDS) panels to the front of the connector list, instead of - * painstakingly trying to initialize them in the right order. - */ -void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) -{ - struct drm_connector *connector, *tmp; - struct list_head panel_list; - - INIT_LIST_HEAD(&panel_list); - - list_for_each_entry_safe(connector, tmp, - &dev->mode_config.connector_list, head) { - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || - connector->connector_type == DRM_MODE_CONNECTOR_eDP) - list_move_tail(&connector->head, &panel_list); - } - - list_splice(&panel_list, &dev->mode_config.connector_list); -} -EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); - static bool drm_kms_helper_poll = true; module_param_named(poll, drm_kms_helper_poll, bool, 0600); @@ -93,21 +64,22 @@ static void drm_mode_validate_flag(struct drm_connector *connector, /** * drm_helper_probe_single_connector_modes - get complete set of display modes - * @connector: connector to probe + * @dev: DRM device * @maxX: max width for modes * @maxY: max height for modes * * LOCKING: * Caller must hold mode config lock. 
* - * Based on the helper callbacks implemented by @connector try to detect all - * valid modes. Modes will first be added to the connector's probed_modes list, - * then culled (based on validity and the @maxX, @maxY parameters) and put into - * the normal modes list. + * Based on @dev's mode_config layout, scan all the connectors and try to detect + * modes on them. Modes will first be added to the connector's probed_modes + * list, then culled (based on validity and the @maxX, @maxY parameters) and + * put into the normal modes list. * - * Intended to be use as a generic implementation of the ->probe() @connector - * callback for drivers that use the crtc helpers for output mode filtering and - * detection. + * Intended to be used either at bootup time or when major configuration + * changes have occurred. + * + * FIXME: take into account monitor limits * * RETURNS: * Number of modes found on @connector. @@ -137,13 +109,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, connector->funcs->force(connector); } else { connector->status = connector->funcs->detect(connector, true); - } - - /* Re-enable polling in case the global poll config changed. */ - if (drm_kms_helper_poll != dev->mode_config.poll_running) drm_kms_helper_poll_enable(dev); - - dev->mode_config.poll_running = drm_kms_helper_poll; + } if (connector->status == connector_status_disconnected) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", @@ -358,24 +325,17 @@ drm_crtc_prepare_encoders(struct drm_device *dev) } /** - * drm_crtc_helper_set_mode - internal helper to set a mode + * drm_crtc_set_mode - set a mode * @crtc: CRTC to program * @mode: mode to use - * @x: horizontal offset into the surface - * @y: vertical offset into the surface - * @old_fb: old framebuffer, for cleanup + * @x: width of mode + * @y: height of mode * * LOCKING: * Caller must hold mode config lock. * * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance - * to fixup or reject the mode prior to trying to set it. This is an internal - * helper that drivers could e.g. use to update properties that require the - * entire output pipe to be disabled and re-enabled in a new configuration. For - * example for changing whether audio is enabled on a hdmi link or for changing - * panel fitter or dither attributes. It is also called by the - * drm_crtc_helper_set_config() helper function to drive the mode setting - * sequence. + * to fixup or reject the mode prior to trying to set it. * * RETURNS: * True if the mode was set successfully, or false otherwise. @@ -531,19 +491,20 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) /** * drm_crtc_helper_set_config - set a new config from userspace - * @set: mode set configuration + * @crtc: CRTC to setup + * @crtc_info: user provided configuration + * @new_mode: new mode to set + * @connector_set: set of connectors for the new config + * @fb: new framebuffer * * LOCKING: * Caller must hold mode config lock. * - * Setup a new configuration, provided by the upper layers (either an ioctl call - * from userspace or internally e.g. from the fbdev suppport code) in @set, and - * enable it. This is the main helper functions for drivers that implement - * kernel mode setting with the crtc helper functions and the assorted - * ->prepare(), ->modeset() and ->commit() helper callbacks. + * Setup a new configuration, provided by the user in @crtc_info, and enable + * it. * * RETURNS: - * Returns 0 on success, -ERRNO on failure. + * Zero. 
(FIXME) */ int drm_crtc_helper_set_config(struct drm_mode_set *set) { @@ -839,14 +800,12 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) } /** - * drm_helper_connector_dpms() - connector dpms helper implementation - * @connector: affected connector - * @mode: DPMS mode + * drm_helper_connector_dpms + * @connector affected connector + * @mode DPMS mode * - * This is the main helper function provided by the crtc helper framework for - * implementing the DPMS connector attribute. It computes the new desired DPMS - * state for all encoders and crtcs in the output mesh and calls the ->dpms() - * callback provided by the driver appropriately. + * Calls the low-level connector DPMS function, then + * calls appropriate encoder and crtc DPMS functions as well */ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) { @@ -959,15 +918,6 @@ int drm_helper_resume_force_mode(struct drm_device *dev) } EXPORT_SYMBOL(drm_helper_resume_force_mode); -void drm_kms_helper_hotplug_event(struct drm_device *dev) -{ - /* send a uevent + call fbdev */ - drm_sysfs_hotplug_event(dev); - if (dev->mode_config.funcs->output_poll_changed) - dev->mode_config.funcs->output_poll_changed(dev); -} -EXPORT_SYMBOL(drm_kms_helper_hotplug_event); - #define DRM_OUTPUT_POLL_PERIOD (10*HZ) static void output_poll_execute(struct work_struct *work) { @@ -983,22 +933,20 @@ static void output_poll_execute(struct work_struct *work) mutex_lock(&dev->mode_config.mutex); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - /* Ignore forced connectors. */ - if (connector->force) + /* if this is HPD or polled don't check it - + TV out for instance */ + if (!connector->polled) continue; - /* Ignore HPD capable connectors and connectors where we don't - * want any hotplug detection at all for polling. 
*/ - if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD) - continue; - - repoll = true; + else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) + repoll = true; old_status = connector->status; /* if we are connected and don't want to poll for disconnect skip it */ if (old_status == connector_status_connected && - !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT)) + !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && + !(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; connector->status = connector->funcs->detect(connector, false); @@ -1012,8 +960,12 @@ static void output_poll_execute(struct work_struct *work) mutex_unlock(&dev->mode_config.mutex); - if (changed) - drm_kms_helper_hotplug_event(dev); + if (changed) { + /* send a uevent + call fbdev */ + drm_sysfs_hotplug_event(dev); + if (dev->mode_config.funcs->output_poll_changed) + dev->mode_config.funcs->output_poll_changed(dev); + } if (repoll) schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); @@ -1036,8 +988,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) return; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT)) + if (connector->polled) poll = true; } @@ -1063,34 +1014,12 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini); void drm_helper_hpd_irq_event(struct drm_device *dev) { - struct drm_connector *connector; - enum drm_connector_status old_status; - bool changed = false; - if (!dev->mode_config.poll_enabled) return; - mutex_lock(&dev->mode_config.mutex); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - - /* Only handle HPD capable connectors. */ - if (!(connector->polled & DRM_CONNECTOR_POLL_HPD)) - continue; - - old_status = connector->status; - - connector->status = connector->funcs->detect(connector, false); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", - connector->base.id, - drm_get_connector_name(connector), - old_status, connector->status); - if (old_status != connector->status) - changed = true; - } - - mutex_unlock(&dev->mode_config.mutex); - - if (changed) - drm_kms_helper_hotplug_event(dev); + /* kill timer and schedule immediate execution, this doesn't block */ + cancel_delayed_work(&dev->mode_config.output_poll_work); + if (drm_kms_helper_poll) + schedule_delayed_work(&dev->mode_config.output_poll_work, 0); } EXPORT_SYMBOL(drm_helper_hpd_irq_event); diff --git a/trunk/drivers/gpu/drm/drm_dp_helper.c b/trunk/drivers/gpu/drm/drm_dp_i2c_helper.c similarity index 58% rename from trunk/drivers/gpu/drm/drm_dp_helper.c rename to trunk/drivers/gpu/drm/drm_dp_i2c_helper.c index 89e196627160..7f246f212457 100644 --- a/trunk/drivers/gpu/drm/drm_dp_helper.c +++ b/trunk/drivers/gpu/drm/drm_dp_i2c_helper.c @@ -30,15 +30,6 @@ #include #include -/** - * DOC: dp helpers - * - * These functions contain some common logic and helpers at various abstraction - * levels to deal with Display Port sink devices and related things like DP aux - * channel transfers, EDID reading over DP aux channels, decoding certain DPCD - * blocks, ... 
- */ - /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ static int i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, @@ -46,7 +37,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, { struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; int ret; - + ret = (*algo_data->aux_ch)(adapter, mode, write_byte, read_byte); return ret; @@ -191,6 +182,7 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) { (void) i2c_algo_dp_aux_address(adapter, 0, false); (void) i2c_algo_dp_aux_stop(adapter, false); + } static int @@ -202,23 +194,11 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) return 0; } -/** - * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper - * @adapter: i2c adapter to register - * - * This registers an i2c adapater that uses dp aux channel as it's underlaying - * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure - * and store it in the algo_data member of the @adapter argument. This will be - * used by the i2c over dp aux algorithm to drive the hardware. - * - * RETURNS: - * 0 on success, -ERRNO on failure. - */ int i2c_dp_aux_add_bus(struct i2c_adapter *adapter) { int error; - + error = i2c_dp_aux_prepare_bus(adapter); if (error) return error; @@ -226,123 +206,3 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter) return error; } EXPORT_SYMBOL(i2c_dp_aux_add_bus); - -/* Helpers for DP link training */ -static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) -{ - return link_status[r - DP_LANE0_1_STATUS]; -} - -static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_LANE0_1_STATUS + (lane >> 1); - int s = (lane & 1) * 4; - u8 l = dp_link_status(link_status, i); - return (l >> s) & 0xf; -} - -bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count) -{ - u8 lane_align; - u8 lane_status; - int lane; - - lane_align = dp_link_status(link_status, - DP_LANE_ALIGN_STATUS_UPDATED); - if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) - return false; - for (lane = 0; lane < lane_count; lane++) { - lane_status = dp_get_lane_status(link_status, lane); - if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) - return false; - } - return true; -} -EXPORT_SYMBOL(drm_dp_channel_eq_ok); - -bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count) -{ - int lane; - u8 lane_status; - - for (lane = 0; lane < lane_count; lane++) { - lane_status = dp_get_lane_status(link_status, lane); - if ((lane_status & DP_LANE_CR_DONE) == 0) - return false; - } - return true; -} -EXPORT_SYMBOL(drm_dp_clock_recovery_ok); - -u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); - int s = ((lane & 1) ? - DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : - DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); - u8 l = dp_link_status(link_status, i); - - return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; -} -EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage); - -u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); - int s = ((lane & 1) ? 
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : - DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); - u8 l = dp_link_status(link_status, i); - - return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; -} -EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); - -void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) - udelay(100); - else - mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); -} -EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); - -void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) - udelay(400); - else - mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); -} -EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay); - -u8 drm_dp_link_rate_to_bw_code(int link_rate) -{ - switch (link_rate) { - case 162000: - default: - return DP_LINK_BW_1_62; - case 270000: - return DP_LINK_BW_2_7; - case 540000: - return DP_LINK_BW_5_4; - } -} -EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code); - -int drm_dp_bw_code_to_link_rate(u8 link_bw) -{ - switch (link_bw) { - case DP_LINK_BW_1_62: - default: - return 162000; - case DP_LINK_BW_2_7: - return 270000; - case DP_LINK_BW_5_4: - return 540000; - } -} -EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index 484c36a4b7a5..fadcd44ff196 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -307,9 +307,12 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, static bool drm_edid_is_zero(u8 *in_edid, int length) { - if (memchr_inv(in_edid, 0, length)) - return false; + int i; + u32 *raw_edid = (u32 *)in_edid; + for (i = 0; i < length / 4; i++) + if (*(raw_edid + i) != 0) + return false; return true; } @@ -1513,26 +1516,6 @@ u8 *drm_find_cea_extension(struct edid *edid) } EXPORT_SYMBOL(drm_find_cea_extension); -/* - * Looks for a CEA mode matching given drm_display_mode. - * Returns its CEA Video ID code, or 0 if not found. - */ -u8 drm_match_cea_mode(struct drm_display_mode *to_match) -{ - struct drm_display_mode *cea_mode; - u8 mode; - - for (mode = 0; mode < drm_num_cea_modes; mode++) { - cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode]; - - if (drm_mode_equal(to_match, cea_mode)) - return mode + 1; - } - return 0; -} -EXPORT_SYMBOL(drm_match_cea_mode); - - static int do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) { @@ -1639,7 +1622,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db) if (len >= 12) connector->audio_latency[1] = db[12]; - DRM_DEBUG_KMS("HDMI: DVI dual %d, " + DRM_LOG_KMS("HDMI: DVI dual %d, " "max TMDS clock %d, " "latency present %d %d, " "video latency %d %d, " diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c index 954d175bd7fa..4d58d7e6af3f 100644 --- a/trunk/drivers/gpu/drm/drm_fb_helper.c +++ b/trunk/drivers/gpu/drm/drm_fb_helper.c @@ -27,8 +27,6 @@ * Dave Airlie * Jesse Barnes */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -45,15 +43,6 @@ MODULE_LICENSE("GPL and additional rights"); static LIST_HEAD(kernel_fb_helper_list); -/** - * DOC: fbdev helpers - * - * The fb helper functions are useful to provide an fbdev on top of a drm kernel - * mode setting driver. They can be used mostly independantely from the crtc - * helper functions used by many drivers to implement the kernel mode setting - * interfaces. 
- */ - /* simple single crtc case helper function */ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { @@ -106,16 +95,10 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) if (mode->force) { const char *s; switch (mode->force) { - case DRM_FORCE_OFF: - s = "OFF"; - break; - case DRM_FORCE_ON_DIGITAL: - s = "ON - dig"; - break; + case DRM_FORCE_OFF: s = "OFF"; break; + case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; default: - case DRM_FORCE_ON: - s = "ON"; - break; + case DRM_FORCE_ON: s = "ON"; break; } DRM_INFO("forcing %s connector %s\n", @@ -282,7 +265,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed, if (panic_timeout < 0) return 0; - pr_err("panic occurred, switching back to text console\n"); + printk(KERN_ERR "panic occurred, switching back to text console\n"); return drm_fb_helper_force_kernel_mode(); } EXPORT_SYMBOL(drm_fb_helper_panic); @@ -348,7 +331,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) for (j = 0; j < fb_helper->connector_count; j++) { connector = fb_helper->connector_info[j]->connector; connector->funcs->dpms(connector, dpms_mode); - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, dev->mode_config.dpms_property, dpms_mode); } } @@ -450,7 +433,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) if (!list_empty(&fb_helper->kernel_fb_list)) { list_del(&fb_helper->kernel_fb_list); if (list_empty(&kernel_fb_helper_list)) { - pr_info("drm: unregistered panic notifier\n"); + printk(KERN_INFO "drm: unregistered panic notifier\n"); atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); @@ -741,9 +724,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, /* if driver picks 8 or 16 by default use that for both depth/bpp */ - if (preferred_bpp != sizes.surface_bpp) + if (preferred_bpp != sizes.surface_bpp) { sizes.surface_depth = sizes.surface_bpp = preferred_bpp; - + } /* first up get a count of crtcs now in use and new min/maxes width/heights */ for (i = 0; i < fb_helper->connector_count; i++) { struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; @@ -811,16 +794,18 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, info = fb_helper->fbdev; /* set the fb pointer */ - for (i = 0; i < fb_helper->crtc_count; i++) + for (i = 0; i < fb_helper->crtc_count; i++) { fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; + } if (new_fb) { info->var.pixclock = 0; - if (register_framebuffer(info) < 0) + if (register_framebuffer(info) < 0) { return -EINVAL; + } - dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n", - info->node, info->fix.id); + printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + info->fix.id); } else { drm_fb_helper_set_par(info); @@ -829,7 +814,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, /* Switch back to kernel console on panic */ /* multi card linked list maybe */ if (list_empty(&kernel_fb_helper_list)) { - dev_info(fb_helper->dev->dev, "registered panic notifier\n"); + printk(KERN_INFO "drm: registered panic notifier\n"); atomic_notifier_chain_register(&panic_notifier_list, &paniced); register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); @@ -1017,11 +1002,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict) { bool enable; - if (strict) + if (strict) { enable = connector->status == 
connector_status_connected; - else + } else { enable = connector->status != connector_status_disconnected; - + } return enable; } @@ -1206,8 +1191,9 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, for (c = 0; c < fb_helper->crtc_count; c++) { crtc = &fb_helper->crtc_info[c]; - if ((encoder->possible_crtcs & (1 << c)) == 0) + if ((encoder->possible_crtcs & (1 << c)) == 0) { continue; + } for (o = 0; o < n; o++) if (best_crtcs[o] == crtc) @@ -1260,11 +1246,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) sizeof(struct drm_display_mode *), GFP_KERNEL); enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), GFP_KERNEL); - if (!crtcs || !modes || !enabled) { - DRM_ERROR("Memory allocation failed\n"); - goto out; - } - drm_enable_connectors(fb_helper, enabled); @@ -1303,7 +1284,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) } } -out: kfree(crtcs); kfree(modes); kfree(enabled); @@ -1311,14 +1291,12 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) /** * drm_helper_initial_config - setup a sane initial connector configuration - * @fb_helper: fb_helper device struct - * @bpp_sel: bpp value to use for the framebuffer configuration + * @dev: DRM device * * LOCKING: - * Called at init time by the driver to set up the @fb_helper initial - * configuration, must take the mode config lock. + * Called at init time, must take mode config lock. * - * Scans the CRTCs and connectors and tries to put together an initial setup. + * Scan the CRTCs and connectors and try to put together an initial setup. * At the moment, this is a cloned configuration across all heads with * a new framebuffer object as the backing store. * @@ -1341,9 +1319,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) /* * we shouldn't end up with no modes here. */ - if (count == 0) - dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n"); - + if (count == 0) { + printk(KERN_INFO "No connectors reported connected with modes\n"); + } drm_setup_crtcs(fb_helper); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); @@ -1352,7 +1330,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); /** * drm_fb_helper_hotplug_event - respond to a hotplug notification by - * probing all the outputs attached to the fb + * probing all the outputs attached to the fb. 
* @fb_helper: the drm_fb_helper * * LOCKING: diff --git a/trunk/drivers/gpu/drm/drm_hashtab.c b/trunk/drivers/gpu/drm/drm_hashtab.c index 80254547a3f8..c3745c4d46d8 100644 --- a/trunk/drivers/gpu/drm/drm_hashtab.c +++ b/trunk/drivers/gpu/drm/drm_hashtab.c @@ -67,8 +67,10 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) hashed_key = hash_long(key, ht->order); DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, list, h_list, head) + hlist_for_each(list, h_list) { + entry = hlist_entry(list, struct drm_hash_item, head); DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); + } } static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, @@ -81,7 +83,8 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, hashed_key = hash_long(key, ht->order); h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, list, h_list, head) { + hlist_for_each(list, h_list) { + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return list; if (entry->key > key) @@ -90,24 +93,6 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, return NULL; } -static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht, - unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - struct hlist_node *list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry_rcu(entry, list, h_list, head) { - if (entry->key == key) - return list; - if (entry->key > key) - break; - } - return NULL; -} int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) { @@ -120,7 +105,8 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) hashed_key = hash_long(key, ht->order); h_list = &ht->table[hashed_key]; parent = NULL; - hlist_for_each_entry(entry, list, h_list, head) { + hlist_for_each(list, h_list) { + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return -EINVAL; if (entry->key > key) @@ -128,9 +114,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) parent = list; } if (parent) { - hlist_add_after_rcu(parent, &item->head); + hlist_add_after(parent, &item->head); } else { - hlist_add_head_rcu(&item->head, h_list); + hlist_add_head(&item->head, h_list); } return 0; } @@ -170,7 +156,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, { struct hlist_node *list; - list = drm_ht_find_key_rcu(ht, key); + list = drm_ht_find_key(ht, key); if (!list) return -EINVAL; @@ -185,7 +171,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) list = drm_ht_find_key(ht, key); if (list) { - hlist_del_init_rcu(list); + hlist_del_init(list); return 0; } return -EINVAL; @@ -193,7 +179,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) { - hlist_del_init_rcu(&item->head); + hlist_del_init(&item->head); return 0; } EXPORT_SYMBOL(drm_ht_remove_item); diff --git a/trunk/drivers/gpu/drm/drm_ioctl.c b/trunk/drivers/gpu/drm/drm_ioctl.c index e77bd8b57df2..23dd97506f28 100644 --- a/trunk/drivers/gpu/drm/drm_ioctl.c +++ b/trunk/drivers/gpu/drm/drm_ioctl.c @@ -287,9 +287,6 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) req->value |= dev->driver->prime_fd_to_handle ? 
DRM_PRIME_CAP_IMPORT : 0; req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0; break; - case DRM_CAP_TIMESTAMP_MONOTONIC: - req->value = drm_timestamp_monotonic; - break; default: return -EINVAL; } diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c index 19c01ca3cc76..3a3d0ce891b9 100644 --- a/trunk/drivers/gpu/drm/drm_irq.c +++ b/trunk/drivers/gpu/drm/drm_irq.c @@ -106,7 +106,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) s64 diff_ns; int vblrc; struct timeval tvblank; - int count = DRM_TIMESTAMP_MAXRETRIES; /* Prevent vblank irq processing while disabling vblank irqs, * so no updates of timestamps or count can happen after we've @@ -132,10 +131,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) do { dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); - } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc); - - if (!count) - vblrc = 0; + } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc)); /* Compute time difference to stored timestamp of last vblank * as updated by last invocation of drm_handle_vblank() in vblank irq. @@ -580,8 +576,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, unsigned flags, struct drm_crtc *refcrtc) { - ktime_t stime, etime, mono_time_offset; - struct timeval tv_etime; + struct timeval stime, raw_time; struct drm_display_mode *mode; int vbl_status, vtotal, vdisplay; int vpos, hpos, i; @@ -630,15 +625,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, preempt_disable(); /* Get system timestamp before query. */ - stime = ktime_get(); + do_gettimeofday(&stime); /* Get vertical and horizontal scanout pos. vpos, hpos. */ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos); /* Get system timestamp after query. */ - etime = ktime_get(); - if (!drm_timestamp_monotonic) - mono_time_offset = ktime_get_monotonic_offset(); + do_gettimeofday(&raw_time); preempt_enable(); @@ -649,7 +642,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, return -EIO; } - duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime); + duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime); /* Accept result with < max_error nsecs timing uncertainty. */ if (duration_ns <= (s64) *max_error) @@ -696,20 +689,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, vbl_status |= 0x8; } - if (!drm_timestamp_monotonic) - etime = ktime_sub(etime, mono_time_offset); - - /* save this only for debugging purposes */ - tv_etime = ktime_to_timeval(etime); /* Subtract time delta from raw timestamp to get final * vblank_time timestamp for end of vblank. 
*/ - etime = ktime_sub_ns(etime, delta_ns); - *vblank_time = ktime_to_timeval(etime); + *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", crtc, (int)vbl_status, hpos, vpos, - (long)tv_etime.tv_sec, (long)tv_etime.tv_usec, + (long)raw_time.tv_sec, (long)raw_time.tv_usec, (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, (int)duration_ns/1000, i); @@ -721,17 +708,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, } EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); -static struct timeval get_drm_timestamp(void) -{ - ktime_t now; - - now = ktime_get(); - if (!drm_timestamp_monotonic) - now = ktime_sub(now, ktime_get_monotonic_offset()); - - return ktime_to_timeval(now); -} - /** * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent * vblank interval. @@ -769,9 +745,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, } /* GPU high precision timestamp query unsupported or failed. - * Return current monotonic/gettimeofday timestamp as best estimate. + * Return gettimeofday timestamp as best estimate. */ - *tvblank = get_drm_timestamp(); + do_gettimeofday(tvblank); return 0; } @@ -826,47 +802,6 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, } EXPORT_SYMBOL(drm_vblank_count_and_time); -static void send_vblank_event(struct drm_device *dev, - struct drm_pending_vblank_event *e, - unsigned long seq, struct timeval *now) -{ - WARN_ON_SMP(!spin_is_locked(&dev->event_lock)); - e->event.sequence = seq; - e->event.tv_sec = now->tv_sec; - e->event.tv_usec = now->tv_usec; - - list_add_tail(&e->base.link, - &e->base.file_priv->event_list); - wake_up_interruptible(&e->base.file_priv->event_wait); - trace_drm_vblank_event_delivered(e->base.pid, e->pipe, - e->event.sequence); -} - -/** - * drm_send_vblank_event - helper to send vblank event after pageflip - * @dev: DRM device - * @crtc: CRTC in question - * @e: the event to send - * - * Updates sequence # and timestamp on event, and sends it to userspace. - * Caller must hold event lock. - */ -void drm_send_vblank_event(struct drm_device *dev, int crtc, - struct drm_pending_vblank_event *e) -{ - struct timeval now; - unsigned int seq; - if (crtc >= 0) { - seq = drm_vblank_count_and_time(dev, crtc, &now); - } else { - seq = 0; - - now = get_drm_timestamp(); - } - send_vblank_event(dev, e, seq, &now); -} -EXPORT_SYMBOL(drm_send_vblank_event); - /** * drm_update_vblank_count - update the master vblank counter * @dev: DRM device @@ -1001,13 +936,6 @@ void drm_vblank_put(struct drm_device *dev, int crtc) } EXPORT_SYMBOL(drm_vblank_put); -/** - * drm_vblank_off - disable vblank events on a CRTC - * @dev: DRM device - * @crtc: CRTC in question - * - * Caller must hold event lock. 
- */ void drm_vblank_off(struct drm_device *dev, int crtc) { struct drm_pending_vblank_event *e, *t; @@ -1021,19 +949,22 @@ void drm_vblank_off(struct drm_device *dev, int crtc) /* Send any queued vblank events, lest the natives grow disquiet */ seq = drm_vblank_count_and_time(dev, crtc, &now); - - spin_lock(&dev->event_lock); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != crtc) continue; DRM_DEBUG("Sending premature vblank event on disable: \ wanted %d, current %d\n", e->event.sequence, seq); - list_del(&e->base.link); + + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, e->pipe); - send_vblank_event(dev, e, seq, &now); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, + e->event.sequence); } - spin_unlock(&dev->event_lock); spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } @@ -1176,9 +1107,15 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, e->event.sequence = vblwait->request.sequence; if ((seq - vblwait->request.sequence) <= (1 << 23)) { + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, pipe); - send_vblank_event(dev, e, seq, &now); + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); vblwait->reply.sequence = seq; + trace_drm_vblank_event_delivered(current->pid, pipe, + vblwait->request.sequence); } else { /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); @@ -1319,9 +1256,14 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc) DRM_DEBUG("vblank event on %d, current %d\n", e->event.sequence, seq); - list_del(&e->base.link); + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, e->pipe); - send_vblank_event(dev, e, seq, &now); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, + e->event.sequence); } spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c index d8da30e90db5..59450f39bf96 100644 --- a/trunk/drivers/gpu/drm/drm_modes.c +++ b/trunk/drivers/gpu/drm/drm_modes.c @@ -46,7 +46,7 @@ * * Describe @mode using DRM_DEBUG. */ -void drm_mode_debug_printmodeline(const struct drm_display_mode *mode) +void drm_mode_debug_printmodeline(struct drm_display_mode *mode) { DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " "0x%x 0x%x\n", @@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat); * RETURNS: * @mode->hdisplay */ -int drm_mode_width(const struct drm_display_mode *mode) +int drm_mode_width(struct drm_display_mode *mode) { return mode->hdisplay; @@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width); * RETURNS: * @mode->vdisplay */ -int drm_mode_height(const struct drm_display_mode *mode) +int drm_mode_height(struct drm_display_mode *mode) { return mode->vdisplay; } @@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate); * RETURNS: * True if the modes are equal, false otherwise. 
*/ -bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) +bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2) { /* do clock check convert to PICOS so fb modes get matched * the same */ diff --git a/trunk/drivers/gpu/drm/drm_pci.c b/trunk/drivers/gpu/drm/drm_pci.c index 754bc96e10c7..ba33144257e5 100644 --- a/trunk/drivers/gpu/drm/drm_pci.c +++ b/trunk/drivers/gpu/drm/drm_pci.c @@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) { struct pci_dev *root; int pos; - u32 lnkcap = 0, lnkcap2 = 0; + u32 lnkcap, lnkcap2; *mask = 0; if (!dev->pdev) diff --git a/trunk/drivers/gpu/drm/drm_stub.c b/trunk/drivers/gpu/drm/drm_stub.c index 200e104f1fa0..c236fd27eba6 100644 --- a/trunk/drivers/gpu/drm/drm_stub.c +++ b/trunk/drivers/gpu/drm/drm_stub.c @@ -46,24 +46,16 @@ EXPORT_SYMBOL(drm_vblank_offdelay); unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ EXPORT_SYMBOL(drm_timestamp_precision); -/* - * Default to use monotonic timestamps for wait-for-vblank and page-flip - * complete events. - */ -unsigned int drm_timestamp_monotonic = 1; - MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); -MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); module_param_named(debug, drm_debug, int, 0600); module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); -module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); struct idr drm_minors_idr; @@ -229,20 +221,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, if (!file_priv->master) return -EINVAL; - if (file_priv->minor->master) - return -EINVAL; - - mutex_lock(&dev->struct_mutex); - file_priv->minor->master = drm_master_get(file_priv->master); - file_priv->is_master = 1; - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, file_priv, false); - if (unlikely(ret != 0)) { - file_priv->is_master = 0; - drm_master_put(&file_priv->minor->master); + if (!file_priv->minor->master && + file_priv->minor->master != file_priv->master) { + mutex_lock(&dev->struct_mutex); + file_priv->minor->master = drm_master_get(file_priv->master); + file_priv->is_master = 1; + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, file_priv, false); + if (unlikely(ret != 0)) { + file_priv->is_master = 0; + drm_master_put(&file_priv->minor->master); + } } + mutex_unlock(&dev->struct_mutex); } - mutex_unlock(&dev->struct_mutex); return 0; } @@ -500,7 +492,10 @@ void drm_put_dev(struct drm_device *dev) drm_put_minor(&dev->primary); list_del(&dev->driver_item); - kfree(dev->devname); + if (dev->devname) { + kfree(dev->devname); + dev->devname = NULL; + } kfree(dev); } EXPORT_SYMBOL(drm_put_dev); diff --git a/trunk/drivers/gpu/drm/drm_sysfs.c b/trunk/drivers/gpu/drm/drm_sysfs.c index 02296653a058..05cd8fe062af 100644 --- a/trunk/drivers/gpu/drm/drm_sysfs.c +++ b/trunk/drivers/gpu/drm/drm_sysfs.c @@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device, uint64_t dpms_status; int ret; - ret = drm_object_property_get_value(&connector->base, + ret = drm_connector_property_get_value(connector, dev->mode_config.dpms_property, &dpms_status); 
if (ret) @@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device, return 0; } - ret = drm_object_property_get_value(&connector->base, prop, &subconnector); + ret = drm_connector_property_get_value(connector, prop, &subconnector); if (ret) return 0; @@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device, return 0; } - ret = drm_object_property_get_value(&connector->base, prop, &subconnector); + ret = drm_connector_property_get_value(connector, prop, &subconnector); if (ret) return 0; diff --git a/trunk/drivers/gpu/drm/exynos/Kconfig b/trunk/drivers/gpu/drm/exynos/Kconfig index 86fb75d3fcad..fc345d4ebb03 100644 --- a/trunk/drivers/gpu/drm/exynos/Kconfig +++ b/trunk/drivers/gpu/drm/exynos/Kconfig @@ -10,12 +10,6 @@ config DRM_EXYNOS Choose this option if you have a Samsung SoC EXYNOS chipset. If M is selected the module will be called exynosdrm. -config DRM_EXYNOS_IOMMU - bool "EXYNOS DRM IOMMU Support" - depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU - help - Choose this option if you want to use IOMMU feature for DRM. - config DRM_EXYNOS_DMABUF bool "EXYNOS DRM DMABUF" depends on DRM_EXYNOS diff --git a/trunk/drivers/gpu/drm/exynos/Makefile b/trunk/drivers/gpu/drm/exynos/Makefile index 26813b8a5056..eb651ca8e2a8 100644 --- a/trunk/drivers/gpu/drm/exynos/Makefile +++ b/trunk/drivers/gpu/drm/exynos/Makefile @@ -8,7 +8,6 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \ exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ exynos_drm_plane.o -exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \ diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c index 72bf97b96ba0..118c117b3226 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -33,42 +33,73 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, unsigned int flags, struct exynos_drm_gem_buf *buf) { + dma_addr_t start_addr; + unsigned int npages, i = 0; + struct scatterlist *sgl; int ret = 0; - enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS; DRM_DEBUG_KMS("%s\n", __FILE__); + if (IS_NONCONTIG_BUFFER(flags)) { + DRM_DEBUG_KMS("not support allocation type.\n"); + return -EINVAL; + } + if (buf->dma_addr) { DRM_DEBUG_KMS("already allocated.\n"); return 0; } - init_dma_attrs(&buf->dma_attrs); + if (buf->size >= SZ_1M) { + npages = buf->size >> SECTION_SHIFT; + buf->page_size = SECTION_SIZE; + } else if (buf->size >= SZ_64K) { + npages = buf->size >> 16; + buf->page_size = SZ_64K; + } else { + npages = buf->size >> PAGE_SHIFT; + buf->page_size = PAGE_SIZE; + } - if (flags & EXYNOS_BO_NONCONTIG) - attr = DMA_ATTR_WRITE_COMBINE; + buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!buf->sgt) { + DRM_ERROR("failed to allocate sg table.\n"); + return -ENOMEM; + } - dma_set_attr(attr, &buf->dma_attrs); + ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL); + if (ret < 0) { + DRM_ERROR("failed to initialize sg table.\n"); + kfree(buf->sgt); + buf->sgt = NULL; + return -ENOMEM; + } - buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size, - &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs); + buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size, + &buf->dma_addr, GFP_KERNEL); if (!buf->kvaddr) { DRM_ERROR("failed to allocate buffer.\n"); - return 
-ENOMEM; + ret = -ENOMEM; + goto err1; } - buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!buf->sgt) { - DRM_ERROR("failed to allocate sg table.\n"); + buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL); + if (!buf->pages) { + DRM_ERROR("failed to allocate pages.\n"); ret = -ENOMEM; - goto err_free_attrs; + goto err2; } - ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr, - buf->size); - if (ret < 0) { - DRM_ERROR("failed to get sgtable.\n"); - goto err_free_sgt; + sgl = buf->sgt->sgl; + start_addr = buf->dma_addr; + + while (i < npages) { + buf->pages[i] = phys_to_page(start_addr); + sg_set_page(sgl, buf->pages[i], buf->page_size, 0); + sg_dma_address(sgl) = start_addr; + start_addr += buf->page_size; + sgl = sg_next(sgl); + i++; } DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", @@ -77,14 +108,14 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, buf->size); return ret; - -err_free_sgt: +err2: + dma_free_writecombine(dev->dev, buf->size, buf->kvaddr, + (dma_addr_t)buf->dma_addr); + buf->dma_addr = (dma_addr_t)NULL; +err1: + sg_free_table(buf->sgt); kfree(buf->sgt); buf->sgt = NULL; -err_free_attrs: - dma_free_attrs(dev->dev, buf->size, buf->kvaddr, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); - buf->dma_addr = (dma_addr_t)NULL; return ret; } @@ -94,6 +125,16 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev, { DRM_DEBUG_KMS("%s.\n", __FILE__); + /* + * release only physically continuous memory and + * non-continuous memory would be released by exynos + * gem framework. + */ + if (IS_NONCONTIG_BUFFER(flags)) { + DRM_DEBUG_KMS("not support allocation type.\n"); + return; + } + if (!buf->dma_addr) { DRM_DEBUG_KMS("dma_addr is invalid.\n"); return; @@ -109,8 +150,11 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev, kfree(buf->sgt); buf->sgt = NULL; - dma_free_attrs(dev->dev, buf->size, buf->kvaddr, - (dma_addr_t)buf->dma_addr, &buf->dma_attrs); + kfree(buf->pages); + buf->pages = NULL; + + dma_free_writecombine(dev->dev, buf->size, buf->kvaddr, + (dma_addr_t)buf->dma_addr); buf->dma_addr = (dma_addr_t)NULL; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h index 25cf16285033..3388e4eb4ba2 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h @@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, void exynos_drm_fini_buf(struct drm_device *dev, struct exynos_drm_gem_buf *buffer); -/* allocate physical memory region and setup sgt. */ +/* allocate physical memory region and setup sgt and pages. */ int exynos_drm_alloc_buf(struct drm_device *dev, struct exynos_drm_gem_buf *buf, unsigned int flags); -/* release physical memory region, and sgt. */ +/* release physical memory region, sgt and pages. 
*/ void exynos_drm_free_buf(struct drm_device *dev, unsigned int flags, struct exynos_drm_gem_buf *buffer); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 2efa4b031d73..fce245f64c4f 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -236,21 +236,16 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, goto out; } - spin_lock_irq(&dev->event_lock); list_add_tail(&event->base.link, &dev_priv->pageflip_event_list); - spin_unlock_irq(&dev->event_lock); crtc->fb = fb; ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL); if (ret) { crtc->fb = old_fb; - - spin_lock_irq(&dev->event_lock); drm_vblank_put(dev, exynos_crtc->pipe); list_del(&event->base.link); - spin_unlock_irq(&dev->event_lock); goto out; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 539da9f4eb97..fae1f2ec886c 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -30,22 +30,26 @@ #include -static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev, - struct exynos_drm_gem_buf *buf) +static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages, + unsigned int page_size) { struct sg_table *sgt = NULL; - int ret; + struct scatterlist *sgl; + int i, ret; sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) goto out; - ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr, - buf->dma_addr, buf->size); - if (ret < 0) { - DRM_ERROR("failed to get sgtable.\n"); + ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL); + if (ret) goto err_free_sgt; - } + + if (page_size < PAGE_SIZE) + page_size = PAGE_SIZE; + + for_each_sg(sgt->sgl, sgl, nr_pages, i) + sg_set_page(sgl, pages[i], page_size, 0); return sgt; @@ -64,30 +68,32 @@ static struct sg_table * struct drm_device *dev = gem_obj->base.dev; struct exynos_drm_gem_buf *buf; struct sg_table *sgt = NULL; + unsigned int npages; int nents; DRM_DEBUG_PRIME("%s\n", __FILE__); - buf = gem_obj->buffer; - if (!buf) { - DRM_ERROR("buffer is null.\n"); - return sgt; - } - mutex_lock(&dev->struct_mutex); - sgt = exynos_get_sgt(dev, buf); - if (!sgt) + buf = gem_obj->buffer; + + /* there should always be pages allocated. 
*/ + if (!buf->pages) { + DRM_ERROR("pages is null.\n"); goto err_unlock; + } - nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); - if (!nents) { - DRM_ERROR("failed to map sgl with iommu.\n"); - sgt = NULL; + npages = buf->size / buf->page_size; + + sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size); + if (!sgt) { + DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n"); goto err_unlock; } + nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); - DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); + DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n", + npages, buf->size, buf->page_size); err_unlock: mutex_unlock(&dev->struct_mutex); @@ -99,7 +105,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir) { dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); - sg_free_table(sgt); kfree(sgt); sgt = NULL; @@ -191,6 +196,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, struct scatterlist *sgl; struct exynos_drm_gem_obj *exynos_gem_obj; struct exynos_drm_gem_buf *buffer; + struct page *page; int ret; DRM_DEBUG_PRIME("%s\n", __FILE__); @@ -227,27 +233,38 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, goto err_unmap_attach; } + buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL); + if (!buffer->pages) { + DRM_ERROR("failed to allocate pages.\n"); + ret = -ENOMEM; + goto err_free_buffer; + } + exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); if (!exynos_gem_obj) { ret = -ENOMEM; - goto err_free_buffer; + goto err_free_pages; } sgl = sgt->sgl; - buffer->size = dma_buf->size; - buffer->dma_addr = sg_dma_address(sgl); - if (sgt->nents == 1) { + buffer->dma_addr = sg_dma_address(sgt->sgl); + buffer->size = sg_dma_len(sgt->sgl); + /* always physically continuous memory if sgt->nents is 1. */ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; } else { - /* - * this case could be CONTIG or NONCONTIG type but for now - * sets NONCONTIG. - * TODO. we have to find a way that exporter can notify - * the type of its own buffer to importer. - */ + unsigned int i = 0; + + buffer->dma_addr = sg_dma_address(sgl); + while (i < sgt->nents) { + buffer->pages[i] = sg_page(sgl); + buffer->size += sg_dma_len(sgl); + sgl = sg_next(sgl); + i++; + } + exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; } @@ -260,6 +277,9 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, return &exynos_gem_obj->base; +err_free_pages: + kfree(buffer->pages); + buffer->pages = NULL; err_free_buffer: kfree(buffer); buffer = NULL; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c index 2b287d2fc92e..1de7baafddd0 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -40,7 +40,6 @@ #include "exynos_drm_vidi.h" #include "exynos_drm_dmabuf.h" #include "exynos_drm_g2d.h" -#include "exynos_drm_iommu.h" #define DRIVER_NAME "exynos" #define DRIVER_DESC "Samsung SoC DRM" @@ -67,18 +66,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) INIT_LIST_HEAD(&private->pageflip_event_list); dev->dev_private = (void *)private; - /* - * create mapping to manage iommu table and set a pointer to iommu - * mapping structure to iommu_mapping of private data. - * also this iommu_mapping can be used to check if iommu is supported - * or not. 
- */ - ret = drm_create_iommu_mapping(dev); - if (ret < 0) { - DRM_ERROR("failed to create iommu mapping.\n"); - goto err_crtc; - } - drm_mode_config_init(dev); /* init kms poll for handling hpd */ @@ -93,7 +80,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) for (nr = 0; nr < MAX_CRTC; nr++) { ret = exynos_drm_crtc_create(dev, nr); if (ret) - goto err_release_iommu_mapping; + goto err_crtc; } for (nr = 0; nr < MAX_PLANE; nr++) { @@ -102,12 +89,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) plane = exynos_plane_init(dev, possible_crtcs, false); if (!plane) - goto err_release_iommu_mapping; + goto err_crtc; } ret = drm_vblank_init(dev, MAX_CRTC); if (ret) - goto err_release_iommu_mapping; + goto err_crtc; /* * probe sub drivers such as display controller and hdmi driver, @@ -139,8 +126,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) exynos_drm_device_unregister(dev); err_vblank: drm_vblank_cleanup(dev); -err_release_iommu_mapping: - drm_release_iommu_mapping(dev); err_crtc: drm_mode_config_cleanup(dev); kfree(private); @@ -157,8 +142,6 @@ static int exynos_drm_unload(struct drm_device *dev) drm_vblank_cleanup(dev); drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); - - drm_release_iommu_mapping(dev); kfree(dev->dev_private); dev->dev_private = NULL; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h index 9c9c2dc75828..a34231036496 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -231,7 +231,8 @@ struct exynos_drm_g2d_private { struct device *dev; struct list_head inuse_cmdlist; struct list_head event_list; - struct list_head userptr_list; + struct list_head gem_list; + unsigned int gem_nr; }; struct drm_exynos_file_private { @@ -240,13 +241,6 @@ struct drm_exynos_file_private { /* * Exynos drm private structure. - * - * @da_start: start address to device address space. - * with iommu, device address space starts from this address - * otherwise default one. - * @da_space_size: size of device address space. - * if 0 then default value is used for it. - * @da_space_order: order to device address space. */ struct exynos_drm_private { struct drm_fb_helper *fb_helper; @@ -261,10 +255,6 @@ struct exynos_drm_private { struct drm_crtc *crtc[MAX_CRTC]; struct drm_property *plane_zpos_property; struct drm_property *crtc_mode_property; - - unsigned long da_start; - unsigned long da_space_size; - unsigned long da_space_order; }; /* diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c index e5001dd85afc..f2df06c603f7 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -234,39 +234,6 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) exynos_encoder->dpms = DRM_MODE_DPMS_ON; } -void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb) -{ - struct exynos_drm_encoder *exynos_encoder; - struct exynos_drm_overlay_ops *overlay_ops; - struct exynos_drm_manager *manager; - struct drm_device *dev = fb->dev; - struct drm_encoder *encoder; - - /* - * make sure that overlay data are updated to real hardware - * for all encoders. - */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - exynos_encoder = to_exynos_encoder(encoder); - - /* if exynos was disabled, just ignor it. 
*/ - if (exynos_encoder->dpms > DRM_MODE_DPMS_ON) - continue; - - manager = exynos_encoder->manager; - overlay_ops = manager->overlay_ops; - - /* - * wait for vblank interrupt - * - this makes sure that overlay data are updated to - * real hardware. - */ - if (overlay_ops->wait_for_vblank) - overlay_ops->wait_for_vblank(manager->dev); - } -} - - static void exynos_drm_encoder_disable(struct drm_encoder *encoder) { struct drm_plane *plane; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h index 88bb25a2a917..6470d9ddf5a1 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h @@ -46,6 +46,5 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data); -void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb); #endif diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c index 7413f4b729b0..4ef4cd3f9936 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -30,13 +30,10 @@ #include #include #include -#include #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_gem.h" -#include "exynos_drm_iommu.h" -#include "exynos_drm_encoder.h" #define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) @@ -53,32 +50,6 @@ struct exynos_drm_fb { struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER]; }; -static int check_fb_gem_memory_type(struct drm_device *drm_dev, - struct exynos_drm_gem_obj *exynos_gem_obj) -{ - unsigned int flags; - - /* - * if exynos drm driver supports iommu then framebuffer can use - * all the buffer types. - */ - if (is_drm_iommu_supported(drm_dev)) - return 0; - - flags = exynos_gem_obj->flags; - - /* - * without iommu support, not support physically non-continuous memory - * for framebuffer. - */ - if (IS_NONCONTIG_BUFFER(flags)) { - DRM_ERROR("cannot use this gem memory type for fb.\n"); - return -EINVAL; - } - - return 0; -} - static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); @@ -86,9 +57,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) DRM_DEBUG_KMS("%s\n", __FILE__); - /* make sure that overlay data are updated before relesing fb. 
*/ - exynos_drm_encoder_complete_scanout(fb); - drm_framebuffer_cleanup(fb); for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { @@ -160,25 +128,14 @@ exynos_drm_framebuffer_init(struct drm_device *dev, struct drm_gem_object *obj) { struct exynos_drm_fb *exynos_fb; - struct exynos_drm_gem_obj *exynos_gem_obj; int ret; - exynos_gem_obj = to_exynos_gem_obj(obj); - - ret = check_fb_gem_memory_type(dev, exynos_gem_obj); - if (ret < 0) { - DRM_ERROR("cannot use this gem memory type for fb.\n"); - return ERR_PTR(-EINVAL); - } - exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); if (!exynos_fb) { DRM_ERROR("failed to allocate exynos drm framebuffer\n"); return ERR_PTR(-ENOMEM); } - exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; - ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret) { DRM_ERROR("failed to initialize framebuffer\n"); @@ -186,6 +143,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev, } drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); + exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); return &exynos_fb->fb; } @@ -256,9 +214,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); for (i = 1; i < exynos_fb->buf_cnt; i++) { - struct exynos_drm_gem_obj *exynos_gem_obj; - int ret; - obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]); if (!obj) { @@ -267,15 +222,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-ENOENT); } - exynos_gem_obj = to_exynos_gem_obj(obj); - - ret = check_fb_gem_memory_type(dev, exynos_gem_obj); - if (ret < 0) { - DRM_ERROR("cannot use this gem memory type for fb.\n"); - exynos_drm_fb_destroy(fb); - return ERR_PTR(ret); - } - exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index a2232792e0c0..e7466c4414cb 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -46,38 +46,8 @@ struct exynos_drm_fbdev { struct exynos_drm_gem_obj *exynos_gem_obj; }; -static int exynos_drm_fb_mmap(struct fb_info *info, - struct vm_area_struct *vma) -{ - struct drm_fb_helper *helper = info->par; - struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); - struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; - struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer; - unsigned long vm_size; - int ret; - - DRM_DEBUG_KMS("%s\n", __func__); - - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - - vm_size = vma->vm_end - vma->vm_start; - - if (vm_size > buffer->size) - return -EINVAL; - - ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->kvaddr, - buffer->dma_addr, buffer->size, &buffer->dma_attrs); - if (ret < 0) { - DRM_ERROR("failed to mmap.\n"); - return ret; - } - - return 0; -} - static struct fb_ops exynos_drm_fb_ops = { .owner = THIS_MODULE, - .fb_mmap = exynos_drm_fb_mmap, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, @@ -117,8 +87,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; fbi->screen_base = buffer->kvaddr + offset; - fbi->fix.smem_start = (unsigned long) - (page_to_phys(sg_page(buffer->sgt->sgl)) + offset); + fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) + + offset); fbi->screen_size = size; fbi->fix.smem_len = size; diff 
--git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 00bd266a31bb..e08478f19f1a 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -25,7 +25,6 @@ #include "exynos_drm_drv.h" #include "exynos_drm_fbdev.h" #include "exynos_drm_crtc.h" -#include "exynos_drm_iommu.h" /* * FIMD is stand for Fully Interactive Mobile Display and @@ -624,6 +623,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned long flags; + bool is_checked = false; spin_lock_irqsave(&drm_dev->event_lock, flags); @@ -633,6 +633,8 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) if (crtc != e->pipe) continue; + is_checked = true; + do_gettimeofday(&now); e->event.sequence = 0; e->event.tv_sec = now.tv_sec; @@ -640,7 +642,22 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); - drm_vblank_put(drm_dev, crtc); + } + + if (is_checked) { + /* + * call drm_vblank_put only in case that drm_vblank_get was + * called. + */ + if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0) + drm_vblank_put(drm_dev, crtc); + + /* + * don't off vblank if vblank_disable_allowed is 1, + * because vblank would be off by timer handler. + */ + if (!drm_dev->vblank_disable_allowed) + drm_vblank_off(drm_dev, crtc); } spin_unlock_irqrestore(&drm_dev->event_lock, flags); @@ -692,10 +709,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev) */ drm_dev->vblank_disable_allowed = 1; - /* attach this sub driver to iommu mapping if supported. */ - if (is_drm_iommu_supported(drm_dev)) - drm_iommu_attach_device(drm_dev, dev); - return 0; } @@ -703,9 +716,7 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev) { DRM_DEBUG_KMS("%s\n", __FILE__); - /* detach this sub driver from iommu mapping if supported. */ - if (is_drm_iommu_supported(drm_dev)) - drm_iommu_detach_device(drm_dev, dev); + /* TODO. 
*/ } static int fimd_calc_clkdiv(struct fimd_context *ctx, @@ -846,16 +857,18 @@ static int __devinit fimd_probe(struct platform_device *pdev) if (!ctx) return -ENOMEM; - ctx->bus_clk = devm_clk_get(dev, "fimd"); + ctx->bus_clk = clk_get(dev, "fimd"); if (IS_ERR(ctx->bus_clk)) { dev_err(dev, "failed to get bus clock\n"); - return PTR_ERR(ctx->bus_clk); + ret = PTR_ERR(ctx->bus_clk); + goto err_clk_get; } - ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); + ctx->lcd_clk = clk_get(dev, "sclk_fimd"); if (IS_ERR(ctx->lcd_clk)) { dev_err(dev, "failed to get lcd clock\n"); - return PTR_ERR(ctx->lcd_clk); + ret = PTR_ERR(ctx->lcd_clk); + goto err_bus_clk; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -863,13 +876,14 @@ static int __devinit fimd_probe(struct platform_device *pdev) ctx->regs = devm_request_and_ioremap(&pdev->dev, res); if (!ctx->regs) { dev_err(dev, "failed to map registers\n"); - return -ENXIO; + ret = -ENXIO; + goto err_clk; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "irq request failed.\n"); - return -ENXIO; + goto err_clk; } ctx->irq = res->start; @@ -878,7 +892,7 @@ static int __devinit fimd_probe(struct platform_device *pdev) 0, "drm_fimd", ctx); if (ret) { dev_err(dev, "irq request failed.\n"); - return ret; + goto err_clk; } ctx->vidcon0 = pdata->vidcon0; @@ -912,6 +926,17 @@ static int __devinit fimd_probe(struct platform_device *pdev) exynos_drm_subdrv_register(subdrv); return 0; + +err_clk: + clk_disable(ctx->lcd_clk); + clk_put(ctx->lcd_clk); + +err_bus_clk: + clk_disable(ctx->bus_clk); + clk_put(ctx->bus_clk); + +err_clk_get: + return ret; } static int __devexit fimd_remove(struct platform_device *pdev) @@ -935,6 +960,9 @@ static int __devexit fimd_remove(struct platform_device *pdev) out: pm_runtime_disable(dev); + clk_put(ctx->lcd_clk); + clk_put(ctx->bus_clk); + return 0; } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 6ffa0763c078..f7aab24ea46c 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -17,14 +17,11 @@ #include #include #include -#include -#include #include #include #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" -#include "exynos_drm_iommu.h" #define G2D_HW_MAJOR_VER 4 #define G2D_HW_MINOR_VER 1 @@ -95,21 +92,11 @@ #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) -#define MAX_BUF_ADDR_NR 6 - -/* maximum buffer pool size of userptr is 64MB as default */ -#define MAX_POOL (64 * 1024 * 1024) - -enum { - BUF_TYPE_GEM = 1, - BUF_TYPE_USERPTR, -}; - /* cmdlist data structure */ struct g2d_cmdlist { - u32 head; - unsigned long data[G2D_CMDLIST_DATA_NUM]; - u32 last; /* last data offset */ + u32 head; + u32 data[G2D_CMDLIST_DATA_NUM]; + u32 last; /* last data offset */ }; struct drm_exynos_pending_g2d_event { @@ -117,26 +104,15 @@ struct drm_exynos_pending_g2d_event { struct drm_exynos_g2d_event event; }; -struct g2d_cmdlist_userptr { +struct g2d_gem_node { struct list_head list; - dma_addr_t dma_addr; - unsigned long userptr; - unsigned long size; - struct page **pages; - unsigned int npages; - struct sg_table *sgt; - struct vm_area_struct *vma; - atomic_t refcount; - bool in_pool; - bool out_of_list; + unsigned int handle; }; struct g2d_cmdlist_node { struct list_head list; struct g2d_cmdlist *cmdlist; - unsigned int map_nr; - unsigned long handles[MAX_BUF_ADDR_NR]; - unsigned int 
obj_type[MAX_BUF_ADDR_NR]; + unsigned int gem_nr; dma_addr_t dma_addr; struct drm_exynos_pending_g2d_event *event; @@ -146,7 +122,6 @@ struct g2d_runqueue_node { struct list_head list; struct list_head run_cmdlist; struct list_head event_list; - struct drm_file *filp; pid_t pid; struct completion complete; int async; @@ -168,33 +143,23 @@ struct g2d_data { struct mutex cmdlist_mutex; dma_addr_t cmdlist_pool; void *cmdlist_pool_virt; - struct dma_attrs cmdlist_dma_attrs; /* runqueue*/ struct g2d_runqueue_node *runqueue_node; struct list_head runqueue; struct mutex runqueue_mutex; struct kmem_cache *runqueue_slab; - - unsigned long current_pool; - unsigned long max_pool; }; static int g2d_init_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; struct g2d_cmdlist_node *node = g2d->cmdlist_node; - struct exynos_drm_subdrv *subdrv = &g2d->subdrv; int nr; int ret; - init_dma_attrs(&g2d->cmdlist_dma_attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); - - g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev, - G2D_CMDLIST_POOL_SIZE, - &g2d->cmdlist_pool, GFP_KERNEL, - &g2d->cmdlist_dma_attrs); + g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE, + &g2d->cmdlist_pool, GFP_KERNEL); if (!g2d->cmdlist_pool_virt) { dev_err(dev, "failed to allocate dma memory\n"); return -ENOMEM; @@ -219,20 +184,18 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) return 0; err: - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, - g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, + g2d->cmdlist_pool); return ret; } static void g2d_fini_cmdlist(struct g2d_data *g2d) { - struct exynos_drm_subdrv *subdrv = &g2d->subdrv; + struct device *dev = g2d->dev; kfree(g2d->cmdlist_node); - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, - g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, + g2d->cmdlist_pool); } static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) @@ -282,300 +245,62 @@ static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv, list_add_tail(&node->event->base.link, &g2d_priv->event_list); } -static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev, - unsigned long obj, - bool force) -{ - struct g2d_cmdlist_userptr *g2d_userptr = - (struct g2d_cmdlist_userptr *)obj; - - if (!obj) - return; - - if (force) - goto out; - - atomic_dec(&g2d_userptr->refcount); - - if (atomic_read(&g2d_userptr->refcount) > 0) - return; - - if (g2d_userptr->in_pool) - return; - -out: - exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt, - DMA_BIDIRECTIONAL); - - exynos_gem_put_pages_to_userptr(g2d_userptr->pages, - g2d_userptr->npages, - g2d_userptr->vma); - - if (!g2d_userptr->out_of_list) - list_del_init(&g2d_userptr->list); - - sg_free_table(g2d_userptr->sgt); - kfree(g2d_userptr->sgt); - g2d_userptr->sgt = NULL; - - kfree(g2d_userptr->pages); - g2d_userptr->pages = NULL; - kfree(g2d_userptr); - g2d_userptr = NULL; -} - -dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, - unsigned long userptr, - unsigned long size, - struct drm_file *filp, - unsigned long *obj) -{ - struct drm_exynos_file_private *file_priv = filp->driver_priv; - struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; - struct g2d_cmdlist_userptr *g2d_userptr; - struct g2d_data *g2d; - struct page **pages; - struct sg_table *sgt; - 
struct vm_area_struct *vma; - unsigned long start, end; - unsigned int npages, offset; - int ret; - - if (!size) { - DRM_ERROR("invalid userptr size.\n"); - return ERR_PTR(-EINVAL); - } - - g2d = dev_get_drvdata(g2d_priv->dev); - - /* check if userptr already exists in userptr_list. */ - list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) { - if (g2d_userptr->userptr == userptr) { - /* - * also check size because there could be same address - * and different size. - */ - if (g2d_userptr->size == size) { - atomic_inc(&g2d_userptr->refcount); - *obj = (unsigned long)g2d_userptr; - - return &g2d_userptr->dma_addr; - } - - /* - * at this moment, maybe g2d dma is accessing this - * g2d_userptr memory region so just remove this - * g2d_userptr object from userptr_list not to be - * referred again and also except it the userptr - * pool to be released after the dma access completion. - */ - g2d_userptr->out_of_list = true; - g2d_userptr->in_pool = false; - list_del_init(&g2d_userptr->list); - - break; - } - } - - g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL); - if (!g2d_userptr) { - DRM_ERROR("failed to allocate g2d_userptr.\n"); - return ERR_PTR(-ENOMEM); - } - - atomic_set(&g2d_userptr->refcount, 1); - - start = userptr & PAGE_MASK; - offset = userptr & ~PAGE_MASK; - end = PAGE_ALIGN(userptr + size); - npages = (end - start) >> PAGE_SHIFT; - g2d_userptr->npages = npages; - - pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL); - if (!pages) { - DRM_ERROR("failed to allocate pages.\n"); - kfree(g2d_userptr); - return ERR_PTR(-ENOMEM); - } - - vma = find_vma(current->mm, userptr); - if (!vma) { - DRM_ERROR("failed to get vm region.\n"); - ret = -EFAULT; - goto err_free_pages; - } - - if (vma->vm_end < userptr + size) { - DRM_ERROR("vma is too small.\n"); - ret = -EFAULT; - goto err_free_pages; - } - - g2d_userptr->vma = exynos_gem_get_vma(vma); - if (!g2d_userptr->vma) { - DRM_ERROR("failed to copy vma.\n"); - ret = -ENOMEM; - goto err_free_pages; - } - - g2d_userptr->size = size; - - ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK, - npages, pages, vma); - if (ret < 0) { - DRM_ERROR("failed to get user pages from userptr.\n"); - goto err_put_vma; - } - - g2d_userptr->pages = pages; - - sgt = kzalloc(sizeof *sgt, GFP_KERNEL); - if (!sgt) { - DRM_ERROR("failed to allocate sg table.\n"); - ret = -ENOMEM; - goto err_free_userptr; - } - - ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, - size, GFP_KERNEL); - if (ret < 0) { - DRM_ERROR("failed to get sgt from pages.\n"); - goto err_free_sgt; - } - - g2d_userptr->sgt = sgt; - - ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt, - DMA_BIDIRECTIONAL); - if (ret < 0) { - DRM_ERROR("failed to map sgt with dma region.\n"); - goto err_free_sgt; - } - - g2d_userptr->dma_addr = sgt->sgl[0].dma_address; - g2d_userptr->userptr = userptr; - - list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list); - - if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) { - g2d->current_pool += npages << PAGE_SHIFT; - g2d_userptr->in_pool = true; - } - - *obj = (unsigned long)g2d_userptr; - - return &g2d_userptr->dma_addr; - -err_free_sgt: - sg_free_table(sgt); - kfree(sgt); - sgt = NULL; - -err_free_userptr: - exynos_gem_put_pages_to_userptr(g2d_userptr->pages, - g2d_userptr->npages, - g2d_userptr->vma); - -err_put_vma: - exynos_gem_put_vma(g2d_userptr->vma); - -err_free_pages: - kfree(pages); - kfree(g2d_userptr); - pages = NULL; - g2d_userptr = NULL; - - return ERR_PTR(ret); -} - -static void 
g2d_userptr_free_all(struct drm_device *drm_dev, - struct g2d_data *g2d, - struct drm_file *filp) +static int g2d_get_cmdlist_gem(struct drm_device *drm_dev, + struct drm_file *file, + struct g2d_cmdlist_node *node) { - struct drm_exynos_file_private *file_priv = filp->driver_priv; + struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; - struct g2d_cmdlist_userptr *g2d_userptr, *n; - - list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list) - if (g2d_userptr->in_pool) - g2d_userptr_put_dma_addr(drm_dev, - (unsigned long)g2d_userptr, - true); - - g2d->current_pool = 0; -} - -static int g2d_map_cmdlist_gem(struct g2d_data *g2d, - struct g2d_cmdlist_node *node, - struct drm_device *drm_dev, - struct drm_file *file) -{ struct g2d_cmdlist *cmdlist = node->cmdlist; + dma_addr_t *addr; int offset; int i; - for (i = 0; i < node->map_nr; i++) { - unsigned long handle; - dma_addr_t *addr; + for (i = 0; i < node->gem_nr; i++) { + struct g2d_gem_node *gem_node; + + gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL); + if (!gem_node) { + dev_err(g2d_priv->dev, "failed to allocate gem node\n"); + return -ENOMEM; + } offset = cmdlist->last - (i * 2 + 1); - handle = cmdlist->data[offset]; - - if (node->obj_type[i] == BUF_TYPE_GEM) { - addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, - file); - if (IS_ERR(addr)) { - node->map_nr = i; - return -EFAULT; - } - } else { - struct drm_exynos_g2d_userptr g2d_userptr; - - if (copy_from_user(&g2d_userptr, (void __user *)handle, - sizeof(struct drm_exynos_g2d_userptr))) { - node->map_nr = i; - return -EFAULT; - } - - addr = g2d_userptr_get_dma_addr(drm_dev, - g2d_userptr.userptr, - g2d_userptr.size, - file, - &handle); - if (IS_ERR(addr)) { - node->map_nr = i; - return -EFAULT; - } + gem_node->handle = cmdlist->data[offset]; + + addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle, + file); + if (IS_ERR(addr)) { + node->gem_nr = i; + kfree(gem_node); + return PTR_ERR(addr); } cmdlist->data[offset] = *addr; - node->handles[i] = handle; + list_add_tail(&gem_node->list, &g2d_priv->gem_list); + g2d_priv->gem_nr++; } return 0; } -static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, - struct g2d_cmdlist_node *node, - struct drm_file *filp) +static void g2d_put_cmdlist_gem(struct drm_device *drm_dev, + struct drm_file *file, + unsigned int nr) { - struct exynos_drm_subdrv *subdrv = &g2d->subdrv; - int i; - - for (i = 0; i < node->map_nr; i++) { - unsigned long handle = node->handles[i]; + struct drm_exynos_file_private *file_priv = file->driver_priv; + struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; + struct g2d_gem_node *node, *n; - if (node->obj_type[i] == BUF_TYPE_GEM) - exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, - filp); - else - g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, - false); + list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) { + if (!nr) + break; - node->handles[i] = 0; + exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file); + list_del_init(&node->list); + kfree(node); + nr--; } - - node->map_nr = 0; } static void g2d_dma_start(struct g2d_data *g2d, @@ -612,18 +337,10 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d) static void g2d_free_runqueue_node(struct g2d_data *g2d, struct g2d_runqueue_node *runqueue_node) { - struct g2d_cmdlist_node *node; - if (!runqueue_node) return; mutex_lock(&g2d->cmdlist_mutex); - /* - * commands in run_cmdlist have been completed so unmap all gem 
- * objects in each command node so that they are unreferenced. - */ - list_for_each_entry(node, &runqueue_node->run_cmdlist, list) - g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp); list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); mutex_unlock(&g2d->cmdlist_mutex); @@ -713,28 +430,15 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static int g2d_check_reg_offset(struct device *dev, - struct g2d_cmdlist_node *node, +static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist, int nr, bool for_addr) { - struct g2d_cmdlist *cmdlist = node->cmdlist; int reg_offset; int index; int i; for (i = 0; i < nr; i++) { index = cmdlist->last - 2 * (i + 1); - - if (for_addr) { - /* check userptr buffer type. */ - reg_offset = (cmdlist->data[index] & - ~0x7fffffff) >> 31; - if (reg_offset) { - node->obj_type[i] = BUF_TYPE_USERPTR; - cmdlist->data[index] &= ~G2D_BUF_USERPTR; - } - } - reg_offset = cmdlist->data[index] & ~0xfffff000; if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) @@ -751,9 +455,6 @@ static int g2d_check_reg_offset(struct device *dev, case G2D_MSK_BASE_ADDR: if (!for_addr) goto err; - - if (node->obj_type[i] != BUF_TYPE_USERPTR) - node->obj_type[i] = BUF_TYPE_GEM; break; default: if (for_addr) @@ -765,7 +466,7 @@ static int g2d_check_reg_offset(struct device *dev, return 0; err: - dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]); + dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]); return -EINVAL; } @@ -865,7 +566,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, } /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ - size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2; + size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2; if (size > G2D_CMDLIST_DATA_NUM) { dev_err(dev, "cmdlist size is too big\n"); ret = -EINVAL; @@ -882,29 +583,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, } cmdlist->last += req->cmd_nr * 2; - ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false); + ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false); if (ret < 0) goto err_free_event; - node->map_nr = req->cmd_buf_nr; - if (req->cmd_buf_nr) { - struct drm_exynos_g2d_cmd *cmd_buf; + node->gem_nr = req->cmd_gem_nr; + if (req->cmd_gem_nr) { + struct drm_exynos_g2d_cmd *cmd_gem; - cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf; + cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem; if (copy_from_user(cmdlist->data + cmdlist->last, - (void __user *)cmd_buf, - sizeof(*cmd_buf) * req->cmd_buf_nr)) { + (void __user *)cmd_gem, + sizeof(*cmd_gem) * req->cmd_gem_nr)) { ret = -EFAULT; goto err_free_event; } - cmdlist->last += req->cmd_buf_nr * 2; + cmdlist->last += req->cmd_gem_nr * 2; - ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true); + ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true); if (ret < 0) goto err_free_event; - ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file); + ret = g2d_get_cmdlist_gem(drm_dev, file, node); if (ret < 0) goto err_unmap; } @@ -923,7 +624,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, return 0; err_unmap: - g2d_unmap_cmdlist_gem(g2d, node, file); + g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr); err_free_event: if (node->event) { spin_lock_irqsave(&drm_dev->event_lock, flags); @@ -979,7 +680,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, 
mutex_lock(&g2d->runqueue_mutex); runqueue_node->pid = current->pid; - runqueue_node->filp = file; list_add_tail(&runqueue_node->list, &g2d->runqueue); if (!g2d->runqueue_node) g2d_exec_runqueue(g2d); @@ -996,43 +696,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, } EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); -static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) -{ - struct g2d_data *g2d; - int ret; - - g2d = dev_get_drvdata(dev); - if (!g2d) - return -EFAULT; - - /* allocate dma-aware cmdlist buffer. */ - ret = g2d_init_cmdlist(g2d); - if (ret < 0) { - dev_err(dev, "cmdlist init failed\n"); - return ret; - } - - if (!is_drm_iommu_supported(drm_dev)) - return 0; - - ret = drm_iommu_attach_device(drm_dev, dev); - if (ret < 0) { - dev_err(dev, "failed to enable iommu.\n"); - g2d_fini_cmdlist(g2d); - } - - return ret; - -} - -static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev) -{ - if (!is_drm_iommu_supported(drm_dev)) - return; - - drm_iommu_detach_device(drm_dev, dev); -} - static int g2d_open(struct drm_device *drm_dev, struct device *dev, struct drm_file *file) { @@ -1050,7 +713,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev, INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist); INIT_LIST_HEAD(&g2d_priv->event_list); - INIT_LIST_HEAD(&g2d_priv->userptr_list); + INIT_LIST_HEAD(&g2d_priv->gem_list); return 0; } @@ -1071,21 +734,11 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev, return; mutex_lock(&g2d->cmdlist_mutex); - list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) { - /* - * unmap all gem objects not completed. - * - * P.S. if current process was terminated forcely then - * there may be some commands in inuse_cmdlist so unmap - * them. - */ - g2d_unmap_cmdlist_gem(g2d, node, file); + list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) list_move_tail(&node->list, &g2d->free_cmdlist); - } mutex_unlock(&g2d->cmdlist_mutex); - /* release all g2d_userptr in pool. 
*/ - g2d_userptr_free_all(drm_dev, g2d, file); + g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr); kfree(file_priv->g2d_priv); } @@ -1125,11 +778,15 @@ static int __devinit g2d_probe(struct platform_device *pdev) mutex_init(&g2d->cmdlist_mutex); mutex_init(&g2d->runqueue_mutex); - g2d->gate_clk = devm_clk_get(dev, "fimg2d"); + ret = g2d_init_cmdlist(g2d); + if (ret < 0) + goto err_destroy_workqueue; + + g2d->gate_clk = clk_get(dev, "fimg2d"); if (IS_ERR(g2d->gate_clk)) { dev_err(dev, "failed to get gate clock\n"); ret = PTR_ERR(g2d->gate_clk); - goto err_destroy_workqueue; + goto err_fini_cmdlist; } pm_runtime_enable(dev); @@ -1157,14 +814,10 @@ static int __devinit g2d_probe(struct platform_device *pdev) goto err_put_clk; } - g2d->max_pool = MAX_POOL; - platform_set_drvdata(pdev, g2d); subdrv = &g2d->subdrv; subdrv->dev = dev; - subdrv->probe = g2d_subdrv_probe; - subdrv->remove = g2d_subdrv_remove; subdrv->open = g2d_open; subdrv->close = g2d_close; @@ -1181,6 +834,9 @@ static int __devinit g2d_probe(struct platform_device *pdev) err_put_clk: pm_runtime_disable(dev); + clk_put(g2d->gate_clk); +err_fini_cmdlist: + g2d_fini_cmdlist(g2d); err_destroy_workqueue: destroy_workqueue(g2d->g2d_workq); err_destroy_slab: @@ -1201,6 +857,7 @@ static int __devexit g2d_remove(struct platform_device *pdev) } pm_runtime_disable(&pdev->dev); + clk_put(g2d->gate_clk); g2d_fini_cmdlist(g2d); destroy_workqueue(g2d->g2d_workq); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c index 99227246ce82..d2545560664f 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -83,40 +83,157 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj, static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) { - /* TODO */ - + if (!IS_NONCONTIG_BUFFER(flags)) { + if (size >= SZ_1M) + return roundup(size, SECTION_SIZE); + else if (size >= SZ_64K) + return roundup(size, SZ_64K); + else + goto out; + } +out: return roundup(size, PAGE_SIZE); } -static int exynos_drm_gem_map_buf(struct drm_gem_object *obj, +struct page **exynos_gem_get_pages(struct drm_gem_object *obj, + gfp_t gfpmask) +{ + struct page *p, **pages; + int i, npages; + + npages = obj->size >> PAGE_SHIFT; + + pages = drm_malloc_ab(npages, sizeof(struct page *)); + if (pages == NULL) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < npages; i++) { + p = alloc_page(gfpmask); + if (IS_ERR(p)) + goto fail; + pages[i] = p; + } + + return pages; + +fail: + while (--i) + __free_page(pages[i]); + + drm_free_large(pages); + return ERR_CAST(p); +} + +static void exynos_gem_put_pages(struct drm_gem_object *obj, + struct page **pages) +{ + int npages; + + npages = obj->size >> PAGE_SHIFT; + + while (--npages >= 0) + __free_page(pages[npages]); + + drm_free_large(pages); +} + +static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, struct vm_area_struct *vma, unsigned long f_vaddr, pgoff_t page_offset) { struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; - struct scatterlist *sgl; unsigned long pfn; - int i; - if (!buf->sgt) - return -EINTR; + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { + if (!buf->pages) + return -EINTR; + + pfn = page_to_pfn(buf->pages[page_offset++]); + } else + pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; + + return vm_insert_mixed(vma, f_vaddr, pfn); +} - if (page_offset >= (buf->size >> PAGE_SHIFT)) { - 
DRM_ERROR("invalid page offset\n"); +static int exynos_drm_gem_get_pages(struct drm_gem_object *obj) +{ + struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); + struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; + struct scatterlist *sgl; + struct page **pages; + unsigned int npages, i = 0; + int ret; + + if (buf->pages) { + DRM_DEBUG_KMS("already allocated.\n"); return -EINVAL; } + pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE); + if (IS_ERR(pages)) { + DRM_ERROR("failed to get pages.\n"); + return PTR_ERR(pages); + } + + npages = obj->size >> PAGE_SHIFT; + buf->page_size = PAGE_SIZE; + + buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!buf->sgt) { + DRM_ERROR("failed to allocate sg table.\n"); + ret = -ENOMEM; + goto err; + } + + ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL); + if (ret < 0) { + DRM_ERROR("failed to initialize sg table.\n"); + ret = -EFAULT; + goto err1; + } + sgl = buf->sgt->sgl; - for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) { - if (page_offset < (sgl->length >> PAGE_SHIFT)) - break; - page_offset -= (sgl->length >> PAGE_SHIFT); + + /* set all pages to sg list. */ + while (i < npages) { + sg_set_page(sgl, pages[i], PAGE_SIZE, 0); + sg_dma_address(sgl) = page_to_phys(pages[i]); + i++; + sgl = sg_next(sgl); } - pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset; + /* add some codes for UNCACHED type here. TODO */ - return vm_insert_mixed(vma, f_vaddr, pfn); + buf->pages = pages; + return ret; +err1: + kfree(buf->sgt); + buf->sgt = NULL; +err: + exynos_gem_put_pages(obj, pages); + return ret; + +} + +static void exynos_drm_gem_put_pages(struct drm_gem_object *obj) +{ + struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); + struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; + + /* + * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages + * allocated at gem fault handler. + */ + sg_free_table(buf->sgt); + kfree(buf->sgt); + buf->sgt = NULL; + + exynos_gem_put_pages(obj, buf->pages); + buf->pages = NULL; + + /* add some codes for UNCACHED type here. TODO */ } static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, @@ -153,6 +270,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); + if (!buf->pages) + return; + /* * do not release memory region from exporter. * @@ -162,7 +282,10 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) if (obj->import_attach) goto out; - exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) + exynos_drm_gem_put_pages(obj); + else + exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); out: exynos_drm_fini_buf(obj->dev, buf); @@ -241,10 +364,22 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, /* set memory type and cache attribute from user side. */ exynos_gem_obj->flags = flags; - ret = exynos_drm_alloc_buf(dev, buf, flags); - if (ret < 0) { - drm_gem_object_release(&exynos_gem_obj->base); - goto err_fini_buf; + /* + * allocate all pages as desired size if user wants to allocate + * physically non-continuous memory. 
+ */ + if (flags & EXYNOS_BO_NONCONTIG) { + ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base); + if (ret < 0) { + drm_gem_object_release(&exynos_gem_obj->base); + goto err_fini_buf; + } + } else { + ret = exynos_drm_alloc_buf(dev, buf, flags); + if (ret < 0) { + drm_gem_object_release(&exynos_gem_obj->base); + goto err_fini_buf; + } } return exynos_gem_obj; @@ -277,14 +412,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } -dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, +void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, unsigned int gem_handle, - struct drm_file *filp) + struct drm_file *file_priv) { struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_gem_object *obj; - obj = drm_gem_object_lookup(dev, filp, gem_handle); + obj = drm_gem_object_lookup(dev, file_priv, gem_handle); if (!obj) { DRM_ERROR("failed to lookup gem object.\n"); return ERR_PTR(-EINVAL); @@ -292,17 +427,25 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, exynos_gem_obj = to_exynos_gem_obj(obj); + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { + DRM_DEBUG_KMS("not support NONCONTIG type.\n"); + drm_gem_object_unreference_unlocked(obj); + + /* TODO */ + return ERR_PTR(-EINVAL); + } + return &exynos_gem_obj->buffer->dma_addr; } void exynos_drm_gem_put_dma_addr(struct drm_device *dev, unsigned int gem_handle, - struct drm_file *filp) + struct drm_file *file_priv) { struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_gem_object *obj; - obj = drm_gem_object_lookup(dev, filp, gem_handle); + obj = drm_gem_object_lookup(dev, file_priv, gem_handle); if (!obj) { DRM_ERROR("failed to lookup gem object.\n"); return; @@ -310,6 +453,14 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, exynos_gem_obj = to_exynos_gem_obj(obj); + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { + DRM_DEBUG_KMS("not support NONCONTIG type.\n"); + drm_gem_object_unreference_unlocked(obj); + + /* TODO */ + return; + } + drm_gem_object_unreference_unlocked(obj); /* @@ -338,57 +489,22 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, &args->offset); } -static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev, - struct file *filp) -{ - struct drm_file *file_priv; - - mutex_lock(&drm_dev->struct_mutex); - - /* find current process's drm_file from filelist. */ - list_for_each_entry(file_priv, &drm_dev->filelist, lhead) { - if (file_priv->filp == filp) { - mutex_unlock(&drm_dev->struct_mutex); - return file_priv; - } - } - - mutex_unlock(&drm_dev->struct_mutex); - WARN_ON(1); - - return ERR_PTR(-EFAULT); -} - static int exynos_drm_gem_mmap_buffer(struct file *filp, struct vm_area_struct *vma) { struct drm_gem_object *obj = filp->private_data; struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); - struct drm_device *drm_dev = obj->dev; struct exynos_drm_gem_buf *buffer; - struct drm_file *file_priv; - unsigned long vm_size; + unsigned long pfn, vm_size, usize, uaddr = vma->vm_start; int ret; DRM_DEBUG_KMS("%s\n", __FILE__); vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_private_data = obj; - vma->vm_ops = drm_dev->driver->gem_vm_ops; - - /* restore it to driver's fops. */ - filp->f_op = fops_get(drm_dev->driver->fops); - - file_priv = exynos_drm_find_drm_file(drm_dev, filp); - if (IS_ERR(file_priv)) - return PTR_ERR(file_priv); - - /* restore it to drm_file. 
*/ - filp->private_data = file_priv; update_vm_cache_attr(exynos_gem_obj, vma); - vm_size = vma->vm_end - vma->vm_start; + vm_size = usize = vma->vm_end - vma->vm_start; /* * a buffer contains information to physically continuous memory @@ -400,23 +516,40 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, if (vm_size > buffer->size) return -EINVAL; - ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->kvaddr, - buffer->dma_addr, buffer->size, - &buffer->dma_attrs); - if (ret < 0) { - DRM_ERROR("failed to mmap.\n"); - return ret; - } + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { + int i = 0; - /* - * take a reference to this mapping of the object. And this reference - * is unreferenced by the corresponding vm_close call. - */ - drm_gem_object_reference(obj); + if (!buffer->pages) + return -EINVAL; + + vma->vm_flags |= VM_MIXEDMAP; - mutex_lock(&drm_dev->struct_mutex); - drm_vm_open_locked(drm_dev, vma); - mutex_unlock(&drm_dev->struct_mutex); + do { + ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); + if (ret) { + DRM_ERROR("failed to remap user space.\n"); + return ret; + } + + uaddr += PAGE_SIZE; + usize -= PAGE_SIZE; + } while (usize > 0); + } else { + /* + * get page frame number to physical memory to be mapped + * to user space. + */ + pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> + PAGE_SHIFT; + + DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); + + if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size, + vma->vm_page_prot)) { + DRM_ERROR("failed to remap pfn range.\n"); + return -EAGAIN; + } + } return 0; } @@ -445,29 +578,16 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - /* - * Set specific mmper's fops. And it will be restored by - * exynos_drm_gem_mmap_buffer to dev->driver->fops. - * This is used to call specific mapper temporarily. - */ - file_priv->filp->f_op = &exynos_drm_gem_fops; + obj->filp->f_op = &exynos_drm_gem_fops; + obj->filp->private_data = obj; - /* - * Set gem object to private_data so that specific mmaper - * can get the gem object. And it will be restored by - * exynos_drm_gem_mmap_buffer to drm_file. - */ - file_priv->filp->private_data = obj; - - addr = vm_mmap(file_priv->filp, 0, args->size, + addr = vm_mmap(obj->filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, 0); drm_gem_object_unreference_unlocked(obj); - if (IS_ERR((void *)addr)) { - file_priv->filp->private_data = file_priv; + if (IS_ERR((void *)addr)) return PTR_ERR((void *)addr); - } args->mapped = addr; @@ -502,129 +622,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, return 0; } -struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma) -{ - struct vm_area_struct *vma_copy; - - vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL); - if (!vma_copy) - return NULL; - - if (vma->vm_ops && vma->vm_ops->open) - vma->vm_ops->open(vma); - - if (vma->vm_file) - get_file(vma->vm_file); - - memcpy(vma_copy, vma, sizeof(*vma)); - - vma_copy->vm_mm = NULL; - vma_copy->vm_next = NULL; - vma_copy->vm_prev = NULL; - - return vma_copy; -} - -void exynos_gem_put_vma(struct vm_area_struct *vma) -{ - if (!vma) - return; - - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - - if (vma->vm_file) - fput(vma->vm_file); - - kfree(vma); -} - -int exynos_gem_get_pages_from_userptr(unsigned long start, - unsigned int npages, - struct page **pages, - struct vm_area_struct *vma) -{ - int get_npages; - - /* the memory region mmaped with VM_PFNMAP. 
*/ - if (vma_is_io(vma)) { - unsigned int i; - - for (i = 0; i < npages; ++i, start += PAGE_SIZE) { - unsigned long pfn; - int ret = follow_pfn(vma, start, &pfn); - if (ret) - return ret; - - pages[i] = pfn_to_page(pfn); - } - - if (i != npages) { - DRM_ERROR("failed to get user_pages.\n"); - return -EINVAL; - } - - return 0; - } - - get_npages = get_user_pages(current, current->mm, start, - npages, 1, 1, pages, NULL); - get_npages = max(get_npages, 0); - if (get_npages != npages) { - DRM_ERROR("failed to get user_pages.\n"); - while (get_npages) - put_page(pages[--get_npages]); - return -EFAULT; - } - - return 0; -} - -void exynos_gem_put_pages_to_userptr(struct page **pages, - unsigned int npages, - struct vm_area_struct *vma) -{ - if (!vma_is_io(vma)) { - unsigned int i; - - for (i = 0; i < npages; i++) { - set_page_dirty_lock(pages[i]); - - /* - * undo the reference we took when populating - * the table. - */ - put_page(pages[i]); - } - } -} - -int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, - struct sg_table *sgt, - enum dma_data_direction dir) -{ - int nents; - - mutex_lock(&drm_dev->struct_mutex); - - nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); - if (!nents) { - DRM_ERROR("failed to map sgl with dma.\n"); - mutex_unlock(&drm_dev->struct_mutex); - return nents; - } - - mutex_unlock(&drm_dev->struct_mutex); - return 0; -} - -void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, - struct sg_table *sgt, - enum dma_data_direction dir) -{ - dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); -} - int exynos_drm_gem_init_object(struct drm_gem_object *obj) { DRM_DEBUG_KMS("%s\n", __FILE__); @@ -756,9 +753,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mutex_lock(&dev->struct_mutex); - ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset); + ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); if (ret < 0) - DRM_ERROR("failed to map a buffer with user.\n"); + DRM_ERROR("failed to map pages.\n"); mutex_unlock(&dev->struct_mutex); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h index d3ea106a9a77..085b2a5d5f70 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -35,25 +35,21 @@ * exynos drm gem buffer structure. * * @kvaddr: kernel virtual address to allocated memory region. - * *userptr: user space address. * @dma_addr: bus address(accessed by dma) to allocated memory region. * - this address could be physical address without IOMMU and * device address with IOMMU. - * @write: whether pages will be written to by the caller. * @sgt: sg table to transfer page data. + * @pages: contain all pages to allocated memory region. + * @page_size: could be 4K, 64K or 1MB. * @size: size of allocated memory region. - * @pfnmap: indicate whether memory region from userptr is mmaped with - * VM_PFNMAP or not. */ struct exynos_drm_gem_buf { void __iomem *kvaddr; - unsigned long userptr; dma_addr_t dma_addr; - struct dma_attrs dma_attrs; - unsigned int write; struct sg_table *sgt; + struct page **pages; + unsigned long page_size; unsigned long size; - bool pfnmap; }; /* @@ -69,7 +65,6 @@ struct exynos_drm_gem_buf { * or at framebuffer creation. * @size: size requested from user, in bytes and this size is aligned * in page unit. - * @vma: a pointer to vm_area. * @flags: indicate memory type to allocated buffer and cache attruibute. * * P.S. 
this object would be transfered to user as kms_bo.handle so @@ -79,7 +74,6 @@ struct exynos_drm_gem_obj { struct drm_gem_object base; struct exynos_drm_gem_buf *buffer; unsigned long size; - struct vm_area_struct *vma; unsigned int flags; }; @@ -110,9 +104,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, * other drivers such as 2d/3d acceleration drivers. * with this function call, gem object reference count would be increased. */ -dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, +void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, unsigned int gem_handle, - struct drm_file *filp); + struct drm_file *file_priv); /* * put dma address from gem handle and this function could be used for @@ -121,7 +115,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, */ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, unsigned int gem_handle, - struct drm_file *filp); + struct drm_file *file_priv); /* get buffer offset to map to user space. */ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, @@ -134,10 +128,6 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -/* map user space allocated by malloc to pages. */ -int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - /* get buffer information to memory region allocated by gem. */ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -173,36 +163,4 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); /* set vm_flags and we can change the vm attribute to other one at here. */ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); -static inline int vma_is_io(struct vm_area_struct *vma) -{ - return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); -} - -/* get a copy of a virtual memory region. */ -struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma); - -/* release a userspace virtual memory area. */ -void exynos_gem_put_vma(struct vm_area_struct *vma); - -/* get pages from user space. */ -int exynos_gem_get_pages_from_userptr(unsigned long start, - unsigned int npages, - struct page **pages, - struct vm_area_struct *vma); - -/* drop the reference to pages. */ -void exynos_gem_put_pages_to_userptr(struct page **pages, - unsigned int npages, - struct vm_area_struct *vma); - -/* map sgt with dma region. */ -int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, - struct sg_table *sgt, - enum dma_data_direction dir); - -/* unmap sgt from dma region. 
*/ -void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, - struct sg_table *sgt, - enum dma_data_direction dir); - #endif diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index 2d11e70b601a..c3b9e2b45185 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -346,23 +346,9 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev, ctx->hdmi_ctx->drm_dev = drm_dev; ctx->mixer_ctx->drm_dev = drm_dev; - if (mixer_ops->iommu_on) - mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true); - return 0; } -static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev) -{ - struct drm_hdmi_context *ctx; - struct exynos_drm_subdrv *subdrv = to_subdrv(dev); - - ctx = get_ctx_from_subdrv(subdrv); - - if (mixer_ops->iommu_on) - mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false); -} - static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -382,7 +368,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) subdrv->dev = dev; subdrv->manager = &hdmi_manager; subdrv->probe = hdmi_subdrv_probe; - subdrv->remove = hdmi_subdrv_remove; platform_set_drvdata(pdev, subdrv); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h index 54b522353e48..2da5ffd3a059 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h @@ -62,7 +62,6 @@ struct exynos_hdmi_ops { struct exynos_mixer_ops { /* manager */ - int (*iommu_on)(void *ctx, bool enable); int (*enable_vblank)(void *ctx, int pipe); void (*disable_vblank)(void *ctx); void (*dpms)(void *ctx, int mode); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c deleted file mode 100644 index 09db1983eb1a..000000000000 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c +++ /dev/null @@ -1,150 +0,0 @@ -/* exynos_drm_iommu.c - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd. - * Author: Inki Dae - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include - -#include -#include -#include - -#include - -#include "exynos_drm_drv.h" -#include "exynos_drm_iommu.h" - -/* - * drm_create_iommu_mapping - create a mapping structure - * - * @drm_dev: DRM device - */ -int drm_create_iommu_mapping(struct drm_device *drm_dev) -{ - struct dma_iommu_mapping *mapping = NULL; - struct exynos_drm_private *priv = drm_dev->dev_private; - struct device *dev = drm_dev->dev; - - if (!priv->da_start) - priv->da_start = EXYNOS_DEV_ADDR_START; - if (!priv->da_space_size) - priv->da_space_size = EXYNOS_DEV_ADDR_SIZE; - if (!priv->da_space_order) - priv->da_space_order = EXYNOS_DEV_ADDR_ORDER; - - mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, - priv->da_space_size, - priv->da_space_order); - if (!mapping) - return -ENOMEM; - - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), - GFP_KERNEL); - dma_set_max_seg_size(dev, 0xffffffffu); - dev->archdata.mapping = mapping; - - return 0; -} - -/* - * drm_release_iommu_mapping - release iommu mapping structure - * - * @drm_dev: DRM device - * - * if mapping->kref becomes 0 then all things related to iommu mapping - * will be released - */ -void drm_release_iommu_mapping(struct drm_device *drm_dev) -{ - struct device *dev = drm_dev->dev; - - arm_iommu_release_mapping(dev->archdata.mapping); -} - -/* - * drm_iommu_attach_device- attach device to iommu mapping - * - * @drm_dev: DRM device - * @subdrv_dev: device to be attach - * - * This function should be called by sub drivers to attach it to iommu - * mapping. - */ -int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) -{ - struct device *dev = drm_dev->dev; - int ret; - - if (!dev->archdata.mapping) { - DRM_ERROR("iommu_mapping is null.\n"); - return -EFAULT; - } - - subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, - sizeof(*subdrv_dev->dma_parms), - GFP_KERNEL); - dma_set_max_seg_size(subdrv_dev, 0xffffffffu); - - ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); - if (ret < 0) { - DRM_DEBUG_KMS("failed iommu attach.\n"); - return ret; - } - - /* - * Set dma_ops to drm_device just one time. - * - * The dma mapping api needs device object and the api is used - * to allocate physial memory and map it with iommu table. - * If iommu attach succeeded, the sub driver would have dma_ops - * for iommu and also all sub drivers have same dma_ops. - */ - if (!dev->archdata.dma_ops) - dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops; - - return 0; -} - -/* - * drm_iommu_detach_device -detach device address space mapping from device - * - * @drm_dev: DRM device - * @subdrv_dev: device to be detached - * - * This function should be called by sub drivers to detach it from iommu - * mapping - */ -void drm_iommu_detach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) -{ - struct device *dev = drm_dev->dev; - struct dma_iommu_mapping *mapping = dev->archdata.mapping; - - if (!mapping || !mapping->domain) - return; - - iommu_detach_device(mapping->domain, subdrv_dev); - drm_release_iommu_mapping(drm_dev); -} diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h deleted file mode 100644 index 18a0ca190b98..000000000000 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h +++ /dev/null @@ -1,85 +0,0 @@ -/* exynos_drm_iommu.h - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
- * Authoer: Inki Dae - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _EXYNOS_DRM_IOMMU_H_ -#define _EXYNOS_DRM_IOMMU_H_ - -#define EXYNOS_DEV_ADDR_START 0x20000000 -#define EXYNOS_DEV_ADDR_SIZE 0x40000000 -#define EXYNOS_DEV_ADDR_ORDER 0x4 - -#ifdef CONFIG_DRM_EXYNOS_IOMMU - -int drm_create_iommu_mapping(struct drm_device *drm_dev); - -void drm_release_iommu_mapping(struct drm_device *drm_dev); - -int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev); - -void drm_iommu_detach_device(struct drm_device *dev_dev, - struct device *subdrv_dev); - -static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) -{ -#ifdef CONFIG_ARM_DMA_USE_IOMMU - struct device *dev = drm_dev->dev; - - return dev->archdata.mapping ? true : false; -#else - return false; -#endif -} - -#else - -struct dma_iommu_mapping; -static inline int drm_create_iommu_mapping(struct drm_device *drm_dev) -{ - return 0; -} - -static inline void drm_release_iommu_mapping(struct drm_device *drm_dev) -{ -} - -static inline int drm_iommu_attach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) -{ - return 0; -} - -static inline void drm_iommu_detach_device(struct drm_device *drm_dev, - struct device *subdrv_dev) -{ -} - -static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) -{ - return false; -} - -#endif -#endif diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 4b0c16bfd1da..e4b8a8f741f7 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -382,6 +382,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned long flags; + bool is_checked = false; spin_lock_irqsave(&drm_dev->event_lock, flags); @@ -391,6 +392,8 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) if (crtc != e->pipe) continue; + is_checked = true; + do_gettimeofday(&now); e->event.sequence = 0; e->event.tv_sec = now.tv_sec; @@ -398,7 +401,22 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); - drm_vblank_put(drm_dev, crtc); + } + + if (is_checked) { + /* + * call drm_vblank_put only in case that drm_vblank_get was + * called. 
+ */ + if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0) + drm_vblank_put(drm_dev, crtc); + + /* + * don't off vblank if vblank_disable_allowed is 1, + * because vblank would be off by timer handler. + */ + if (!drm_dev->vblank_disable_allowed) + drm_vblank_off(drm_dev, crtc); } spin_unlock_irqrestore(&drm_dev->event_lock, flags); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c index bafb65389562..2c115f8a62a3 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -50,29 +50,6 @@ #define MAX_HEIGHT 1080 #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) -/* AVI header and aspect ratio */ -#define HDMI_AVI_VERSION 0x02 -#define HDMI_AVI_LENGTH 0x0D -#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4) -#define AVI_SAME_AS_PIC_ASPECT_RATIO 8 - -/* AUI header info */ -#define HDMI_AUI_VERSION 0x01 -#define HDMI_AUI_LENGTH 0x0A - -/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */ -enum HDMI_PACKET_TYPE { - /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */ - /* InfoFrame packet type */ - HDMI_PACKET_TYPE_INFOFRAME = 0x80, - /* Vendor-Specific InfoFrame */ - HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1, - /* Auxiliary Video information InfoFrame */ - HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2, - /* Audio information InfoFrame */ - HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4 -}; - enum hdmi_type { HDMI_TYPE13, HDMI_TYPE14, @@ -97,7 +74,6 @@ struct hdmi_context { struct mutex hdmi_mutex; void __iomem *regs; - void *parent_ctx; int external_irq; int internal_irq; @@ -108,6 +84,7 @@ struct hdmi_context { int cur_conf; struct hdmi_resources res; + void *parent_ctx; int hpd_gpio; @@ -205,7 +182,6 @@ struct hdmi_v13_conf { int height; int vrefresh; bool interlace; - int cea_video_id; const u8 *hdmiphy_data; const struct hdmi_v13_preset_conf *conf; }; @@ -377,20 +353,15 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = { }; static const struct hdmi_v13_conf hdmi_v13_confs[] = { - { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25, - &hdmi_v13_conf_720p60 }, - { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25, - &hdmi_v13_conf_720p60 }, - { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027, - &hdmi_v13_conf_480p }, - { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25, - &hdmi_v13_conf_1080i50 }, - { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5, - &hdmi_v13_conf_1080p50 }, - { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25, - &hdmi_v13_conf_1080i60 }, - { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5, - &hdmi_v13_conf_1080p60 }, + { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, + { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, + { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p }, + { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 }, + { 1920, 1080, 50, false, hdmiphy_v13_conf148_5, + &hdmi_v13_conf_1080p50 }, + { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 }, + { 1920, 1080, 60, false, hdmiphy_v13_conf148_5, + &hdmi_v13_conf_1080p60 }, }; /* HDMI Version 1.4 */ @@ -508,7 +479,6 @@ struct hdmi_conf { int height; int vrefresh; bool interlace; - int cea_video_id; const u8 *hdmiphy_data; const struct hdmi_preset_conf *conf; }; @@ -964,21 +934,16 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = { }; static const struct hdmi_conf hdmi_confs[] = { - { 720, 480, 60, false, 3, 
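The is_checked/vblank_refcount guard added to vidi_finish_pageflip above exists because drm_vblank_put() must only balance an earlier drm_vblank_get(); calling it unpaired underflows the per-CRTC reference count. A hedged sketch of that pairing in a page-flip path (the example_* helpers are hypothetical, only the drm_vblank_get/put calls come from the patch):

#include <drm/drmP.h>

static int example_queue_flip(struct drm_device *dev, int crtc)
{
	int ret;

	/* Hold a vblank reference so the interrupt stays enabled until the
	 * flip-completion event has been delivered. */
	ret = drm_vblank_get(dev, crtc);
	if (ret)
		return ret;

	/* ... arm the hardware page flip here ... */
	return 0;
}

static void example_finish_flip(struct drm_device *dev, int crtc,
				bool event_was_queued)
{
	/* Drop the reference only if one was actually taken; this is the
	 * imbalance the added check in the patch is guarding against. */
	if (event_was_queued)
		drm_vblank_put(dev, crtc);
}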
hdmiphy_conf27_027, &hdmi_conf_480p60 }, - { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 }, - { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 }, - { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, - { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, - { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 }, - { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, - { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, + { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 }, + { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 }, + { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, + { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, + { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, + { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 }, + { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, + { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, }; -struct hdmi_infoframe { - enum HDMI_PACKET_TYPE type; - u8 ver; - u8 len; -}; static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) { @@ -1302,88 +1267,6 @@ static int hdmi_conf_index(struct hdmi_context *hdata, return hdmi_v14_conf_index(mode); } -static u8 hdmi_chksum(struct hdmi_context *hdata, - u32 start, u8 len, u32 hdr_sum) -{ - int i; - - /* hdr_sum : header0 + header1 + header2 - * start : start address of packet byte1 - * len : packet bytes - 1 */ - for (i = 0; i < len; ++i) - hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4); - - /* return 2's complement of 8 bit hdr_sum */ - return (u8)(~(hdr_sum & 0xff) + 1); -} - -static void hdmi_reg_infoframe(struct hdmi_context *hdata, - struct hdmi_infoframe *infoframe) -{ - u32 hdr_sum; - u8 chksum; - u32 aspect_ratio; - u32 mod; - u32 vic; - - DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - - mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); - if (hdata->dvi_mode) { - hdmi_reg_writeb(hdata, HDMI_VSI_CON, - HDMI_VSI_CON_DO_NOT_TRANSMIT); - hdmi_reg_writeb(hdata, HDMI_AVI_CON, - HDMI_AVI_CON_DO_NOT_TRANSMIT); - hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN); - return; - } - - switch (infoframe->type) { - case HDMI_PACKET_TYPE_AVI: - hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len); - hdr_sum = infoframe->type + infoframe->ver + infoframe->len; - - /* Output format zero hardcoded ,RGB YBCR selection */ - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | - AVI_ACTIVE_FORMAT_VALID | - AVI_UNDERSCANNED_DISPLAY_VALID); - - aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9; - - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio | - AVI_SAME_AS_PIC_ASPECT_RATIO); - - if (hdata->type == HDMI_TYPE13) - vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id; - else - vic = hdmi_confs[hdata->cur_conf].cea_video_id; - - hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); - - chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), - infoframe->len, hdr_sum); - DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); - hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); - break; - case HDMI_PACKET_TYPE_AUI: - hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver); - hdmi_reg_writeb(hdata, 
HDMI_AUI_HEADER2, infoframe->len); - hdr_sum = infoframe->type + infoframe->ver + infoframe->len; - chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), - infoframe->len, hdr_sum); - DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum); - hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); - break; - default: - break; - } -} - static bool hdmi_is_connected(void *ctx) { struct hdmi_context *hdata = ctx; @@ -1410,7 +1293,6 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector, DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), raw_edid->width_cm, raw_edid->height_cm); - kfree(raw_edid); } else { return -ENODEV; } @@ -1659,8 +1541,6 @@ static void hdmi_conf_reset(struct hdmi_context *hdata) static void hdmi_conf_init(struct hdmi_context *hdata) { - struct hdmi_infoframe infoframe; - /* disable HPD interrupts */ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); @@ -1695,17 +1575,9 @@ static void hdmi_conf_init(struct hdmi_context *hdata) hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); } else { - infoframe.type = HDMI_PACKET_TYPE_AVI; - infoframe.ver = HDMI_AVI_VERSION; - infoframe.len = HDMI_AVI_LENGTH; - hdmi_reg_infoframe(hdata, &infoframe); - - infoframe.type = HDMI_PACKET_TYPE_AUI; - infoframe.ver = HDMI_AUI_VERSION; - infoframe.len = HDMI_AUI_LENGTH; - hdmi_reg_infoframe(hdata, &infoframe); - /* enable AVI packet every vsync, fixes purple line problem */ + hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02); + hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5); hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); } } @@ -2106,18 +1978,9 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, index = hdmi_v14_conf_index(m); if (index >= 0) { - struct drm_mode_object base; - struct list_head head; - DRM_INFO("desired mode doesn't exist so\n"); DRM_INFO("use the most suitable mode among modes.\n"); - - /* preserve display mode header while copying. 
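The dropped hdmi_chksum() implements the standard HDMI InfoFrame checksum: the checksum byte is chosen so that header bytes plus payload bytes plus checksum sum to zero modulo 256, i.e. it is the two's complement of the running sum. A small worked sketch of the same arithmetic on a plain byte buffer (example_ name is illustrative):

#include <linux/types.h>

/* Returns the byte that makes (header sum + payload bytes + checksum)
 * equal 0 (mod 256) -- the two's complement of the low 8 bits of the sum. */
static u8 example_infoframe_checksum(u32 hdr_sum, const u8 *payload, int len)
{
	int i;

	for (i = 0; i < len; i++)
		hdr_sum += payload[i];

	return (u8)(~(hdr_sum & 0xff) + 1);
}

/*
 * Worked example with the AVI values removed above: header bytes are
 * 0x82 (type) + 0x02 (version) + 0x0D (length) = 0x91.  If the payload
 * bytes sum to 0x37, the total is 0xC8 and the checksum byte is
 * 0x100 - 0xC8 = 0x38, so the complete packet sums to 0 mod 256.
 */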
*/ - head = adjusted_mode->head; - base = adjusted_mode->base; memcpy(adjusted_mode, m, sizeof(*m)); - adjusted_mode->head = head; - adjusted_mode->base = base; break; } } @@ -2303,27 +2166,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata) memset(res, 0, sizeof(*res)); /* get clocks, power */ - res->hdmi = devm_clk_get(dev, "hdmi"); + res->hdmi = clk_get(dev, "hdmi"); if (IS_ERR_OR_NULL(res->hdmi)) { DRM_ERROR("failed to get clock 'hdmi'\n"); goto fail; } - res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); + res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); if (IS_ERR_OR_NULL(res->sclk_hdmi)) { DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); goto fail; } - res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); + res->sclk_pixel = clk_get(dev, "sclk_pixel"); if (IS_ERR_OR_NULL(res->sclk_pixel)) { DRM_ERROR("failed to get clock 'sclk_pixel'\n"); goto fail; } - res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); + res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy"); if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); goto fail; } - res->hdmiphy = devm_clk_get(dev, "hdmiphy"); + res->hdmiphy = clk_get(dev, "hdmiphy"); if (IS_ERR_OR_NULL(res->hdmiphy)) { DRM_ERROR("failed to get clock 'hdmiphy'\n"); goto fail; @@ -2331,7 +2194,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata) clk_set_parent(res->sclk_hdmi, res->sclk_pixel); - res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * + res->regul_bulk = kzalloc(ARRAY_SIZE(supply) * sizeof(res->regul_bulk[0]), GFP_KERNEL); if (!res->regul_bulk) { DRM_ERROR("failed to get memory for regulators\n"); @@ -2341,7 +2204,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata) res->regul_bulk[i].supply = supply[i]; res->regul_bulk[i].consumer = NULL; } - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); + ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); if (ret) { DRM_ERROR("failed to get regulators\n"); goto fail; @@ -2354,6 +2217,28 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata) return -ENODEV; } +static int hdmi_resources_cleanup(struct hdmi_context *hdata) +{ + struct hdmi_resources *res = &hdata->res; + + regulator_bulk_free(res->regul_count, res->regul_bulk); + /* kfree is NULL-safe */ + kfree(res->regul_bulk); + if (!IS_ERR_OR_NULL(res->hdmiphy)) + clk_put(res->hdmiphy); + if (!IS_ERR_OR_NULL(res->sclk_hdmiphy)) + clk_put(res->sclk_hdmiphy); + if (!IS_ERR_OR_NULL(res->sclk_pixel)) + clk_put(res->sclk_pixel); + if (!IS_ERR_OR_NULL(res->sclk_hdmi)) + clk_put(res->sclk_hdmi); + if (!IS_ERR_OR_NULL(res->hdmi)) + clk_put(res->hdmi); + memset(res, 0, sizeof(*res)); + + return 0; +} + static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; void hdmi_attach_ddc_client(struct i2c_client *ddc) @@ -2493,32 +2378,36 @@ static int __devinit hdmi_probe(struct platform_device *pdev) ret = hdmi_resources_init(hdata); if (ret) { + ret = -EINVAL; DRM_ERROR("hdmi_resources_init failed\n"); - return -EINVAL; + goto err_data; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { DRM_ERROR("failed to find registers\n"); - return -ENOENT; + ret = -ENOENT; + goto err_resource; } hdata->regs = devm_request_and_ioremap(&pdev->dev, res); if (!hdata->regs) { DRM_ERROR("failed to map registers\n"); - return -ENXIO; + ret = -ENXIO; + goto err_resource; } - ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD"); + ret = gpio_request(hdata->hpd_gpio, "HPD"); if (ret) { DRM_ERROR("failed to request HPD 
gpio\n"); - return ret; + goto err_resource; } /* DDC i2c driver */ if (i2c_add_driver(&ddc_driver)) { DRM_ERROR("failed to register ddc i2c driver\n"); - return -ENOENT; + ret = -ENOENT; + goto err_gpio; } hdata->ddc_port = hdmi_ddc; @@ -2581,6 +2470,11 @@ static int __devinit hdmi_probe(struct platform_device *pdev) i2c_del_driver(&hdmiphy_driver); err_ddc: i2c_del_driver(&ddc_driver); +err_gpio: + gpio_free(hdata->hpd_gpio); +err_resource: + hdmi_resources_cleanup(hdata); +err_data: return ret; } @@ -2597,6 +2491,9 @@ static int __devexit hdmi_remove(struct platform_device *pdev) free_irq(hdata->internal_irq, hdata); free_irq(hdata->external_irq, hdata); + gpio_free(hdata->hpd_gpio); + + hdmi_resources_cleanup(hdata); /* hdmiphy i2c driver */ i2c_del_driver(&hdmiphy_driver); diff --git a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c index 40a6e1906fbb..e7fbb823fd8e 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c @@ -36,7 +36,6 @@ #include "exynos_drm_drv.h" #include "exynos_drm_hdmi.h" -#include "exynos_drm_iommu.h" #define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) @@ -81,7 +80,6 @@ enum mixer_version_id { struct mixer_context { struct device *dev; - struct drm_device *drm_dev; int pipe; bool interlace; bool powered; @@ -92,7 +90,6 @@ struct mixer_context { struct mixer_resources mixer_res; struct hdmi_win_data win_data[MIXER_WIN_NR]; enum mixer_version_id mxr_ver; - void *parent_ctx; }; struct mixer_drv_data { @@ -668,24 +665,6 @@ static void mixer_win_reset(struct mixer_context *ctx) spin_unlock_irqrestore(&res->reg_slock, flags); } -static int mixer_iommu_on(void *ctx, bool enable) -{ - struct exynos_drm_hdmi_context *drm_hdmi_ctx; - struct mixer_context *mdata = ctx; - struct drm_device *drm_dev; - - drm_hdmi_ctx = mdata->parent_ctx; - drm_dev = drm_hdmi_ctx->drm_dev; - - if (is_drm_iommu_supported(drm_dev)) { - if (enable) - return drm_iommu_attach_device(drm_dev, mdata->dev); - - drm_iommu_detach_device(drm_dev, mdata->dev); - } - return 0; -} - static void mixer_poweron(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; @@ -887,7 +866,6 @@ static void mixer_win_disable(void *ctx, int win) static struct exynos_mixer_ops mixer_ops = { /* manager */ - .iommu_on = mixer_iommu_on, .enable_vblank = mixer_enable_vblank, .disable_vblank = mixer_disable_vblank, .dpms = mixer_dpms, @@ -906,6 +884,7 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc) struct drm_pending_vblank_event *e, *t; struct timeval now; unsigned long flags; + bool is_checked = false; spin_lock_irqsave(&drm_dev->event_lock, flags); @@ -915,6 +894,7 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc) if (crtc != e->pipe) continue; + is_checked = true; do_gettimeofday(&now); e->event.sequence = 0; e->event.tv_sec = now.tv_sec; @@ -922,9 +902,16 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc) list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); - drm_vblank_put(drm_dev, crtc); } + if (is_checked) + /* + * call drm_vblank_put only in case that drm_vblank_get was + * called. 
+ */ + if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0) + drm_vblank_put(drm_dev, crtc); + spin_unlock_irqrestore(&drm_dev->event_lock, flags); } @@ -984,45 +971,57 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, spin_lock_init(&mixer_res->reg_slock); - mixer_res->mixer = devm_clk_get(dev, "mixer"); + mixer_res->mixer = clk_get(dev, "mixer"); if (IS_ERR_OR_NULL(mixer_res->mixer)) { dev_err(dev, "failed to get clock 'mixer'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } - mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); + mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(dev, "get memory resource failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (mixer_res->mixer_regs == NULL) { dev_err(dev, "register mapping failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(dev, "get interrupt resource failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, 0, "drm_mixer", ctx); if (ret) { dev_err(dev, "request interrupt failed.\n"); - return ret; + goto fail; } mixer_res->irq = res->start; return 0; + +fail: + if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) + clk_put(mixer_res->sclk_hdmi); + if (!IS_ERR_OR_NULL(mixer_res->mixer)) + clk_put(mixer_res->mixer); + return ret; } static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, @@ -1032,21 +1031,25 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, struct device *dev = &pdev->dev; struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; struct resource *res; + int ret; - mixer_res->vp = devm_clk_get(dev, "vp"); + mixer_res->vp = clk_get(dev, "vp"); if (IS_ERR_OR_NULL(mixer_res->vp)) { dev_err(dev, "failed to get clock 'vp'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } - mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); + mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { dev_err(dev, "failed to get clock 'sclk_mixer'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } - mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac"); + mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { dev_err(dev, "failed to get clock 'sclk_dac'\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } if (mixer_res->sclk_hdmi) @@ -1055,17 +1058,28 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res == NULL) { dev_err(dev, "get memory resource failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (mixer_res->vp_regs == NULL) { dev_err(dev, "register mapping failed.\n"); - return -ENXIO; + ret = -ENXIO; + goto fail; } return 0; + +fail: + if (!IS_ERR_OR_NULL(mixer_res->sclk_dac)) + clk_put(mixer_res->sclk_dac); + if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer)) + clk_put(mixer_res->sclk_mixer); + if (!IS_ERR_OR_NULL(mixer_res->vp)) + clk_put(mixer_res->vp); + return ret; } static struct mixer_drv_data exynos5_mxr_drv_data = { @@ -1135,7 
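The clock hunks above trade the managed devm_clk_get() calls back for plain clk_get(), which is why the explicit unwinding (hdmi_resources_cleanup(), the restored fail: labels with clk_put()) has to come back with them. A compressed sketch of the two styles, with illustrative example_* names and only the "hdmi"/"sclk_hdmi" clock names taken from the patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Managed style: the clock reference is released automatically when the
 * device is unbound, so probe() needs no matching clk_put(). */
static int example_probe_managed(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "hdmi");

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	*out = clk;
	return 0;
}

/* Manual style: every successful clk_get() must be paired with a clk_put()
 * on the error path and again in remove(), as the restored cleanup does. */
static int example_probe_manual(struct device *dev,
				struct clk **hdmi, struct clk **sclk)
{
	*hdmi = clk_get(dev, "hdmi");
	if (IS_ERR_OR_NULL(*hdmi))
		return -ENODEV;

	*sclk = clk_get(dev, "sclk_hdmi");
	if (IS_ERR_OR_NULL(*sclk)) {
		clk_put(*hdmi);
		return -ENODEV;
	}
	return 0;
}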
+1149,6 @@ static int __devinit mixer_probe(struct platform_device *pdev) } ctx->dev = &pdev->dev; - ctx->parent_ctx = (void *)drm_hdmi_ctx; drm_hdmi_ctx->ctx = (void *)ctx; ctx->vp_enabled = drv->is_vp_enabled; ctx->mxr_ver = drv->version; diff --git a/trunk/drivers/gpu/drm/exynos/regs-hdmi.h b/trunk/drivers/gpu/drm/exynos/regs-hdmi.h index 970cdb518eb1..9cc7c5e9718c 100644 --- a/trunk/drivers/gpu/drm/exynos/regs-hdmi.h +++ b/trunk/drivers/gpu/drm/exynos/regs-hdmi.h @@ -298,14 +298,14 @@ #define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714) #define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718) #define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C) -#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1)) +#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n)) #define HDMI_AUI_CON HDMI_CORE_BASE(0x0800) #define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810) #define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814) #define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818) #define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C) -#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1)) +#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n)) #define HDMI_MPG_CON HDMI_CORE_BASE(0x0900) #define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C) @@ -338,19 +338,6 @@ #define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60) #define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64) -/* AVI bit definition */ -#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1) -#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1) - -#define AVI_ACTIVE_FORMAT_VALID (1 << 4) -#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1) - -/* AUI bit definition */ -#define HDMI_AUI_CON_NO_TRAN (0 << 0) - -/* VSI bit definition */ -#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0) - /* HDCP related registers */ #define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n)) #define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n)) diff --git a/trunk/drivers/gpu/drm/gma500/cdv_device.c b/trunk/drivers/gpu/drm/gma500/cdv_device.c index 23e14e93991f..1ceca3d13b65 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_device.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_device.c @@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector) dev_priv->force_audio_property = prop; } - drm_object_attach_property(&connector->base, prop, 0); + drm_connector_attach_property(connector, prop, 0); } @@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector) dev_priv->broadcast_rgb_property = prop; } - drm_object_attach_property(&connector->base, prop, 0); + drm_connector_attach_property(connector, prop, 0); } /* Cedarview */ diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c index 51044cc55cf2..e3a3978cf320 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector, struct cdv_intel_dp *intel_dp = encoder->dev_priv; int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index e223b500022e..7272a461edfe 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector, return -1; } - if (drm_object_property_get_value(&connector->base, + if 
(drm_connector_property_get_value(connector, property, &curValue)) return -1; if (curValue == value) return 0; - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) return -1; @@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev, connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c index d81dbc3368f0..b362dd39bf5a 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, return -1; } - if (drm_object_property_get_value(&connector->base, + if (drm_connector_property_get_value(connector, property, &curValue)) return -1; @@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, if (curValue == value) return 0; - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) return -1; @@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, return -1; } } else if (!strcmp(property->name, "backlight") && encoder) { - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) return -1; @@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev, connector->doublescan_allowed = false; /*Attach connector properties*/ - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); diff --git a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 2d4ab48f07a2..32dba2ab53e1 100644 --- a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, goto set_prop_error; } - if (drm_object_property_get_value(&connector->base, property, &val)) + if (drm_connector_property_get_value(connector, property, &val)) goto set_prop_error; if (val == value) goto set_prop_done; - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; @@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, } } } else if (!strcmp(property->name, "backlight") && encoder) { - if (drm_object_property_set_value(&connector->base, property, + if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; else @@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev, dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe); - if (pipe != 0 && pipe != 2) { + if (!dev || ((pipe != 0) && (pipe != 2))) { DRM_ERROR("Invalid parameter\n"); return; } @@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev, connector->doublescan_allowed = false; /*attach properties*/ - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, 
dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev_priv->backlight_property, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL); diff --git a/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c b/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c index 74485dc43945..dec6a9aea3c6 100644 --- a/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc, REG_WRITE(map->pos, 0); if (psb_intel_encoder) - drm_object_property_get_value(&connector->base, + drm_connector_property_get_value(connector, dev->mode_config.scaling_mode_property, &scalingType); if (scalingType == DRM_MODE_SCALE_NO_SCALE) { diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail.h b/trunk/drivers/gpu/drm/gma500/oaktrail.h index 30adbbe23024..f2f9f38a5362 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail.h +++ b/trunk/drivers/gpu/drm/gma500/oaktrail.h @@ -249,9 +249,3 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev); extern void oaktrail_hdmi_save(struct drm_device *dev); extern void oaktrail_hdmi_restore(struct drm_device *dev); extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev); -extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); -extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode); - - diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c b/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c index 3071526bc3c1..cdafd2acc72f 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -168,11 +168,6 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 temp; - if (pipe == 1) { - oaktrail_crtc_hdmi_dpms(crtc, mode); - return; - } - if (!gma_power_begin(dev, true)) return; @@ -307,9 +302,6 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; struct drm_connector *connector; - if (pipe == 1) - return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb); - if (!gma_power_begin(dev, true)) return 0; @@ -351,7 +343,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc, (mode->crtc_vdisplay - 1)); if (psb_intel_encoder) - drm_object_property_get_value(&connector->base, + drm_connector_property_get_value(connector, dev->mode_config.scaling_mode_property, &scalingType); if (scalingType == DRM_MODE_SCALE_NO_SCALE) { diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_device.c b/trunk/drivers/gpu/drm/gma500/oaktrail_device.c index 08747fd7105c..010b820744a5 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_device.c @@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = { .accel_2d = 1, .pipes = 2, .crtcs = 2, - .hdmi_mask = (1 << 1), + .hdmi_mask = (1 << 0), .lvds_mask = (1 << 0), .cursor_needs_phys = 0, .sgx_offset = MRST_SGX_OFFSET, diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c index f036f1fc161e..69e51e903f35 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -155,345 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev) 
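The long run of gma500 hunks here and below is a mechanical rename: the tree this patch targets still exposes the property helpers on struct drm_connector rather than on the embedded mode object, so every call takes the connector directly. A minimal sketch showing the call form used throughout the patch, assuming a property already created with drm_property_create_range() (the example_ wrapper is illustrative):

#include <drm/drm_crtc.h>

static void example_attach_scaling_prop(struct drm_connector *connector,
					struct drm_property *prop,
					uint64_t val)
{
	/* Helper form restored by this patch: takes the connector itself. */
	drm_connector_attach_property(connector, prop, val);

	/* Equivalent call in trees where the helpers moved onto the generic
	 * mode object (the form the patch replaces):
	 *
	 *     drm_object_attach_property(&connector->base, prop, val);
	 */
}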
HDMI_READ(HDMI_HCR); } -static void wait_for_vblank(struct drm_device *dev) -{ - /* Wait for 20ms, i.e. one cycle at 50hz. */ - mdelay(20); -} - -static unsigned int htotal_calculate(struct drm_display_mode *mode) -{ - u32 htotal, new_crtc_htotal; - - htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16); - - /* - * 1024 x 768 new_crtc_htotal = 0x1024; - * 1280 x 1024 new_crtc_htotal = 0x0c34; - */ - new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock; - - DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal); - return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16); -} - -static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target, - int refclk, struct oaktrail_hdmi_clock *best_clock) -{ - int np_min, np_max, nr_min, nr_max; - int np, nr, nf; - - np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10); - np_max = oaktrail_hdmi_limit.vco.max / (target * 10); - if (np_min < oaktrail_hdmi_limit.np.min) - np_min = oaktrail_hdmi_limit.np.min; - if (np_max > oaktrail_hdmi_limit.np.max) - np_max = oaktrail_hdmi_limit.np.max; - - nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max)); - nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min)); - if (nr_min < oaktrail_hdmi_limit.nr.min) - nr_min = oaktrail_hdmi_limit.nr.min; - if (nr_max > oaktrail_hdmi_limit.nr.max) - nr_max = oaktrail_hdmi_limit.nr.max; - - np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max)); - nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np)); - nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk); - DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf); - - /* - * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000; - * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000; - */ - best_clock->np = np; - best_clock->nr = nr - 1; - best_clock->nf = (nf << 14); -} - -static void scu_busy_loop(void __iomem *scu_base) -{ - u32 status = 0; - u32 loop_count = 0; - - status = readl(scu_base + 0x04); - while (status & 1) { - udelay(1); /* scu processing time is in few u secods */ - status = readl(scu_base + 0x04); - loop_count++; - /* break if scu doesn't reset busy bit after huge retry */ - if (loop_count > 1000) { - DRM_DEBUG_KMS("SCU IPC timed out"); - return; - } - } -} - -/* - * You don't want to know, you really really don't want to know.... - * - * This is magic. However it's safe magic because of the way the platform - * works and it is necessary magic. - */ -static void oaktrail_hdmi_reset(struct drm_device *dev) -{ - void __iomem *base; - unsigned long scu_ipc_mmio = 0xff11c000UL; - int scu_len = 1024; - - base = ioremap((resource_size_t)scu_ipc_mmio, scu_len); - if (base == NULL) { - DRM_ERROR("failed to map scu mmio\n"); - return; - } - - /* scu ipc: assert hdmi controller reset */ - writel(0xff11d118, base + 0x0c); - writel(0x7fffffdf, base + 0x80); - writel(0x42005, base + 0x0); - scu_busy_loop(base); - - /* scu ipc: de-assert hdmi controller reset */ - writel(0xff11d118, base + 0x0c); - writel(0x7fffffff, base + 0x80); - writel(0x42005, base + 0x0); - scu_busy_loop(base); - - iounmap(base); -} - -int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv; - int pipe = 1; - int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; - int hblank_reg = (pipe == 0) ? 
HBLANK_A : HBLANK_B; - int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; - int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; - int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; - int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; - int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; - int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; - int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - int refclk; - struct oaktrail_hdmi_clock clock; - u32 dspcntr, pipeconf, dpll, temp; - int dspcntr_reg = DSPBCNTR; - - if (!gma_power_begin(dev, true)) - return 0; - - /* Disable the VGA plane that we never use */ - REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); - - /* Disable dpll if necessary */ - dpll = REG_READ(DPLL_CTRL); - if ((dpll & DPLL_PWRDN) == 0) { - REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET)); - REG_WRITE(DPLL_DIV_CTRL, 0x00000000); - REG_WRITE(DPLL_STATUS, 0x1); - } - udelay(150); - - /* Reset controller */ - oaktrail_hdmi_reset(dev); - - /* program and enable dpll */ - refclk = 25000; - oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock); - - /* Set the DPLL */ - dpll = REG_READ(DPLL_CTRL); - dpll &= ~DPLL_PDIV_MASK; - dpll &= ~(DPLL_PWRDN | DPLL_RESET); - REG_WRITE(DPLL_CTRL, 0x00000008); - REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr)); - REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1)); - REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN)); - REG_WRITE(DPLL_UPDATE, 0x80000000); - REG_WRITE(DPLL_CLK_ENABLE, 0x80050102); - udelay(150); - - /* configure HDMI */ - HDMI_WRITE(0x1004, 0x1fd); - HDMI_WRITE(0x2000, 0x1); - HDMI_WRITE(0x2008, 0x0); - HDMI_WRITE(0x3130, 0x8); - HDMI_WRITE(0x101c, 0x1800810); - - temp = htotal_calculate(adjusted_mode); - REG_WRITE(htot_reg, temp); - REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); - REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); - REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); - REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); - REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); - REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); - - REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); - REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); - REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); - REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); - REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); - REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); - REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1)); - - temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; - HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp); - - REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); - REG_WRITE(dsppos_reg, 0); - - /* Flush the plane changes */ - { - struct drm_crtc_helper_funcs 
*crtc_funcs = crtc->helper_private; - crtc_funcs->mode_set_base(crtc, x, y, old_fb); - } - - /* Set up the display plane register */ - dspcntr = REG_READ(dspcntr_reg); - dspcntr |= DISPPLANE_GAMMA_ENABLE; - dspcntr |= DISPPLANE_SEL_PIPE_B; - dspcntr |= DISPLAY_PLANE_ENABLE; - - /* setup pipeconf */ - pipeconf = REG_READ(pipeconf_reg); - pipeconf |= PIPEACONF_ENABLE; - - REG_WRITE(pipeconf_reg, pipeconf); - REG_READ(pipeconf_reg); - - REG_WRITE(PCH_PIPEBCONF, pipeconf); - REG_READ(PCH_PIPEBCONF); - wait_for_vblank(dev); - - REG_WRITE(dspcntr_reg, dspcntr); - wait_for_vblank(dev); - - gma_power_end(dev); - - return 0; -} - -void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode) -{ - struct drm_device *dev = crtc->dev; - u32 temp; - - DRM_DEBUG_KMS("%s %d\n", __func__, mode); - - switch (mode) { - case DRM_MODE_DPMS_OFF: - REG_WRITE(VGACNTRL, 0x80000000); - - /* Disable plane */ - temp = REG_READ(DSPBCNTR); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE); - REG_READ(DSPBCNTR); - /* Flush the plane changes */ - REG_WRITE(DSPBSURF, REG_READ(DSPBSURF)); - REG_READ(DSPBSURF); - } - - /* Disable pipe B */ - temp = REG_READ(PIPEBCONF); - if ((temp & PIPEACONF_ENABLE) != 0) { - REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE); - REG_READ(PIPEBCONF); - } - - /* Disable LNW Pipes, etc */ - temp = REG_READ(PCH_PIPEBCONF); - if ((temp & PIPEACONF_ENABLE) != 0) { - REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE); - REG_READ(PCH_PIPEBCONF); - } - - /* wait for pipe off */ - udelay(150); - - /* Disable dpll */ - temp = REG_READ(DPLL_CTRL); - if ((temp & DPLL_PWRDN) == 0) { - REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET)); - REG_WRITE(DPLL_STATUS, 0x1); - } - - /* wait for dpll off */ - udelay(150); - - break; - case DRM_MODE_DPMS_ON: - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - /* Enable dpll */ - temp = REG_READ(DPLL_CTRL); - if ((temp & DPLL_PWRDN) != 0) { - REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET)); - temp = REG_READ(DPLL_CLK_ENABLE); - REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI); - REG_READ(DPLL_CLK_ENABLE); - } - /* wait for dpll warm up */ - udelay(150); - - /* Enable pipe B */ - temp = REG_READ(PIPEBCONF); - if ((temp & PIPEACONF_ENABLE) == 0) { - REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE); - REG_READ(PIPEBCONF); - } - - /* Enable LNW Pipe B */ - temp = REG_READ(PCH_PIPEBCONF); - if ((temp & PIPEACONF_ENABLE) == 0) { - REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE); - REG_READ(PCH_PIPEBCONF); - } - - wait_for_vblank(dev); - - /* Enable plane */ - temp = REG_READ(DSPBCNTR); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(DSPBSURF, REG_READ(DSPBSURF)); - REG_READ(DSPBSURF); - } - - psb_intel_crtc_load_lut(crtc); - } - - /* DSPARB */ - REG_WRITE(DSPARB, 0x00003fbf); - - /* FW1 */ - REG_WRITE(0x70034, 0x3f880a0a); - - /* FW2 */ - REG_WRITE(0x70038, 0x0b060808); - - /* FW4 */ - REG_WRITE(0x70050, 0x08030404); - - /* FW5 */ - REG_WRITE(0x70054, 0x04040404); - - /* LNC Chicken Bits - Squawk! 
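scu_busy_loop() in the removed oaktrail code above is a bounded busy-wait on a status bit. The same pattern reduced to its essentials, keeping the 0x04 register offset, bit 0, and the 1000-retry cap from the removed code (the example_ name and the error return are illustrative):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll bit 0 of the SCU status register until it clears, giving up after
 * ~1000 microsecond-spaced retries instead of spinning forever. */
static int example_wait_scu_idle(void __iomem *scu_base)
{
	unsigned int tries = 0;

	while (readl(scu_base + 0x04) & 1) {
		udelay(1);		/* SCU processing takes a few us */
		if (++tries > 1000)
			return -ETIMEDOUT;
	}
	return 0;
}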
*/ - REG_WRITE(0x70400, 0x4000); - - return; -} - static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) { static int dpms_mode = -1; @@ -572,15 +233,13 @@ static const unsigned char raw_edid[] = { static int oaktrail_hdmi_get_modes(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; + struct drm_psb_private *dev_priv = dev->dev_private; struct i2c_adapter *i2c_adap; struct edid *edid; - int ret = 0; + struct drm_display_mode *mode, *t; + int i = 0, ret = 0; - /* - * FIXME: We need to figure this lot out. In theory we can - * read the EDID somehow but I've yet to find working reference - * code. - */ i2c_adap = i2c_get_adapter(3); if (i2c_adap == NULL) { DRM_ERROR("No ddc adapter available!\n"); @@ -594,7 +253,17 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); } - return ret; + + /* + * prune modes that require frame buffer bigger than stolen mem + */ + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) { + i++; + drm_mode_remove(connector, mode); + } + } + return ret - i; } static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder, @@ -680,7 +349,6 @@ void oaktrail_hdmi_init(struct drm_device *dev, connector->interlace_allowed = false; connector->doublescan_allowed = false; drm_sysfs_connector_add(connector); - dev_info(dev->dev, "HDMI initialised.\n"); return; @@ -735,9 +403,6 @@ void oaktrail_hdmi_setup(struct drm_device *dev) dev_priv->hdmi_priv = hdmi_dev; oaktrail_hdmi_audio_disable(dev); - - dev_info(dev->dev, "HDMI hardware present.\n"); - return; free: diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c b/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c index 325013a9c48c..558c77fb55ec 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder, return; } - drm_object_property_get_value( - &connector->base, + drm_connector_property_get_value( + connector, dev->mode_config.scaling_mode_property, &v); @@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev, connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c index 9fa5fa2e6192..2a4c3a9e33e3 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, goto set_prop_error; } - if (drm_object_property_get_value(&connector->base, + if (drm_connector_property_get_value(connector, property, &curval)) goto set_prop_error; @@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, if (curval == value) goto set_prop_done; - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; @@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, goto 
set_prop_error; } } else if (!strcmp(property->name, "backlight")) { - if (drm_object_property_set_value(&connector->base, + if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; @@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev, connector->doublescan_allowed = false; /*Attach connector properties*/ - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev_priv->backlight_property, BRIGHTNESS_MAX_LEVEL); diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c index a4cc777ab7a6..fc9292705dbf 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, uint8_t cmd; int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; @@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) { temp_value = val; if (psb_intel_sdvo_connector->left == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, psb_intel_sdvo_connector->right, val); if (psb_intel_sdvo_connector->left_margin == temp_value) return 0; @@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (psb_intel_sdvo_connector->right == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, psb_intel_sdvo_connector->left, val); if (psb_intel_sdvo_connector->right_margin == temp_value) return 0; @@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (psb_intel_sdvo_connector->top == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, psb_intel_sdvo_connector->bottom, val); if (psb_intel_sdvo_connector->top_margin == temp_value) return 0; @@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } else if (psb_intel_sdvo_connector->bottom == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, psb_intel_sdvo_connector->top, val); if (psb_intel_sdvo_connector->bottom_margin == temp_value) return 0; @@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]); psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0]; - drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base, + drm_connector_attach_property(&psb_intel_sdvo_connector->base.base, psb_intel_sdvo_connector->tv_format, 0); return true; @@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s psb_intel_sdvo_connector->name = \ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ if (!psb_intel_sdvo_connector->name) return false; \ - drm_object_attach_property(&connector->base, \ + drm_connector_attach_property(connector, \ 
psb_intel_sdvo_connector->name, \ psb_intel_sdvo_connector->cur_##name); \ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ @@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->left) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->left, psb_intel_sdvo_connector->left_margin); @@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->right) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->right, psb_intel_sdvo_connector->right_margin); DRM_DEBUG_KMS("h_overscan: max %d, " @@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->top) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->top, psb_intel_sdvo_connector->top_margin); @@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->bottom) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->bottom, psb_intel_sdvo_connector->bottom_margin); DRM_DEBUG_KMS("v_overscan: max %d, " @@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo, if (!psb_intel_sdvo_connector->dot_crawl) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, psb_intel_sdvo_connector->dot_crawl, psb_intel_sdvo_connector->cur_dot_crawl); DRM_DEBUG_KMS("dot crawl: current %d\n", response); diff --git a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c index b865d0728e28..599099fe76e3 100644 --- a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c @@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod else priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, encoder->dev->mode_config.tv_subconnector_property, priv->subconnector); @@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder, priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2); - drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property, + drm_connector_attach_property(connector, conf->tv_select_subconnector_property, priv->select_subconnector); - drm_object_attach_property(&connector->base, conf->tv_subconnector_property, + drm_connector_attach_property(connector, conf->tv_subconnector_property, priv->subconnector); - drm_object_attach_property(&connector->base, conf->tv_left_margin_property, + drm_connector_attach_property(connector, conf->tv_left_margin_property, priv->hmargin); - drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property, + drm_connector_attach_property(connector, conf->tv_bottom_margin_property, priv->vmargin); - drm_object_attach_property(&connector->base, conf->tv_mode_property, + drm_connector_attach_property(connector, conf->tv_mode_property, priv->norm); - drm_object_attach_property(&connector->base, conf->tv_brightness_property, + 
drm_connector_attach_property(connector, conf->tv_brightness_property, priv->brightness); - drm_object_attach_property(&connector->base, conf->tv_contrast_property, + drm_connector_attach_property(connector, conf->tv_contrast_property, priv->contrast); - drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property, + drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, priv->flicker); - drm_object_attach_property(&connector->base, priv->scale_property, + drm_connector_attach_property(connector, priv->scale_property, priv->scale); return 0; diff --git a/trunk/drivers/gpu/drm/i915/i915_debugfs.c b/trunk/drivers/gpu/drm/i915/i915_debugfs.c index 4568e7d8a060..dde8b505bf7f 100644 --- a/trunk/drivers/gpu/drm/i915/i915_debugfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_debugfs.c @@ -1068,7 +1068,7 @@ static int gen6_drpc_info(struct seq_file *m) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; + u32 rpmodectl1, gt_core_status, rcctl1; unsigned forcewake_count; int count=0, ret; @@ -1097,9 +1097,6 @@ static int gen6_drpc_info(struct seq_file *m) rpmodectl1 = I915_READ(GEN6_RP_CONTROL); rcctl1 = I915_READ(GEN6_RC_CONTROL); mutex_unlock(&dev->struct_mutex); - mutex_lock(&dev_priv->rps.hw_lock); - sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - mutex_unlock(&dev_priv->rps.hw_lock); seq_printf(m, "Video Turbo Mode: %s\n", yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); @@ -1151,12 +1148,6 @@ static int gen6_drpc_info(struct seq_file *m) seq_printf(m, "RC6++ residency since boot: %u\n", I915_READ(GEN6_GT_GFX_RC6pp)); - seq_printf(m, "RC6 voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); - seq_printf(m, "RC6+ voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); - seq_printf(m, "RC6++ voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); return 0; } @@ -1282,7 +1273,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) return 0; } - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1291,14 +1282,19 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) for (gpu_freq = dev_priv->rps.min_delay; gpu_freq <= dev_priv->rps.max_delay; gpu_freq++) { - ia_freq = gpu_freq; - sandybridge_pcode_read(dev_priv, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &ia_freq); + I915_WRITE(GEN6_PCODE_DATA, gpu_freq); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | + GEN6_PCODE_READ_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & + GEN6_PCODE_READY) == 0, 10)) { + DRM_ERROR("pcode read of freq table timed out\n"); + continue; + } + ia_freq = I915_READ(GEN6_PCODE_DATA); seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -1402,15 +1398,15 @@ static int i915_context_status(struct seq_file *m, void *unused) if (ret) return ret; - if (dev_priv->ips.pwrctx) { + if (dev_priv->pwrctx) { seq_printf(m, "power context "); - describe_obj(m, dev_priv->ips.pwrctx); + describe_obj(m, dev_priv->pwrctx); seq_printf(m, "\n"); } - if (dev_priv->ips.renderctx) { + if (dev_priv->renderctx) { seq_printf(m, "render context "); - describe_obj(m, dev_priv->ips.renderctx); + describe_obj(m, dev_priv->renderctx); seq_printf(m, "\n"); } @@ 
-1715,13 +1711,13 @@ i915_max_freq_read(struct file *filp, if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; len = snprintf(buf, sizeof(buf), "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); if (len > sizeof(buf)) len = sizeof(buf); @@ -1756,7 +1752,7 @@ i915_max_freq_write(struct file *filp, DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1766,7 +1762,7 @@ i915_max_freq_write(struct file *filp, dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return cnt; } @@ -1791,13 +1787,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; len = snprintf(buf, sizeof(buf), "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); if (len > sizeof(buf)) len = sizeof(buf); @@ -1830,7 +1826,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1840,7 +1836,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return cnt; } diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c index a48e4910ea2c..61ae104dca8c 100644 --- a/trunk/drivers/gpu/drm/i915/i915_dma.c +++ b/trunk/drivers/gpu/drm/i915/i915_dma.c @@ -103,6 +103,32 @@ static void i915_write_hws_pga(struct drm_device *dev) I915_WRITE(HWS_PGA, addr); } +/** + * Sets up the hardware status page for devices that need a physical address + * in the register. + */ +static int i915_init_phys_hws(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + /* Program Hardware Status Page */ + dev_priv->status_page_dmah = + drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); + + if (!dev_priv->status_page_dmah) { + DRM_ERROR("Can not allocate hardware status page\n"); + return -ENOMEM; + } + + memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr, + 0, PAGE_SIZE); + + i915_write_hws_pga(dev); + + DRM_DEBUG_DRIVER("Enabled hardware status page\n"); + return 0; +} + /** * Frees the hardware status page, whether it's a physical address or a virtual * address set up by the X Server. 
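The i915_ring_freq_table() hunk earlier in this file replaces the sandybridge_pcode_read() call with a direct GEN6 PCODE mailbox handshake: post the request word, set the READY bit together with the command, poll until the PCU clears READY, then read the reply out of the data register. A minimal sketch of that sequence follows, using only the register macros and the wait_for() helper visible in the hunk; the function name and the -ETIMEDOUT return value are assumptions made for the example and are not part of the patch.

/*
 * Illustrative sketch only: the PCODE mailbox read sequence that the
 * i915_ring_freq_table() hunk above open-codes in place of
 * sandybridge_pcode_read().
 */
static int pcode_mailbox_read_sketch(struct drm_i915_private *dev_priv,
				     u32 cmd, u32 *val)
{
	/* Post the request word (e.g. gpu_freq) and the command. */
	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | cmd);

	/* The PCU clears the READY bit once it has serviced the request. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 10))
		return -ETIMEDOUT;	/* error value is an assumption for this sketch */

	/* The reply (e.g. ia_freq) comes back through the data register. */
	*val = I915_READ(GEN6_PCODE_DATA);
	return 0;
}

Called with GEN6_PCODE_READ_MIN_FREQ_TABLE and *val preloaded with gpu_freq, this would reproduce what the debugfs loop in the hunk does inline.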
@@ -425,16 +451,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - dev_priv->dri1.counter++; - if (dev_priv->dri1.counter > 0x7FFFFFFFUL) - dev_priv->dri1.counter = 0; + dev_priv->counter++; + if (dev_priv->counter > 0x7FFFFFFFUL) + dev_priv->counter = 0; if (master_priv->sarea_priv) - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; + master_priv->sarea_priv->last_enqueue = dev_priv->counter; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); + OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } @@ -576,12 +602,12 @@ static int i915_dispatch_flip(struct drm_device * dev) ADVANCE_LP_RING(); - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++; + master_priv->sarea_priv->last_enqueue = dev_priv->counter++; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); + OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } @@ -749,21 +775,21 @@ static int i915_emit_irq(struct drm_device * dev) DRM_DEBUG_DRIVER("\n"); - dev_priv->dri1.counter++; - if (dev_priv->dri1.counter > 0x7FFFFFFFUL) - dev_priv->dri1.counter = 1; + dev_priv->counter++; + if (dev_priv->counter > 0x7FFFFFFFUL) + dev_priv->counter = 1; if (master_priv->sarea_priv) - master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; + master_priv->sarea_priv->last_enqueue = dev_priv->counter; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->dri1.counter); + OUT_RING(dev_priv->counter); OUT_RING(MI_USER_INTERRUPT); ADVANCE_LP_RING(); } - return dev_priv->dri1.counter; + return dev_priv->counter; } static int i915_wait_irq(struct drm_device * dev, int irq_nr) @@ -794,7 +820,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", - READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); + READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); } return ret; @@ -988,9 +1014,6 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_PRIME_VMAP_FLUSH: value = 1; break; - case I915_PARAM_HAS_SECURE_BATCHES: - value = capable(CAP_SYS_ADMIN); - break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -1303,8 +1326,6 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_modeset_gem_init(dev); - INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); - ret = drm_irq_install(dev); if (ret) goto cleanup_gem; @@ -1470,9 +1491,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto free_priv; } - ret = i915_gem_gtt_init(dev); - if (ret) + ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); + if (!ret) { + DRM_ERROR("failed to set up gmch\n"); + ret = -EIO; goto put_bridge; + } + + dev_priv->mm.gtt = intel_gtt_get(); + if (!dev_priv->mm.gtt) { + DRM_ERROR("Failed to initialize GTT\n"); + ret = -ENODEV; + goto put_gmch; + } if (drm_core_check_feature(dev, DRIVER_MODESET)) i915_kick_out_firmware_fb(dev_priv); @@ -1559,10 +1590,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_setup_gmbus(dev); intel_opregion_setup(dev); + /* Make sure the bios did its job and 
set up vital registers */ intel_setup_bios(dev); i915_gem_load(dev); + /* Init HWS */ + if (!I915_NEED_GFX_HWS(dev)) { + ret = i915_init_phys_hws(dev); + if (ret) + goto out_gem_unload; + } + /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there * according to the published specs. It doesn't appear to function @@ -1582,8 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->rps.lock); spin_lock_init(&dev_priv->dpio_lock); - mutex_init(&dev_priv->rps.hw_lock); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) dev_priv->num_pipe = 3; else if (IS_MOBILE(dev) || !IS_GEN2(dev)) @@ -1641,7 +1678,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) out_rmmap: pci_iounmap(dev->pdev, dev_priv->regs); put_gmch: - i915_gem_gtt_fini(dev); + intel_gmch_remove(); put_bridge: pci_dev_put(dev_priv->bridge_dev); free_priv: @@ -1684,7 +1721,6 @@ int i915_driver_unload(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fbdev_fini(dev); intel_modeset_cleanup(dev); - cancel_work_sync(&dev_priv->console_resume_work); /* * free the memory space allocated for the child device diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index 6745c7f976db..6770ee6084b4 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset, unsigned int i915_fbpercrtc __always_unused = 0; module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); -int i915_panel_ignore_lid __read_mostly = 1; +int i915_panel_ignore_lid __read_mostly = 0; module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); MODULE_PARM_DESC(panel_ignore_lid, - "Override lid status (0=autodetect, 1=autodetect disabled [default], " - "-1=force lid closed, -2=force lid open)"); + "Override lid status (0=autodetect [default], 1=lid open, " + "-1=lid closed)"); unsigned int i915_powersave __read_mostly = 1; module_param_named(powersave, i915_powersave, int, 0600); @@ -396,6 +396,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */ MODULE_DEVICE_TABLE(pci, pciidlist); #endif +#define INTEL_PCH_DEVICE_ID_MASK 0xff00 +#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 +#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 +#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 +#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 + void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -410,36 +416,26 @@ void intel_detect_pch(struct drm_device *dev) pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); if (pch) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { - unsigned short id; + int id; id = pch->device & INTEL_PCH_DEVICE_ID_MASK; - dev_priv->pch_id = id; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_IBX; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); - WARN_ON(!IS_GEN5(dev)); } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_CPT; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); - WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ dev_priv->pch_type = PCH_CPT; dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found PatherPoint PCH\n"); - WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; dev_priv->num_pch_pll = 0; 
DRM_DEBUG_KMS("Found LynxPoint PCH\n"); - WARN_ON(!IS_HASWELL(dev)); - } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { - dev_priv->pch_type = PCH_LPT; - dev_priv->num_pch_pll = 0; - DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); - WARN_ON(!IS_HASWELL(dev)); } BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); } @@ -481,8 +477,6 @@ static int i915_drm_freeze(struct drm_device *dev) return error; } - cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); - intel_modeset_disable(dev); drm_irq_uninstall(dev); @@ -532,23 +526,17 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) return 0; } -void intel_console_resume(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - console_resume_work); - struct drm_device *dev = dev_priv->dev; - - console_lock(); - intel_fbdev_set_suspend(dev, 0); - console_unlock(); -} - -static int __i915_drm_thaw(struct drm_device *dev) +static int i915_drm_thaw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); + mutex_unlock(&dev->struct_mutex); + } + i915_restore_state(dev); intel_opregion_setup(dev); @@ -565,6 +553,7 @@ static int __i915_drm_thaw(struct drm_device *dev) intel_modeset_init_hw(dev); intel_modeset_setup_hw_state(dev); + drm_mode_config_reset(dev); drm_irq_install(dev); } @@ -572,41 +561,14 @@ static int __i915_drm_thaw(struct drm_device *dev) dev_priv->modeset_on_lid = 0; - /* - * The console lock can be pretty contented on resume due - * to all the printk activity. Try to keep it out of the hot - * path of resume if possible. - */ - if (console_trylock()) { - intel_fbdev_set_suspend(dev, 0); - console_unlock(); - } else { - schedule_work(&dev_priv->console_resume_work); - } - - return error; -} - -static int i915_drm_thaw(struct drm_device *dev) -{ - int error = 0; - - intel_gt_reset(dev); - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); - mutex_unlock(&dev->struct_mutex); - } - - __i915_drm_thaw(dev); - + console_lock(); + intel_fbdev_set_suspend(dev, 0); + console_unlock(); return error; } int i915_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) @@ -617,20 +579,7 @@ int i915_resume(struct drm_device *dev) pci_set_master(dev->pdev); - intel_gt_reset(dev); - - /* - * Platforms with opregion should have sane BIOS, older ones (gen3 and - * earlier) need this since the BIOS might clear all our scratch PTEs. 
- */ - if (drm_core_check_feature(dev, DRIVER_MODESET) && - !dev_priv->opregion.header) { - mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); - mutex_unlock(&dev->struct_mutex); - } - - ret = __i915_drm_thaw(dev); + ret = i915_drm_thaw(dev); if (ret) return ret; @@ -884,7 +833,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *intel_info = (struct intel_device_info *) ent->driver_data; - if (intel_info->is_valleyview) + if (intel_info->is_haswell || intel_info->is_valleyview) if(!i915_preliminary_hw_support) { DRM_ERROR("Preliminary hardware support disabled\n"); return -ENODEV; @@ -1191,40 +1140,12 @@ static bool IS_DISPLAYREG(u32 reg) if (reg == GEN6_GDRST) return false; - switch (reg) { - case _3D_CHICKEN3: - case IVB_CHICKEN3: - case GEN7_COMMON_SLICE_CHICKEN1: - case GEN7_L3CNTLREG1: - case GEN7_L3_CHICKEN_MODE_REGISTER: - case GEN7_ROW_CHICKEN2: - case GEN7_L3SQCREG4: - case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG: - case GEN7_HALF_SLICE_CHICKEN1: - case GEN6_MBCTL: - case GEN6_UCGCTL2: - return false; - default: - break; - } - return true; } -static void -ilk_dummy_write(struct drm_i915_private *dev_priv) -{ - /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the - * chip from rc6 before touching it for real. MI_MODE is masked, hence - * harmless to write 0 into. */ - I915_WRITE_NOTRACE(MI_MODE, 0); -} - #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ - if (IS_GEN5(dev_priv->dev)) \ - ilk_dummy_write(dev_priv); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ unsigned long irqflags; \ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ @@ -1256,12 +1177,6 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - if (IS_GEN5(dev_priv->dev)) \ - ilk_dummy_write(dev_priv); \ - if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ - DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \ - I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \ - } \ if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ write##y(val, dev_priv->regs + reg + 0x180000); \ } else { \ diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h index 87c06f97fa89..f511fa2f4168 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.h +++ b/trunk/drivers/gpu/drm/i915/i915_drv.h @@ -58,14 +58,6 @@ enum pipe { }; #define pipe_name(p) ((p) + 'A') -enum transcoder { - TRANSCODER_A = 0, - TRANSCODER_B, - TRANSCODER_C, - TRANSCODER_EDP = 0xF, -}; -#define transcoder_name(t) ((t) + 'A') - enum plane { PLANE_A = 0, PLANE_B, @@ -101,12 +93,6 @@ struct intel_pch_pll { }; #define I915_NUM_PLLS 2 -struct intel_ddi_plls { - int spll_refcount; - int wrpll1_refcount; - int wrpll2_refcount; -}; - /* Interface history: * * 1.1: Original. 
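For reference, the intel_detect_pch() hunk above classifies the PCH by masking the ISA-bridge PCI device ID with INTEL_PCH_DEVICE_ID_MASK (0xff00) and comparing the result against the INTEL_PCH_*_DEVICE_ID_TYPE values that this tree defines in i915_drv.c. A minimal sketch of that classification follows; the masks, type values and PCH_* constants are the ones shown in the hunk, while the helper name, the 0x1c46 example ID and the -ENODEV fallback are assumptions made for the example.

/*
 * Illustrative sketch only: how intel_detect_pch() (hunk above) maps an
 * ISA-bridge PCI device ID onto a PCH type.
 */
static int pch_type_from_id_sketch(unsigned short pci_device_id)
{
	int id = pci_device_id & INTEL_PCH_DEVICE_ID_MASK; /* e.g. 0x1c46 & 0xff00 == 0x1c00 */

	switch (id) {
	case INTEL_PCH_IBX_DEVICE_ID_TYPE:	/* 0x3b00: Ibex Peak */
		return PCH_IBX;
	case INTEL_PCH_CPT_DEVICE_ID_TYPE:	/* 0x1c00: CougarPoint */
	case INTEL_PCH_PPT_DEVICE_ID_TYPE:	/* 0x1e00: PantherPoint, CPT-compatible */
		return PCH_CPT;
	case INTEL_PCH_LPT_DEVICE_ID_TYPE:	/* 0x8c00: LynxPoint */
		return PCH_LPT;
	default:
		return -ENODEV;			/* fallback is an assumption for this sketch */
	}
}

In the hunk itself an unmatched ID simply leaves dev_priv->pch_type unchanged.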
@@ -137,6 +123,14 @@ struct drm_i915_gem_phys_object { struct drm_i915_gem_object *cur_obj; }; +struct mem_block { + struct mem_block *next; + struct mem_block *prev; + int start; + int size; + struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ +}; + struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -257,7 +251,6 @@ struct drm_i915_display_funcs { uint32_t sprite_width, int pixel_size); void (*update_linetime_wm)(struct drm_device *dev, int pipe, struct drm_display_mode *mode); - void (*modeset_global_resources)(struct drm_device *dev); int (*crtc_mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -270,6 +263,7 @@ struct drm_i915_display_funcs { struct drm_crtc *crtc); void (*fdi_link_train)(struct drm_crtc *crtc); void (*init_clock_gating)(struct drm_device *dev); + void (*init_pch_clock_gating)(struct drm_device *dev); int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj); @@ -344,7 +338,6 @@ struct intel_device_info { #define I915_PPGTT_PD_ENTRIES 512 #define I915_PPGTT_PT_ENTRIES 1024 struct i915_hw_ppgtt { - struct drm_device *dev; unsigned num_pd_entries; struct page **pt_pages; uint32_t pd_offset; @@ -390,18 +383,154 @@ struct intel_fbc_work; struct intel_gmbus { struct i2c_adapter adapter; - u32 force_bit; + bool force_bit; u32 reg0; u32 gpio_reg; struct i2c_algo_bit_data bit_algo; struct drm_i915_private *dev_priv; }; -struct i915_suspend_saved_registers { +typedef struct drm_i915_private { + struct drm_device *dev; + + const struct intel_device_info *info; + + int relative_constants_mode; + + void __iomem *regs; + + struct drm_i915_gt_funcs gt; + /** gt_fifo_count and the subsequent register write are synchronized + * with dev->struct_mutex. */ + unsigned gt_fifo_count; + /** forcewake_count is protected by gt_lock */ + unsigned forcewake_count; + /** gt_lock is also taken in irq contexts. */ + struct spinlock gt_lock; + + struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; + + /** gmbus_mutex protects against concurrent usage of the single hw gmbus + * controller on different i2c buses. */ + struct mutex gmbus_mutex; + + /** + * Base address of the gmbus and gpio block. 
+ */ + uint32_t gpio_mmio_base; + + struct pci_dev *bridge_dev; + struct intel_ring_buffer ring[I915_NUM_RINGS]; + uint32_t next_seqno; + + drm_dma_handle_t *status_page_dmah; + uint32_t counter; + struct drm_i915_gem_object *pwrctx; + struct drm_i915_gem_object *renderctx; + + struct resource mch_res; + + atomic_t irq_received; + + /* protects the irq masks */ + spinlock_t irq_lock; + + /* DPIO indirect register protection */ + spinlock_t dpio_lock; + + /** Cached value of IMR to avoid reads in updating the bitfield */ + u32 pipestat[2]; + u32 irq_mask; + u32 gt_irq_mask; + u32 pch_irq_mask; + + u32 hotplug_supported_mask; + struct work_struct hotplug_work; + + int num_pipe; + int num_pch_pll; + + /* For hangcheck timer */ +#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ + struct timer_list hangcheck_timer; + int hangcheck_count; + uint32_t last_acthd[I915_NUM_RINGS]; + uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; + + unsigned int stop_rings; + + unsigned long cfb_size; + unsigned int cfb_fb; + enum plane cfb_plane; + int cfb_y; + struct intel_fbc_work *fbc_work; + + struct intel_opregion opregion; + + /* overlay */ + struct intel_overlay *overlay; + bool sprite_scaling_enabled; + + /* LVDS info */ + int backlight_level; /* restore backlight to this value */ + bool backlight_enabled; + struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ + struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ + + /* Feature bits from the VBIOS */ + unsigned int int_tv_support:1; + unsigned int lvds_dither:1; + unsigned int lvds_vbt:1; + unsigned int int_crt_support:1; + unsigned int lvds_use_ssc:1; + unsigned int display_clock_mode:1; + int lvds_ssc_freq; + unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ + unsigned int lvds_val; /* used for checking LVDS channel mode */ + struct { + int rate; + int lanes; + int preemphasis; + int vswing; + + bool initialized; + bool support; + int bpp; + struct edp_power_seq pps; + } edp; + bool no_aux_handshake; + + struct notifier_block lid_notifier; + + int crt_ddc_pin; + struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ + int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ + int num_fence_regs; /* 8 on pre-965, 16 otherwise */ + + unsigned int fsb_freq, mem_freq, is_ddr3; + + spinlock_t error_lock; + /* Protected by dev->error_lock. */ + struct drm_i915_error_state *first_error; + struct work_struct error_work; + struct completion error_completion; + struct workqueue_struct *wq; + + /* Display functions */ + struct drm_i915_display_funcs display; + + /* PCH chipset type */ + enum intel_pch pch_type; + + unsigned long quirks; + + /* Register state */ + bool modeset_on_lid; u8 saveLBB; u32 saveDSPACNTR; u32 saveDSPBCNTR; u32 saveDSPARB; + u32 saveHWS; u32 savePIPEACONF; u32 savePIPEBCONF; u32 savePIPEASRC; @@ -547,206 +676,10 @@ struct i915_suspend_saved_registers { u32 savePIPEB_LINK_N1; u32 saveMCHBAR_RENDER_STANDBY; u32 savePCH_PORT_HOTPLUG; -}; - -struct intel_gen6_power_mgmt { - struct work_struct work; - u32 pm_iir; - /* lock - irqsave spinlock that protectects the work_struct and - * pm_iir. */ - spinlock_t lock; - - /* The below variables an all the rps hw state are protected by - * dev->struct mutext. */ - u8 cur_delay; - u8 min_delay; - u8 max_delay; - - struct delayed_work delayed_resume_work; - - /* - * Protects RPS/RC6 register access and PCU communication. - * Must be taken after struct_mutex if nested. 
- */ - struct mutex hw_lock; -}; - -struct intel_ilk_power_mgmt { - u8 cur_delay; - u8 min_delay; - u8 max_delay; - u8 fmax; - u8 fstart; - - u64 last_count1; - unsigned long last_time1; - unsigned long chipset_power; - u64 last_count2; - struct timespec last_time2; - unsigned long gfx_power; - u8 corr; - - int c_m; - int r_t; - - struct drm_i915_gem_object *pwrctx; - struct drm_i915_gem_object *renderctx; -}; - -struct i915_dri1_state { - unsigned allow_batchbuffer : 1; - u32 __iomem *gfx_hws_cpu_addr; - - unsigned int cpp; - int back_offset; - int front_offset; - int current_page; - int page_flipping; - - uint32_t counter; -}; - -struct intel_l3_parity { - u32 *remap_info; - struct work_struct error_work; -}; - -typedef struct drm_i915_private { - struct drm_device *dev; - - const struct intel_device_info *info; - - int relative_constants_mode; - - void __iomem *regs; - - struct drm_i915_gt_funcs gt; - /** gt_fifo_count and the subsequent register write are synchronized - * with dev->struct_mutex. */ - unsigned gt_fifo_count; - /** forcewake_count is protected by gt_lock */ - unsigned forcewake_count; - /** gt_lock is also taken in irq contexts. */ - struct spinlock gt_lock; - - struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; - - /** gmbus_mutex protects against concurrent usage of the single hw gmbus - * controller on different i2c buses. */ - struct mutex gmbus_mutex; - - /** - * Base address of the gmbus and gpio block. - */ - uint32_t gpio_mmio_base; - - struct pci_dev *bridge_dev; - struct intel_ring_buffer ring[I915_NUM_RINGS]; - uint32_t next_seqno; - - drm_dma_handle_t *status_page_dmah; - struct resource mch_res; - - atomic_t irq_received; - - /* protects the irq masks */ - spinlock_t irq_lock; - - /* DPIO indirect register protection */ - spinlock_t dpio_lock; - - /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 pipestat[2]; - u32 irq_mask; - u32 gt_irq_mask; - u32 pch_irq_mask; - - u32 hotplug_supported_mask; - struct work_struct hotplug_work; - - int num_pipe; - int num_pch_pll; - - /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ -#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) - struct timer_list hangcheck_timer; - int hangcheck_count; - uint32_t last_acthd[I915_NUM_RINGS]; - uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; - - unsigned int stop_rings; - - unsigned long cfb_size; - unsigned int cfb_fb; - enum plane cfb_plane; - int cfb_y; - struct intel_fbc_work *fbc_work; - - struct intel_opregion opregion; - - /* overlay */ - struct intel_overlay *overlay; - bool sprite_scaling_enabled; - - /* LVDS info */ - int backlight_level; /* restore backlight to this value */ - bool backlight_enabled; - struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ - struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ - - /* Feature bits from the VBIOS */ - unsigned int int_tv_support:1; - unsigned int lvds_dither:1; - unsigned int lvds_vbt:1; - unsigned int int_crt_support:1; - unsigned int lvds_use_ssc:1; - unsigned int display_clock_mode:1; - int lvds_ssc_freq; - unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ - unsigned int lvds_val; /* used for checking LVDS channel mode */ - struct { - int rate; - int lanes; - int preemphasis; - int vswing; - - bool initialized; - bool support; - int bpp; - struct edp_power_seq pps; - } edp; - bool no_aux_handshake; - - int crt_ddc_pin; - struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ - int fence_reg_start; /* 4 if 
userland hasn't ioctl'd us yet */ - int num_fence_regs; /* 8 on pre-965, 16 otherwise */ - - unsigned int fsb_freq, mem_freq, is_ddr3; - - spinlock_t error_lock; - /* Protected by dev->error_lock. */ - struct drm_i915_error_state *first_error; - struct work_struct error_work; - struct completion error_completion; - struct workqueue_struct *wq; - - /* Display functions */ - struct drm_i915_display_funcs display; - - /* PCH chipset type */ - enum intel_pch pch_type; - unsigned short pch_id; - - unsigned long quirks; - - /* Register state */ - bool modeset_on_lid; struct { /** Bridge to intel-gtt-ko */ - struct intel_gtt *gtt; + const struct intel_gtt *gtt; /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; /** Memory allocator for GTT */ @@ -773,6 +706,8 @@ typedef struct drm_i915_private { /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; + u32 *l3_remap_info; + struct shrinker inactive_shrinker; /** @@ -850,6 +785,19 @@ typedef struct drm_i915_private { u32 object_count; } mm; + /* Old dri1 support infrastructure, beware the dragons ya fools entering + * here! */ + struct { + unsigned allow_batchbuffer : 1; + u32 __iomem *gfx_hws_cpu_addr; + + unsigned int cpp; + int back_offset; + int front_offset; + int current_page; + int page_flipping; + } dri1; + /* Kernel Modesetting */ struct sdvo_device_mapping sdvo_mappings[2]; @@ -863,7 +811,6 @@ typedef struct drm_i915_private { wait_queue_head_t pending_flip_queue; struct intel_pch_pll pch_plls[I915_NUM_PLLS]; - struct intel_ddi_plls ddi_plls; /* Reclocking support */ bool render_reclock_avail; @@ -873,17 +820,46 @@ typedef struct drm_i915_private { u16 orig_clock; int child_dev_num; struct child_device_config *child_dev; + struct drm_connector *int_lvds_connector; + struct drm_connector *int_edp_connector; bool mchbar_need_disable; - struct intel_l3_parity l3_parity; - /* gen6+ rps state */ - struct intel_gen6_power_mgmt rps; + struct { + struct work_struct work; + u32 pm_iir; + /* lock - irqsave spinlock that protectects the work_struct and + * pm_iir. */ + spinlock_t lock; + + /* The below variables an all the rps hw state are protected by + * dev->struct mutext. */ + u8 cur_delay; + u8 min_delay; + u8 max_delay; + } rps; /* ilk-only ips/rps state. Everything in here is protected by the global * mchdev_lock in intel_pm.c */ - struct intel_ilk_power_mgmt ips; + struct { + u8 cur_delay; + u8 min_delay; + u8 max_delay; + u8 fmax; + u8 fstart; + + u64 last_count1; + unsigned long last_time1; + unsigned long chipset_power; + u64 last_count2; + struct timespec last_time2; + unsigned long gfx_power; + u8 corr; + + int c_m; + int r_t; + } ips; enum no_fbc_reason no_fbc_reason; @@ -895,25 +871,14 @@ typedef struct drm_i915_private { /* list of fbdev register on this device */ struct intel_fbdev *fbdev; - /* - * The console may be contended at resume, but we don't - * want it to block on it. - */ - struct work_struct console_resume_work; - struct backlight_device *backlight; struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; + struct work_struct parity_error_work; bool hw_contexts_disabled; uint32_t hw_context_size; - - struct i915_suspend_saved_registers regfile; - - /* Old dri1 support infrastructure, beware the dragons ya fools entering - * here! 
*/ - struct i915_dri1_state dri1; } drm_i915_private_t; /* Iterate over initialised rings */ @@ -1155,14 +1120,9 @@ struct drm_i915_file_private { #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) -#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ - (dev)->pci_device == 0x0152 || \ - (dev)->pci_device == 0x015a) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) -#define IS_ULT(dev) (IS_HASWELL(dev) && \ - ((dev)->pci_device & 0xFF00) == 0x0A00) /* * The genX designation typically refers to the render engine, so render @@ -1208,13 +1168,6 @@ struct drm_i915_file_private { #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) -#define INTEL_PCH_DEVICE_ID_MASK 0xff00 -#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 -#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 -#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 -#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 -#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 - #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) @@ -1297,7 +1250,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); -extern void intel_console_resume(struct work_struct *work); /* i915_irq.c */ void i915_hangcheck_elapsed(unsigned long data); @@ -1305,7 +1257,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); extern void intel_gt_init(struct drm_device *dev); -extern void intel_gt_reset(struct drm_device *dev); void i915_error_state_free(struct kref *error_ref); @@ -1548,14 +1499,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start, unsigned long mappable_end, unsigned long end); -int i915_gem_gtt_init(struct drm_device *dev); -void i915_gem_gtt_fini(struct drm_device *dev); -static inline void i915_gem_chipset_flush(struct drm_device *dev) -{ - if (INTEL_INFO(dev)->gen < 6) - intel_gtt_chipset_flush(); -} - /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, @@ -1685,9 +1628,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); - #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index b0016bb65631..107f09befe92 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -845,12 +845,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * domain anymore. 
*/ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { i915_gem_clflush_object(obj); - i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); } } if (needs_clflush_after) - i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); return ret; } @@ -1345,17 +1345,30 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) trace_i915_gem_object_fault(obj, page_offset, true, write); /* Now bind it into the GTT if needed */ - ret = i915_gem_object_pin(obj, 0, true, false); - if (ret) - goto unlock; + if (!obj->map_and_fenceable) { + ret = i915_gem_object_unbind(obj); + if (ret) + goto unlock; + } + if (!obj->gtt_space) { + ret = i915_gem_object_bind_to_gtt(obj, 0, true, false); + if (ret) + goto unlock; - ret = i915_gem_object_set_to_gtt_domain(obj, write); - if (ret) - goto unpin; + ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (ret) + goto unlock; + } + + if (!obj->has_global_gtt_mapping) + i915_gem_gtt_bind_object(obj, obj->cache_level); ret = i915_gem_object_get_fence(obj); if (ret) - goto unpin; + goto unlock; + + if (i915_gem_object_is_inactive(obj)) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); obj->fault_mappable = true; @@ -1364,8 +1377,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Finally, remap it using the new GTT offset */ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); -unpin: - i915_gem_object_unpin(obj); unlock: mutex_unlock(&dev->struct_mutex); out: @@ -2011,12 +2022,12 @@ i915_add_request(struct intel_ring_buffer *ring, if (!dev_priv->mm.suspended) { if (i915_enable_hangcheck) { mod_timer(&dev_priv->hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + jiffies + + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } if (was_empty) { queue_delayed_work(dev_priv->wq, - &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); + &dev_priv->mm.retire_work, HZ); intel_mark_busy(dev_priv->dev); } } @@ -2207,8 +2218,7 @@ i915_gem_retire_work_handler(struct work_struct *work) /* Come back later if the device is busy... 
*/ if (!mutex_trylock(&dev->struct_mutex)) { - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); return; } @@ -2226,8 +2236,7 @@ i915_gem_retire_work_handler(struct work_struct *work) } if (!dev_priv->mm.suspended && !idle) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, - round_jiffies_up_relative(HZ)); + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); if (idle) intel_mark_idle(dev); @@ -2914,14 +2923,13 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, if (ret) return ret; - i915_gem_object_pin_pages(obj); - search_free: if (map_and_fenceable) - free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, - size, alignment, obj->cache_level, - 0, dev_priv->mm.gtt_mappable_end, - false); + free_space = + drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, + size, alignment, obj->cache_level, + 0, dev_priv->mm.gtt_mappable_end, + false); else free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, size, alignment, obj->cache_level, @@ -2929,60 +2937,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, if (free_space != NULL) { if (map_and_fenceable) - free_space = + obj->gtt_space = drm_mm_get_block_range_generic(free_space, size, alignment, obj->cache_level, 0, dev_priv->mm.gtt_mappable_end, false); else - free_space = + obj->gtt_space = drm_mm_get_block_generic(free_space, size, alignment, obj->cache_level, false); } - if (free_space == NULL) { + if (obj->gtt_space == NULL) { ret = i915_gem_evict_something(dev, size, alignment, obj->cache_level, map_and_fenceable, nonblocking); - if (ret) { - i915_gem_object_unpin_pages(obj); + if (ret) return ret; - } goto search_free; } if (WARN_ON(!i915_gem_valid_gtt_space(dev, - free_space, + obj->gtt_space, obj->cache_level))) { - i915_gem_object_unpin_pages(obj); - drm_mm_put_block(free_space); + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; return -EINVAL; } + ret = i915_gem_gtt_prepare_object(obj); if (ret) { - i915_gem_object_unpin_pages(obj); - drm_mm_put_block(free_space); + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; return ret; } + if (!dev_priv->mm.aliasing_ppgtt) + i915_gem_gtt_bind_object(obj, obj->cache_level); + list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - obj->gtt_space = free_space; - obj->gtt_offset = free_space->start; + obj->gtt_offset = obj->gtt_space->start; fenceable = - free_space->size == fence_size && - (free_space->start & (fence_alignment - 1)) == 0; + obj->gtt_space->size == fence_size && + (obj->gtt_space->start & (fence_alignment - 1)) == 0; mappable = obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; obj->map_and_fenceable = mappable && fenceable; - i915_gem_object_unpin_pages(obj); trace_i915_gem_object_bind(obj, map_and_fenceable); i915_gem_verify_gtt(dev); return 0; @@ -3051,7 +3059,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) return; i915_gem_clflush_object(obj); - i915_gem_chipset_flush(obj->base.dev); + intel_gtt_chipset_flush(); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; @@ -3446,16 +3454,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, } if (obj->gtt_space == NULL) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - ret = i915_gem_object_bind_to_gtt(obj, alignment, map_and_fenceable, nonblocking); if (ret) return ret; 
- - if (!dev_priv->mm.aliasing_ppgtt) - i915_gem_gtt_bind_object(obj, obj->cache_level); } if (!obj->has_global_gtt_mapping && map_and_fenceable) @@ -3829,7 +3832,7 @@ void i915_gem_l3_remap(struct drm_device *dev) if (!IS_IVYBRIDGE(dev)) return; - if (!dev_priv->l3_parity.remap_info) + if (!dev_priv->mm.l3_remap_info) return; misccpctl = I915_READ(GEN7_MISCCPCTL); @@ -3838,12 +3841,12 @@ void i915_gem_l3_remap(struct drm_device *dev) for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { u32 remap = I915_READ(GEN7_L3LOG_BASE + i); - if (remap && remap != dev_priv->l3_parity.remap_info[i/4]) + if (remap && remap != dev_priv->mm.l3_remap_info[i/4]) DRM_DEBUG("0x%x was already programmed to %x\n", GEN7_L3LOG_BASE + i, remap); - if (remap && !dev_priv->l3_parity.remap_info[i/4]) + if (remap && !dev_priv->mm.l3_remap_info[i/4]) DRM_DEBUG_DRIVER("Clearing remapped register\n"); - I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]); + I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]); } /* Make sure all the writes land before disabling dop clock gating */ @@ -3873,6 +3876,68 @@ void i915_gem_init_swizzling(struct drm_device *dev) I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); } +void i915_gem_init_ppgtt(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t pd_offset; + struct intel_ring_buffer *ring; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + uint32_t __iomem *pd_addr; + uint32_t pd_entry; + int i; + + if (!dev_priv->mm.aliasing_ppgtt) + return; + + + pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); + for (i = 0; i < ppgtt->num_pd_entries; i++) { + dma_addr_t pt_addr; + + if (dev_priv->mm.gtt->needs_dmar) + pt_addr = ppgtt->pt_dma_addr[i]; + else + pt_addr = page_to_phys(ppgtt->pt_pages[i]); + + pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); + pd_entry |= GEN6_PDE_VALID; + + writel(pd_entry, pd_addr + i); + } + readl(pd_addr); + + pd_offset = ppgtt->pd_offset; + pd_offset /= 64; /* in cachelines, */ + pd_offset <<= 16; + + if (INTEL_INFO(dev)->gen == 6) { + uint32_t ecochk, gab_ctl, ecobits; + + ecobits = I915_READ(GAC_ECO_BITS); + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); + + gab_ctl = I915_READ(GAB_CTL); + I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + + ecochk = I915_READ(GAM_ECOCHK); + I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | + ECOCHK_PPGTT_CACHE64B); + I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + } else if (INTEL_INFO(dev)->gen >= 7) { + I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); + /* GFX_MODE is per-ring on gen7+ */ + } + + for_each_ring(ring, dev_priv, i) { + if (INTEL_INFO(dev)->gen >= 7) + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + + I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); + } +} + static bool intel_enable_blt(struct drm_device *dev) { @@ -3895,7 +3960,7 @@ i915_gem_init_hw(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int ret; - if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) + if (!intel_enable_gtt()) return -EIO; if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) @@ -4230,7 +4295,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, page_cache_release(page); } } - i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); obj->phys_obj->cur_obj = NULL; obj->phys_obj = NULL; @@ -4317,7 +4382,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, return -EFAULT; } - 
i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); return 0; } @@ -4342,19 +4407,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) spin_unlock(&file_priv->mm.lock); } -static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) -{ - if (!mutex_is_locked(mutex)) - return false; - -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) - return mutex->owner == task; -#else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ - return false; -#endif -} - static int i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) { @@ -4365,15 +4417,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) struct drm_device *dev = dev_priv->dev; struct drm_i915_gem_object *obj; int nr_to_scan = sc->nr_to_scan; - bool unlock = true; int cnt; - if (!mutex_trylock(&dev->struct_mutex)) { - if (!mutex_is_locked_by(&dev->struct_mutex, current)) - return 0; - - unlock = false; - } + if (!mutex_trylock(&dev->struct_mutex)) + return 0; if (nr_to_scan) { nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); @@ -4389,7 +4436,6 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) if (obj->pin_count == 0 && obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; - if (unlock) - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); return cnt; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_context.c b/trunk/drivers/gpu/drm/i915/i915_gem_context.c index 0e510df80d73..05ed42f203d7 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_context.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_context.c @@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev, struct i915_hw_context *ctx; int ret, id; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); if (ctx == NULL) return ERR_PTR(-ENOMEM); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 48e4317e72dc..3eea143749f6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -128,6 +128,15 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, target_i915_obj->cache_level); } + /* The target buffer should have appeared before us in the + * exec_object list, so it should have a GTT space bound by now. 
+ */ + if (unlikely(target_offset == 0)) { + DRM_DEBUG("No GTT space found for object %d\n", + reloc->target_handle); + return ret; + } + /* Validate that the target is in a valid r/w GPU domain */ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { DRM_DEBUG("reloc with multiple write domains: " @@ -663,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, } if (flush_domains & I915_GEM_DOMAIN_CPU) - i915_gem_chipset_flush(ring->dev); + intel_gtt_chipset_flush(); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); @@ -791,7 +800,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, u32 exec_start, exec_len; u32 seqno; u32 mask; - u32 flags; int ret, mode, i; if (!i915_gem_check_execbuffer(args)) { @@ -803,14 +811,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) return ret; - flags = 0; - if (args->flags & I915_EXEC_SECURE) { - if (!file->is_master || !capable(CAP_SYS_ADMIN)) - return -EPERM; - - flags |= I915_DISPATCH_SECURE; - } - switch (args->flags & I915_EXEC_RING_MASK) { case I915_EXEC_DEFAULT: case I915_EXEC_RENDER: @@ -983,13 +983,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure - * batch" bit. Hence we need to pin secure batches into the global gtt. - * hsw should have this fixed, but let's be paranoid and do it - * unconditionally for now. */ - if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); - ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); if (ret) goto err; @@ -1035,7 +1028,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - trace_i915_gem_ring_dispatch(ring, seqno, flags); + trace_i915_gem_ring_dispatch(ring, seqno); exec_start = batch_obj->gtt_offset + args->batch_start_offset; exec_len = args->batch_len; @@ -1047,15 +1040,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); + exec_start, exec_len); if (ret) goto err; } } else { - ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); + ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); if (ret) goto err; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index f7ac61ee1504..df470b5e8d36 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -28,67 +28,19 @@ #include "i915_trace.h" #include "intel_drv.h" -typedef uint32_t gtt_pte_t; - -/* PPGTT stuff */ -#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) - -#define GEN6_PDE_VALID (1 << 0) -/* gen6+ has bit 11-4 for physical addr bit 39-32 */ -#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) - -#define GEN6_PTE_VALID (1 << 0) -#define GEN6_PTE_UNCACHED (1 << 1) -#define HSW_PTE_UNCACHED (0) -#define GEN6_PTE_CACHE_LLC (2 << 1) -#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) -#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) - -static inline gtt_pte_t pte_encode(struct drm_device *dev, - dma_addr_t addr, - enum i915_cache_level level) -{ - gtt_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); - - switch (level) { - case I915_CACHE_LLC_MLC: - /* Haswell doesn't set L3 this way */ - if (IS_HASWELL(dev)) - pte |= GEN6_PTE_CACHE_LLC; - else - pte |= GEN6_PTE_CACHE_LLC_MLC; - 
break; - case I915_CACHE_LLC: - pte |= GEN6_PTE_CACHE_LLC; - break; - case I915_CACHE_NONE: - if (IS_HASWELL(dev)) - pte |= HSW_PTE_UNCACHED; - else - pte |= GEN6_PTE_UNCACHED; - break; - default: - BUG(); - } - - - return pte; -} - /* PPGTT support for Sandybdrige/Gen6 and later */ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, unsigned first_entry, unsigned num_entries) { - gtt_pte_t *pt_vaddr; - gtt_pte_t scratch_pte; + uint32_t *pt_vaddr; + uint32_t scratch_pte; unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr, - I915_CACHE_LLC); + scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); + scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; while (num_entries) { last_pte = first_pte + num_entries; @@ -125,7 +77,6 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (!ppgtt) return ret; - ppgtt->dev = dev; ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, GFP_KERNEL); @@ -167,7 +118,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) i915_ppgtt_clear_range(ppgtt, 0, ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); - ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); + ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t); dev_priv->mm.aliasing_ppgtt = ppgtt; @@ -217,9 +168,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, const struct sg_table *pages, unsigned first_entry, - enum i915_cache_level cache_level) + uint32_t pte_flags) { - gtt_pte_t *pt_vaddr; + uint32_t *pt_vaddr, pte; unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned i, j, m, segment_len; @@ -237,8 +188,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); - pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr, - cache_level); + pte = GEN6_PTE_ADDR_ENCODE(page_addr); + pt_vaddr[j] = pte | pte_flags; /* grab the next page */ if (++m == segment_len) { @@ -262,10 +213,29 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { + uint32_t pte_flags = GEN6_PTE_VALID; + + switch (cache_level) { + case I915_CACHE_LLC_MLC: + pte_flags |= GEN6_PTE_CACHE_LLC_MLC; + break; + case I915_CACHE_LLC: + pte_flags |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + if (IS_HASWELL(obj->base.dev)) + pte_flags |= HSW_PTE_UNCACHED; + else + pte_flags |= GEN6_PTE_UNCACHED; + break; + default: + BUG(); + } + i915_ppgtt_insert_sg_entries(ppgtt, obj->pages, obj->gtt_space->start >> PAGE_SHIFT, - cache_level); + pte_flags); } void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, @@ -276,65 +246,23 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, obj->base.size >> PAGE_SHIFT); } -void i915_gem_init_ppgtt(struct drm_device *dev) +/* XXX kill agp_type! 
*/ +static unsigned int cache_level_to_agp_type(struct drm_device *dev, + enum i915_cache_level cache_level) { - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t pd_offset; - struct intel_ring_buffer *ring; - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - uint32_t __iomem *pd_addr; - uint32_t pd_entry; - int i; - - if (!dev_priv->mm.aliasing_ppgtt) - return; - - - pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); - for (i = 0; i < ppgtt->num_pd_entries; i++) { - dma_addr_t pt_addr; - - if (dev_priv->mm.gtt->needs_dmar) - pt_addr = ppgtt->pt_dma_addr[i]; - else - pt_addr = page_to_phys(ppgtt->pt_pages[i]); - - pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); - pd_entry |= GEN6_PDE_VALID; - - writel(pd_entry, pd_addr + i); - } - readl(pd_addr); - - pd_offset = ppgtt->pd_offset; - pd_offset /= 64; /* in cachelines, */ - pd_offset <<= 16; - - if (INTEL_INFO(dev)->gen == 6) { - uint32_t ecochk, gab_ctl, ecobits; - - ecobits = I915_READ(GAC_ECO_BITS); - I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); - - gab_ctl = I915_READ(GAB_CTL); - I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); - - ecochk = I915_READ(GAM_ECOCHK); - I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | - ECOCHK_PPGTT_CACHE64B); - I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - } else if (INTEL_INFO(dev)->gen >= 7) { - I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); - /* GFX_MODE is per-ring on gen7+ */ - } - - for_each_ring(ring, dev_priv, i) { - if (INTEL_INFO(dev)->gen >= 7) - I915_WRITE(RING_MODE_GEN7(ring), - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - - I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); + switch (cache_level) { + case I915_CACHE_LLC_MLC: + if (INTEL_INFO(dev)->gen >= 6) + return AGP_USER_CACHED_MEMORY_LLC_MLC; + /* Older chipsets do not have this extra level of CPU + * cacheing, so fallthrough and request the PTE simply + * as cached. 
+ */ + case I915_CACHE_LLC: + return AGP_USER_CACHED_MEMORY; + default: + case I915_CACHE_NONE: + return AGP_USER_MEMORY; } } @@ -360,40 +288,13 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) dev_priv->mm.interruptible = interruptible; } - -static void i915_ggtt_clear_range(struct drm_device *dev, - unsigned first_entry, - unsigned num_entries) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - gtt_pte_t scratch_pte; - gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry; - const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; - int i; - - if (INTEL_INFO(dev)->gen < 6) { - intel_gtt_clear_range(first_entry, num_entries); - return; - } - - if (WARN(num_entries > max_entries, - "First entry = %d; Num entries = %d (max=%d)\n", - first_entry, num_entries, max_entries)) - num_entries = max_entries; - - scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC); - for (i = 0; i < num_entries; i++) - iowrite32(scratch_pte, >t_base[i]); - readl(gtt_base); -} - void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; /* First fill our portion of the GTT with scratch pages */ - i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE, + intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { @@ -401,7 +302,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) i915_gem_gtt_bind_object(obj, obj->cache_level); } - i915_gem_chipset_flush(dev); + intel_gtt_chipset_flush(); } int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) @@ -417,76 +318,21 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) return 0; } -/* - * Binds an object into the global gtt with the specified cache level. The object - * will be accessible to the GPU via commands whose operands reference offsets - * within the global GTT as well as accessible by the GPU through the GMADR - * mapped BAR (dev_priv->mm.gtt->gtt). - */ -static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, - enum i915_cache_level level) -{ - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct sg_table *st = obj->pages; - struct scatterlist *sg = st->sgl; - const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; - const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; - gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry; - int unused, i = 0; - unsigned int len, m = 0; - dma_addr_t addr; - - for_each_sg(st->sgl, sg, st->nents, unused) { - len = sg_dma_len(sg) >> PAGE_SHIFT; - for (m = 0; m < len; m++) { - addr = sg_dma_address(sg) + (m << PAGE_SHIFT); - iowrite32(pte_encode(dev, addr, level), >t_entries[i]); - i++; - } - } - - BUG_ON(i > max_entries); - BUG_ON(i != obj->base.size / PAGE_SIZE); - - /* XXX: This serves as a posting read to make sure that the PTE has - * actually been updated. There is some concern that even though - * registers and PTEs are within the same BAR that they are potentially - * of NUMA access patterns. Therefore, even with the way we assume - * hardware should work, we must keep this posting read for paranoia. - */ - if (i != 0) - WARN_ON(readl(>t_entries[i-1]) != pte_encode(dev, addr, level)); - - /* This next bit makes the above posting read even more important. 
We - * want to flush the TLBs only after we're certain all the PTE updates - * have finished. - */ - I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); - POSTING_READ(GFX_FLSH_CNTL_GEN6); -} - void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct drm_device *dev = obj->base.dev; - if (INTEL_INFO(dev)->gen < 6) { - unsigned int flags = (cache_level == I915_CACHE_NONE) ? - AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; - intel_gtt_insert_sg_entries(obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, - flags); - } else { - gen6_ggtt_bind_object(obj, cache_level); - } + unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); + intel_gtt_insert_sg_entries(obj->pages, + obj->gtt_space->start >> PAGE_SHIFT, + agp_type); obj->has_global_gtt_mapping = 1; } void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { - i915_ggtt_clear_range(obj->base.dev, - obj->gtt_space->start >> PAGE_SHIFT, + intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT); obj->has_global_gtt_mapping = 0; @@ -544,161 +390,5 @@ void i915_gem_init_global_gtt(struct drm_device *dev, dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; /* ... but ensure that we clear the entire range. */ - i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE); -} - -static int setup_scratch_page(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct page *page; - dma_addr_t dma_addr; - - page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); - if (page == NULL) - return -ENOMEM; - get_page(page); - set_pages_uc(page, 1); - -#ifdef CONFIG_INTEL_IOMMU - dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(dev->pdev, dma_addr)) - return -EINVAL; -#else - dma_addr = page_to_phys(page); -#endif - dev_priv->mm.gtt->scratch_page = page; - dev_priv->mm.gtt->scratch_page_dma = dma_addr; - - return 0; -} - -static void teardown_scratch_page(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - set_pages_wb(dev_priv->mm.gtt->scratch_page, 1); - pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - put_page(dev_priv->mm.gtt->scratch_page); - __free_page(dev_priv->mm.gtt->scratch_page); -} - -static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) -{ - snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; - snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; - return snb_gmch_ctl << 20; -} - -static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl) -{ - snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; - snb_gmch_ctl &= SNB_GMCH_GMS_MASK; - return snb_gmch_ctl << 25; /* 32 MB units */ -} - -static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) -{ - static const int stolen_decoder[] = { - 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; - snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT; - snb_gmch_ctl &= IVB_GMCH_GMS_MASK; - return stolen_decoder[snb_gmch_ctl] << 20; -} - -int i915_gem_gtt_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - phys_addr_t gtt_bus_addr; - u16 snb_gmch_ctl; - int ret; - - /* On modern platforms we need not worry ourself with the legacy - * hostbridge query stuff. 
Skip it entirely - */ - if (INTEL_INFO(dev)->gen < 6) { - ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); - if (!ret) { - DRM_ERROR("failed to set up gmch\n"); - return -EIO; - } - - dev_priv->mm.gtt = intel_gtt_get(); - if (!dev_priv->mm.gtt) { - DRM_ERROR("Failed to initialize GTT\n"); - intel_gmch_remove(); - return -ENODEV; - } - return 0; - } - - dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL); - if (!dev_priv->mm.gtt) - return -ENOMEM; - - if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) - pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); - - /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ - gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); - dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2); - - /* i9xx_setup */ - pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - dev_priv->mm.gtt->gtt_total_entries = - gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t); - if (INTEL_INFO(dev)->gen < 7) - dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); - else - dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl); - - dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT; - /* 64/512MB is the current min/max we actually know of, but this is just a - * coarse sanity check. - */ - if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 || - dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) { - DRM_ERROR("Unknown GMADR entries (%d)\n", - dev_priv->mm.gtt->gtt_mappable_entries); - ret = -ENXIO; - goto err_out; - } - - ret = setup_scratch_page(dev); - if (ret) { - DRM_ERROR("Scratch setup failed\n"); - goto err_out; - } - - dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr, - dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); - if (!dev_priv->mm.gtt->gtt) { - DRM_ERROR("Failed to map the gtt page table\n"); - teardown_scratch_page(dev); - ret = -ENOMEM; - goto err_out; - } - - /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. 
*/ - DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8); - DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8); - DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20); - - return 0; - -err_out: - kfree(dev_priv->mm.gtt); - if (INTEL_INFO(dev)->gen < 6) - intel_gmch_remove(); - return ret; -} - -void i915_gem_gtt_fini(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - iounmap(dev_priv->mm.gtt->gtt); - teardown_scratch_page(dev); - if (INTEL_INFO(dev)->gen < 6) - intel_gmch_remove(); - kfree(dev_priv->mm.gtt); + intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); } diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index 2604867e6b7d..32e1bda865b8 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -122,10 +122,7 @@ static int i915_pipe_enabled(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - - return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; + return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; } /* Called from drm generic code, passed a 'crtc', which @@ -185,8 +182,6 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, int vbl_start, vbl_end, htotal, vtotal; bool in_vbl = true; int ret = 0; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " @@ -195,7 +190,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, } /* Get vtotal. */ - vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); + vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); if (INTEL_INFO(dev)->gen >= 4) { /* No obvious pixelcount register. Only query vertical @@ -215,13 +210,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, */ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; - htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); + htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); *vpos = position / htotal; *hpos = position - (*vpos * htotal); } /* Query vblank area. */ - vbl = I915_READ(VBLANK(cpu_transcoder)); + vbl = I915_READ(VBLANK(pipe)); /* Test position against vblank region. 
*/ vbl_start = vbl & 0x1fff; @@ -357,7 +352,8 @@ static void notify_ring(struct drm_device *dev, if (i915_enable_hangcheck) { dev_priv->hangcheck_count = 0; mod_timer(&dev_priv->hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + jiffies + + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } } @@ -378,7 +374,7 @@ static void gen6_pm_rps_work(struct work_struct *work) if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) return; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->dev->struct_mutex); if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) new_delay = dev_priv->rps.cur_delay + 1; @@ -393,7 +389,7 @@ static void gen6_pm_rps_work(struct work_struct *work) gen6_set_rps(dev_priv->dev, new_delay); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->dev->struct_mutex); } @@ -409,7 +405,7 @@ static void gen6_pm_rps_work(struct work_struct *work) static void ivybridge_parity_work(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - l3_parity.error_work); + parity_error_work); u32 error_status, row, bank, subbank; char *parity_event[5]; uint32_t misccpctl; @@ -473,7 +469,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev) I915_WRITE(GTIMR, dev_priv->gt_irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); + queue_work(dev_priv->wq, &dev_priv->parity_error_work); } static void snb_gt_irq_handler(struct drm_device *dev, @@ -524,7 +520,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, queue_work(dev_priv->wq, &dev_priv->rps.work); } -static irqreturn_t valleyview_irq_handler(int irq, void *arg) +static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -610,9 +606,6 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; - if (pch_iir & SDE_HOTPLUG_MASK) - queue_work(dev_priv->wq, &dev_priv->hotplug_work); - if (pch_iir & SDE_AUDIO_POWER_MASK) DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", (pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -653,9 +646,6 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; - if (pch_iir & SDE_HOTPLUG_MASK_CPT) - queue_work(dev_priv->wq, &dev_priv->hotplug_work); - if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -680,7 +670,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) I915_READ(FDI_RX_IIR(pipe))); } -static irqreturn_t ivybridge_irq_handler(int irq, void *arg) +static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -719,6 +709,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) if (de_iir & DE_PCH_EVENT_IVB) { u32 pch_iir = I915_READ(SDEIIR); + if (pch_iir & SDE_HOTPLUG_MASK_CPT) + queue_work(dev_priv->wq, &dev_priv->hotplug_work); cpt_irq_handler(dev, pch_iir); /* clear PCH hotplug event before clear CPU irq */ @@ -753,12 +745,13 @@ static void ilk_gt_irq_handler(struct drm_device *dev, notify_ring(dev, &dev_priv->ring[VCS]); } -static irqreturn_t ironlake_irq_handler(int irq, void *arg) +static irqreturn_t 
ironlake_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; + u32 hotplug_mask; atomic_inc(&dev_priv->irq_received); @@ -776,6 +769,11 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) (!IS_GEN6(dev) || pm_iir == 0)) goto done; + if (HAS_PCH_CPT(dev)) + hotplug_mask = SDE_HOTPLUG_MASK_CPT; + else + hotplug_mask = SDE_HOTPLUG_MASK; + ret = IRQ_HANDLED; if (IS_GEN5(dev)) @@ -804,6 +802,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { + if (pch_iir & hotplug_mask) + queue_work(dev_priv->wq, &dev_priv->hotplug_work); if (HAS_PCH_CPT(dev)) cpt_irq_handler(dev, pch_iir); else @@ -1751,7 +1751,7 @@ void i915_hangcheck_elapsed(unsigned long data) repeat: /* Reset timer case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } /* drm_dma.h hooks @@ -1956,7 +1956,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev) u32 enable_mask; u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; - u32 render_irqs; u16 msid; enable_mask = I915_DISPLAY_PORT_INTERRUPT; @@ -1996,12 +1995,21 @@ static int valleyview_irq_postinstall(struct drm_device *dev) I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); + dev_priv->gt_irq_mask = ~0; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - - render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | - GEN6_BLITTER_USER_INTERRUPT; - I915_WRITE(GTIER, render_irqs); + I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | + GT_GEN6_BLT_CS_ERROR_INTERRUPT | + GT_GEN6_BLT_USER_INTERRUPT | + GT_GEN6_BSD_USER_INTERRUPT | + GT_GEN6_BSD_CS_ERROR_INTERRUPT | + GT_GEN7_L3_PARITY_ERROR_INTERRUPT | + GT_PIPE_NOTIFY | + GT_RENDER_CS_ERROR_INTERRUPT | + GT_SYNC_STATUS | + GT_USER_INTERRUPT); POSTING_READ(GTIER); /* ack & enable invalid PTE error interrupts */ @@ -2011,6 +2019,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev) #endif I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); +#if 0 /* FIXME: check register definitions; some have moved */ /* Note HDMI and DP share bits */ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) hotplug_en |= HDMIB_HOTPLUG_INT_EN; @@ -2018,14 +2027,15 @@ static int valleyview_irq_postinstall(struct drm_device *dev) hotplug_en |= HDMIC_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) hotplug_en |= HDMID_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) hotplug_en |= SDVOC_HOTPLUG_INT_EN; - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) hotplug_en |= SDVOB_HOTPLUG_INT_EN; if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { hotplug_en |= CRT_HOTPLUG_INT_EN; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; } +#endif I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); @@ -2119,7 +2129,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev) return 0; } -static irqreturn_t i8xx_irq_handler(int irq, void *arg) +static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS) { 
struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2297,7 +2307,7 @@ static int i915_irq_postinstall(struct drm_device *dev) return 0; } -static irqreturn_t i915_irq_handler(int irq, void *arg) +static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2535,7 +2545,7 @@ static int i965_irq_postinstall(struct drm_device *dev) return 0; } -static irqreturn_t i965_irq_handler(int irq, void *arg) +static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2681,7 +2691,7 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); - INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); + INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index 97fbd9d1823b..a4162ddff6c5 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -26,7 +26,6 @@ #define _I915_REG_H_ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) -#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) @@ -41,14 +40,6 @@ */ #define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_VGA_DISABLE (1 << 1) -#define SNB_GMCH_CTRL 0x50 -#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ -#define SNB_GMCH_GGMS_MASK 0x3 -#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ -#define SNB_GMCH_GMS_MASK 0x1f -#define IVB_GMCH_GMS_SHIFT 4 -#define IVB_GMCH_GMS_MASK 0xf - /* PCI config space */ @@ -114,6 +105,23 @@ #define GEN6_GRDOM_MEDIA (1 << 2) #define GEN6_GRDOM_BLT (1 << 3) +/* PPGTT stuff */ +#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) + +#define GEN6_PDE_VALID (1 << 0) +#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */ +/* gen6+ has bit 11-4 for physical addr bit 39-32 */ +#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) + +#define GEN6_PTE_VALID (1 << 0) +#define GEN6_PTE_UNCACHED (1 << 1) +#define HSW_PTE_UNCACHED (0) +#define GEN6_PTE_CACHE_LLC (2 << 1) +#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) +#define GEN6_PTE_CACHE_BITS (3 << 1) +#define GEN6_PTE_GFDT (1 << 3) +#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) + #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) @@ -233,18 +241,11 @@ */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ -#define MI_FLUSH_DW_STORE_INDEX (1<<21) -#define MI_INVALIDATE_TLB (1<<18) -#define MI_FLUSH_DW_OP_STOREDW (1<<14) -#define MI_INVALIDATE_BSD (1<<7) -#define MI_FLUSH_DW_USE_GTT (1<<2) -#define MI_FLUSH_DW_USE_PPGTT (0<<2) +#define MI_INVALIDATE_TLB (1<<18) +#define MI_INVALIDATE_BSD (1<<7) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) -#define MI_BATCH_NON_SECURE (1) -/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. 
*/ -#define MI_BATCH_NON_SECURE_I965 (1<<8) -#define MI_BATCH_PPGTT_HSW (1<<8) -#define MI_BATCH_NON_SECURE_HSW (1<<13) +#define MI_BATCH_NON_SECURE (1) +#define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ @@ -368,7 +369,6 @@ #define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ #define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ #define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ -#define DPIO_PLL_REFCLK_SEL_MASK 3 #define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ #define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ #define _DPIO_REFSFR_B 0x8034 @@ -384,9 +384,6 @@ #define DPIO_FASTCLK_DISABLE 0x8100 -#define DPIO_DATA_CHANNEL1 0x8220 -#define DPIO_DATA_CHANNEL2 0x8420 - /* * Fence registers */ @@ -524,7 +521,6 @@ */ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 0x02090 -#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define MI_MODE 0x0209c @@ -551,8 +547,6 @@ #define IIR 0x020a4 #define IMR 0x020a8 #define ISR 0x020ac -#define VLV_GUNIT_CLOCK_GATE 0x182060 -#define GCFG_DIS (1<<8) #define VLV_IIR_RW 0x182084 #define VLV_IER 0x1820a0 #define VLV_IIR 0x1820a4 @@ -667,7 +661,6 @@ #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ #define CACHE_MODE_0 0x02120 /* 915+ only */ -#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) @@ -677,8 +670,6 @@ #define CM0_RC_OP_FLUSH_DISABLE (1<<0) #define BB_ADDR 0x02140 /* 8 bytes */ #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ -#define GFX_FLSH_CNTL_GEN6 0x101008 -#define GFX_FLSH_CNTL_EN (1<<0) #define ECOSKPD 0x021d0 #define ECO_GATING_CX_ONLY (1<<3) #define ECO_FLIP_DONE (1<<0) @@ -1568,14 +1559,14 @@ #define _VSYNCSHIFT_B 0x61028 -#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) -#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) -#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) -#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) -#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) -#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) +#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) +#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) +#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) +#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) +#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) +#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) -#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) +#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) /* VGA port control */ #define ADPA 0x61100 @@ -2650,7 +2641,6 @@ #define PIPECONF_GAMMA (1<<24) #define PIPECONF_FORCE_BORDER (1<<25) #define PIPECONF_INTERLACE_MASK (7 << 21) -#define PIPECONF_INTERLACE_MASK_HSW (3 << 21) /* Note that pre-gen3 does not support interlaced display directly. Panel * fitting must be disabled on pre-ilk for interlaced. 
*/ #define PIPECONF_PROGRESSIVE (0 << 21) @@ -2721,7 +2711,7 @@ #define PIPE_12BPC (3 << 5) #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) -#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) +#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) @@ -3008,19 +2998,12 @@ #define DISPPLANE_GAMMA_ENABLE (1<<30) #define DISPPLANE_GAMMA_DISABLE 0 #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) -#define DISPPLANE_YUV422 (0x0<<26) #define DISPPLANE_8BPP (0x2<<26) -#define DISPPLANE_BGRA555 (0x3<<26) -#define DISPPLANE_BGRX555 (0x4<<26) -#define DISPPLANE_BGRX565 (0x5<<26) -#define DISPPLANE_BGRX888 (0x6<<26) -#define DISPPLANE_BGRA888 (0x7<<26) -#define DISPPLANE_RGBX101010 (0x8<<26) -#define DISPPLANE_RGBA101010 (0x9<<26) -#define DISPPLANE_BGRX101010 (0xa<<26) -#define DISPPLANE_RGBX161616 (0xc<<26) -#define DISPPLANE_RGBX888 (0xe<<26) -#define DISPPLANE_RGBA888 (0xf<<26) +#define DISPPLANE_15_16BPP (0x4<<26) +#define DISPPLANE_16BPP (0x5<<26) +#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) +#define DISPPLANE_32BPP (0x7<<26) +#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) #define DISPPLANE_STEREO_ENABLE (1<<25) #define DISPPLANE_STEREO_DISABLE 0 #define DISPPLANE_SEL_PIPE_SHIFT 24 @@ -3041,8 +3024,6 @@ #define _DSPASIZE 0x70190 #define _DSPASURF 0x7019C /* 965+ only */ #define _DSPATILEOFF 0x701A4 /* 965+ only */ -#define _DSPAOFFSET 0x701A4 /* HSW */ -#define _DSPASURFLIVE 0x701AC #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) @@ -3052,8 +3033,6 @@ #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) #define DSPLINOFF(plane) DSPADDR(plane) -#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) -#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) @@ -3099,8 +3078,6 @@ #define _DSPBSIZE 0x71190 #define _DSPBSURF 0x7119C #define _DSPBTILEOFF 0x711A4 -#define _DSPBOFFSET 0x711A4 -#define _DSPBSURFLIVE 0x711AC /* Sprite A control */ #define _DVSACNTR 0x72180 @@ -3166,7 +3143,6 @@ #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) -#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) #define _SPRA_CTL 0x70280 #define SPRITE_ENABLE (1<<31) @@ -3201,8 +3177,6 @@ #define _SPRA_SURF 0x7029c #define _SPRA_KEYMAX 0x702a0 #define _SPRA_TILEOFF 0x702a4 -#define _SPRA_OFFSET 0x702a4 -#define _SPRA_SURFLIVE 0x702ac #define _SPRA_SCALE 0x70304 #define SPRITE_SCALE_ENABLE (1<<31) #define SPRITE_FILTER_MASK (3<<29) @@ -3223,8 +3197,6 @@ #define _SPRB_SURF 0x7129c #define _SPRB_KEYMAX 0x712a0 #define _SPRB_TILEOFF 0x712a4 -#define _SPRB_OFFSET 0x712a4 -#define _SPRB_SURFLIVE 0x712ac #define _SPRB_SCALE 0x71304 #define _SPRB_GAMC 0x71400 @@ -3238,10 +3210,8 @@ #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) -#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) #define SPRGAMC(pipe) 
_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) -#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) /* VBIOS regs */ #define VGACNTRL 0x71400 @@ -3276,6 +3246,12 @@ #define DISPLAY_PORT_PLL_BIOS_1 0x46010 #define DISPLAY_PORT_PLL_BIOS_2 0x46014 +#define PCH_DSPCLK_GATE_D 0x42020 +# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) +# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) +# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) +# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) + #define PCH_3DCGDIS0 0x46020 # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) @@ -3325,22 +3301,20 @@ #define _PIPEB_LINK_M2 0x61048 #define _PIPEB_LINK_N2 0x6104c -#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) -#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) -#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) -#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) -#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) -#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1) -#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2) -#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2) +#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) +#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) +#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) +#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) +#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) +#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) +#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) +#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) /* CPU panel fitter */ /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ #define _PFA_CTL_1 0x68080 #define _PFB_CTL_1 0x68880 #define PF_ENABLE (1<<31) -#define PF_PIPE_SEL_MASK_IVB (3<<29) -#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) #define PF_FILTER_MASK (3<<23) #define PF_FILTER_PROGRAMMED (0<<23) #define PF_FILTER_MED_3x3 (1<<23) @@ -3449,13 +3423,15 @@ #define ILK_HDCP_DISABLE (1<<25) #define ILK_eDP_A_DISABLE (1<<24) #define ILK_DESKTOP (1<<23) +#define ILK_DSPCLK_GATE 0x42020 +#define IVB_VRHUNIT_CLK_GATE (1<<28) +#define ILK_DPARB_CLK_GATE (1<<5) +#define ILK_DPFD_CLK_GATE (1<<7) -#define ILK_DSPCLK_GATE_D 0x42020 -#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) -#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) -#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8) -#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7) -#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5) +/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ +#define ILK_CLK_FBC (1<<7) +#define ILK_DPFC_DIS1 (1<<8) +#define ILK_DPFC_DIS2 (1<<9) #define IVB_CHICKEN3 0x4200c # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) @@ -3471,21 +3447,14 @@ #define GEN7_L3CNTLREG1 0xB01C #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C -#define GEN7_L3AGDIS (1<<19) #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 -#define GEN7_L3SQCREG4 0xb034 -#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) - /* WaCatErrorRejectionIssue */ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) -#define HSW_FUSE_STRAP 0x42014 -#define HSW_CDCLK_LIMIT (1 << 24) - /* PCH */ /* south 
display engine interrupt: IBX */ @@ -3717,7 +3686,7 @@ #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) -#define VLV_VIDEO_DIP_CTL_A 0x60200 +#define VLV_VIDEO_DIP_CTL_A 0x60220 #define VLV_VIDEO_DIP_DATA_A 0x60208 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 @@ -3826,22 +3795,16 @@ #define TRANS_6BPC (2<<5) #define TRANS_12BPC (3<<5) -#define _TRANSA_CHICKEN1 0xf0060 -#define _TRANSB_CHICKEN1 0xf1060 -#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) -#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) #define _TRANSA_CHICKEN2 0xf0064 #define _TRANSB_CHICKEN2 0xf1064 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) -#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) - +#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) #define SOUTH_CHICKEN1 0xc2000 #define FDIA_PHASE_SYNC_SHIFT_OVR 19 #define FDIA_PHASE_SYNC_SHIFT_EN 18 -#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) -#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) -#define FDI_BC_BIFURCATION_SELECT (1 << 12) +#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) +#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) #define SOUTH_CHICKEN2 0xc2004 #define DPLS_EDP_PPS_FIX_DIS (1<<0) @@ -3853,7 +3816,6 @@ #define SOUTH_DSPCLK_GATE_D 0xc2020 #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) -#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) /* CPU: FDI_TX */ #define _FDI_TXA_CTL 0x60100 @@ -3939,21 +3901,16 @@ #define FDI_PORT_WIDTH_2X_LPT (1<<19) #define FDI_PORT_WIDTH_1X_LPT (0<<19) -#define _FDI_RXA_MISC 0xf0010 -#define _FDI_RXB_MISC 0xf1010 -#define FDI_RX_PWRDN_LANE1_MASK (3<<26) -#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26) -#define FDI_RX_PWRDN_LANE0_MASK (3<<24) -#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24) -#define FDI_RX_TP1_TO_TP2_48 (2<<20) -#define FDI_RX_TP1_TO_TP2_64 (3<<20) -#define FDI_RX_FDI_DELAY_90 (0x90<<0) -#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) - +#define _FDI_RXA_MISC 0xf0010 +#define _FDI_RXB_MISC 0xf1010 #define _FDI_RXA_TUSIZE1 0xf0030 #define _FDI_RXA_TUSIZE2 0xf0038 #define _FDI_RXB_TUSIZE1 0xf1030 #define _FDI_RXB_TUSIZE2 0xf1038 +#define FDI_RX_TP1_TO_TP2_48 (2<<20) +#define FDI_RX_TP1_TO_TP2_64 (3<<20) +#define FDI_RX_FDI_DELAY_90 (0x90<<0) +#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) @@ -4046,11 +4003,6 @@ #define PANEL_LIGHT_ON_DELAY_SHIFT 0 #define PCH_PP_OFF_DELAYS 0xc720c -#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30) -#define PANEL_POWER_PORT_LVDS (0 << 30) -#define PANEL_POWER_PORT_DP_A (1 << 30) -#define PANEL_POWER_PORT_DP_C (2 << 30) -#define PANEL_POWER_PORT_DP_D (3 << 30) #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) #define PANEL_POWER_DOWN_DELAY_SHIFT 16 #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) @@ -4098,7 +4050,7 @@ #define TRANS_DP_CTL_A 0xe0300 #define TRANS_DP_CTL_B 0xe1300 #define TRANS_DP_CTL_C 0xe2300 -#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B) +#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) #define TRANS_DP_OUTPUT_ENABLE (1<<31) #define TRANS_DP_PORT_SEL_B (0<<29) #define TRANS_DP_PORT_SEL_C (1<<29) @@ -4156,8 +4108,6 @@ #define FORCEWAKE_ACK_HSW 0x130044 #define 
FORCEWAKE_ACK 0x130090 #define FORCEWAKE_MT 0xa188 /* multi-threaded */ -#define FORCEWAKE_KERNEL 0x1 -#define FORCEWAKE_USER 0x2 #define FORCEWAKE_MT_ACK 0x130040 #define ECOBUS 0xa180 #define FORCEWAKE_MT_ENABLE (1<<5) @@ -4270,10 +4220,6 @@ #define GEN6_READ_OC_PARAMS 0xc #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 -#define GEN6_PCODE_WRITE_RC6VIDS 0x4 -#define GEN6_PCODE_READ_RC6VIDS 0x5 -#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0 -#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0) #define GEN6_PCODE_DATA 0x138128 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 @@ -4305,15 +4251,6 @@ #define GEN7_L3LOG_BASE 0xB070 #define GEN7_L3LOG_SIZE 0x80 -#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ -#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100 -#define GEN7_MAX_PS_THREAD_DEP (8<<12) -#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) - -#define GEN7_ROW_CHICKEN2 0xe4f4 -#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 -#define DOP_CLOCK_GATING_DISABLE (1<<0) - #define G4X_AUD_VID_DID 0x62020 #define INTEL_AUDIO_DEVCL 0x808629FB #define INTEL_AUDIO_DEVBLC 0x80862801 @@ -4443,39 +4380,33 @@ #define HSW_PWR_WELL_CTL6 0x45414 /* Per-pipe DDI Function Control */ -#define TRANS_DDI_FUNC_CTL_A 0x60400 -#define TRANS_DDI_FUNC_CTL_B 0x61400 -#define TRANS_DDI_FUNC_CTL_C 0x62400 -#define TRANS_DDI_FUNC_CTL_EDP 0x6F400 -#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ - TRANS_DDI_FUNC_CTL_B) -#define TRANS_DDI_FUNC_ENABLE (1<<31) +#define PIPE_DDI_FUNC_CTL_A 0x60400 +#define PIPE_DDI_FUNC_CTL_B 0x61400 +#define PIPE_DDI_FUNC_CTL_C 0x62400 +#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 +#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ + PIPE_DDI_FUNC_CTL_B) +#define PIPE_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ -#define TRANS_DDI_PORT_MASK (7<<28) -#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) -#define TRANS_DDI_PORT_NONE (0<<28) -#define TRANS_DDI_MODE_SELECT_MASK (7<<24) -#define TRANS_DDI_MODE_SELECT_HDMI (0<<24) -#define TRANS_DDI_MODE_SELECT_DVI (1<<24) -#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24) -#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24) -#define TRANS_DDI_MODE_SELECT_FDI (4<<24) -#define TRANS_DDI_BPC_MASK (7<<20) -#define TRANS_DDI_BPC_8 (0<<20) -#define TRANS_DDI_BPC_10 (1<<20) -#define TRANS_DDI_BPC_6 (2<<20) -#define TRANS_DDI_BPC_12 (3<<20) -#define TRANS_DDI_PVSYNC (1<<17) -#define TRANS_DDI_PHSYNC (1<<16) -#define TRANS_DDI_EDP_INPUT_MASK (7<<12) -#define TRANS_DDI_EDP_INPUT_A_ON (0<<12) -#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) -#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) -#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) -#define TRANS_DDI_BFI_ENABLE (1<<4) -#define TRANS_DDI_PORT_WIDTH_X1 (0<<1) -#define TRANS_DDI_PORT_WIDTH_X2 (1<<1) -#define TRANS_DDI_PORT_WIDTH_X4 (3<<1) +#define PIPE_DDI_PORT_MASK (7<<28) +#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) +#define PIPE_DDI_MODE_SELECT_MASK (7<<24) +#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) +#define PIPE_DDI_MODE_SELECT_DVI (1<<24) +#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) +#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) +#define PIPE_DDI_MODE_SELECT_FDI (4<<24) +#define PIPE_DDI_BPC_MASK (7<<20) +#define PIPE_DDI_BPC_8 (0<<20) +#define PIPE_DDI_BPC_10 (1<<20) +#define PIPE_DDI_BPC_6 (2<<20) +#define PIPE_DDI_BPC_12 (3<<20) +#define PIPE_DDI_PVSYNC (1<<17) +#define PIPE_DDI_PHSYNC (1<<16) +#define PIPE_DDI_BFI_ENABLE (1<<4) +#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) +#define 
PIPE_DDI_PORT_WIDTH_X2 (1<<1) +#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) /* DisplayPort Transport Control */ #define DP_TP_CTL_A 0x64040 @@ -4489,16 +4420,12 @@ #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) -#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8) -#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8) #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) -#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) /* DisplayPort Transport Status */ #define DP_TP_STATUS_A 0x64044 #define DP_TP_STATUS_B 0x64144 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) -#define DP_TP_STATUS_IDLE_DONE (1<<25) #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) /* DDI Buffer Control */ @@ -4517,7 +4444,6 @@ #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ #define DDI_BUF_EMP_MASK (0xf<<24) #define DDI_BUF_IS_IDLE (1<<7) -#define DDI_A_4_LANES (1<<4) #define DDI_PORT_WIDTH_X1 (0<<1) #define DDI_PORT_WIDTH_X2 (1<<1) #define DDI_PORT_WIDTH_X4 (3<<1) @@ -4564,8 +4490,8 @@ /* SPLL */ #define SPLL_CTL 0x46020 #define SPLL_PLL_ENABLE (1<<31) -#define SPLL_PLL_SSC (1<<28) -#define SPLL_PLL_NON_SSC (2<<28) +#define SPLL_PLL_SCC (1<<28) +#define SPLL_PLL_NON_SCC (2<<28) #define SPLL_PLL_FREQ_810MHz (0<<26) #define SPLL_PLL_FREQ_1350MHz (1<<26) @@ -4574,7 +4500,7 @@ #define WRPLL_CTL2 0x46060 #define WRPLL_PLL_ENABLE (1<<31) #define WRPLL_PLL_SELECT_SSC (0x01<<28) -#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) +#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) /* WRPLL divider programming */ #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) @@ -4591,36 +4517,21 @@ #define PORT_CLK_SEL_SPLL (3<<29) #define PORT_CLK_SEL_WRPLL1 (4<<29) #define PORT_CLK_SEL_WRPLL2 (5<<29) -#define PORT_CLK_SEL_NONE (7<<29) - -/* Transcoder clock selection */ -#define TRANS_CLK_SEL_A 0x46140 -#define TRANS_CLK_SEL_B 0x46144 -#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) -/* For each transcoder, we need to select the corresponding port clock */ -#define TRANS_CLK_SEL_DISABLED (0x0<<29) -#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) - -#define _TRANSA_MSA_MISC 0x60410 -#define _TRANSB_MSA_MISC 0x61410 -#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ - _TRANSB_MSA_MISC) -#define TRANS_MSA_SYNC_CLK (1<<0) -#define TRANS_MSA_6_BPC (0<<5) -#define TRANS_MSA_8_BPC (1<<5) -#define TRANS_MSA_10_BPC (2<<5) -#define TRANS_MSA_12_BPC (3<<5) -#define TRANS_MSA_16_BPC (4<<5) + +/* Pipe clock selection */ +#define PIPE_CLK_SEL_A 0x46140 +#define PIPE_CLK_SEL_B 0x46144 +#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) +/* For each pipe, we need to select the corresponding port clock */ +#define PIPE_CLK_SEL_DISABLED (0x0<<29) +#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) /* LCPLL Control */ #define LCPLL_CTL 0x130040 #define LCPLL_PLL_DISABLE (1<<31) #define LCPLL_PLL_LOCK (1<<30) -#define LCPLL_CLK_FREQ_MASK (3<<26) -#define LCPLL_CLK_FREQ_450 (0<<26) #define LCPLL_CD_CLOCK_DISABLE (1<<25) #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) -#define LCPLL_CD_SOURCE_FCLK (1<<21) /* Pipe WM_LINETIME - watermark line time */ #define PIPE_WM_LINETIME_A 0x45270 diff --git a/trunk/drivers/gpu/drm/i915/i915_suspend.c b/trunk/drivers/gpu/drm/i915/i915_suspend.c index 63d4d30c39de..5854bddb1e9f 100644 --- a/trunk/drivers/gpu/drm/i915/i915_suspend.c +++ b/trunk/drivers/gpu/drm/i915/i915_suspend.c @@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) reg = (pipe == PIPE_A) ? 
_LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) - array = dev_priv->regfile.save_palette_a; + array = dev_priv->save_palette_a; else - array = dev_priv->regfile.save_palette_b; + array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) array[i] = I915_READ(reg + (i << 2)); @@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) - array = dev_priv->regfile.save_palette_a; + array = dev_priv->save_palette_a; else - array = dev_priv->regfile.save_palette_b; + array = dev_priv->save_palette_b; for (i = 0; i < 256; i++) I915_WRITE(reg + (i << 2), array[i]); @@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev) u16 cr_index, cr_data, st01; /* VGA color palette registers */ - dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); + dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); /* MSR bits */ - dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ); - if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { + dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; @@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev) i915_read_indexed(dev, cr_index, cr_data, 0x11) & (~0x80)); for (i = 0; i <= 0x24; i++) - dev_priv->regfile.saveCR[i] = + dev_priv->saveCR[i] = i915_read_indexed(dev, cr_index, cr_data, i); /* Make sure we don't turn off CR group 0 writes */ - dev_priv->regfile.saveCR[0x11] &= ~0x80; + dev_priv->saveCR[0x11] &= ~0x80; /* Attribute controller registers */ I915_READ8(st01); - dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX); + dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); for (i = 0; i <= 0x14; i++) - dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0); + dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); I915_READ8(st01); - I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX); + I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); I915_READ8(st01); /* Graphics controller registers */ for (i = 0; i < 9; i++) - dev_priv->regfile.saveGR[i] = + dev_priv->saveGR[i] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); - dev_priv->regfile.saveGR[0x10] = + dev_priv->saveGR[0x10] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); - dev_priv->regfile.saveGR[0x11] = + dev_priv->saveGR[0x11] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); - dev_priv->regfile.saveGR[0x18] = + dev_priv->saveGR[0x18] = i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); /* Sequencer registers */ for (i = 0; i < 8; i++) - dev_priv->regfile.saveSR[i] = + dev_priv->saveSR[i] = i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); } @@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev) u16 cr_index, cr_data, st01; /* MSR bits */ - I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); - if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { + I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { cr_index = VGA_CR_INDEX_CGA; cr_data = VGA_CR_DATA_CGA; st01 = VGA_ST01_CGA; @@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev) /* Sequencer registers, don't write SR07 */ for (i = 0; i < 7; i++) i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, - dev_priv->regfile.saveSR[i]); + dev_priv->saveSR[i]); /* CRT controller regs */ /* Enable CR group 0 writes */ - i915_write_indexed(dev, cr_index, cr_data, 0x11, 
dev_priv->regfile.saveCR[0x11]); + i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); for (i = 0; i <= 0x24; i++) - i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]); + i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); /* Graphics controller regs */ for (i = 0; i < 9; i++) i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, - dev_priv->regfile.saveGR[i]); + dev_priv->saveGR[i]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, - dev_priv->regfile.saveGR[0x10]); + dev_priv->saveGR[0x10]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, - dev_priv->regfile.saveGR[0x11]); + dev_priv->saveGR[0x11]); i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, - dev_priv->regfile.saveGR[0x18]); + dev_priv->saveGR[0x18]); /* Attribute controller registers */ I915_READ8(st01); /* switch back to index mode */ for (i = 0; i <= 0x14; i++) - i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0); + i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); I915_READ8(st01); /* switch back to index mode */ - I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20); + I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); I915_READ8(st01); /* VGA color palette registers */ - I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); + I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); } static void i915_save_modeset_reg(struct drm_device *dev) @@ -244,162 +244,156 @@ static void i915_save_modeset_reg(struct drm_device *dev) return; /* Cursor state */ - dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); - dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); - dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); - dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); - dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); - dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); + dev_priv->saveCURACNTR = I915_READ(_CURACNTR); + dev_priv->saveCURAPOS = I915_READ(_CURAPOS); + dev_priv->saveCURABASE = I915_READ(_CURABASE); + dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); + dev_priv->saveCURBPOS = I915_READ(_CURBPOS); + dev_priv->saveCURBBASE = I915_READ(_CURBBASE); if (IS_GEN2(dev)) - dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); + dev_priv->saveCURSIZE = I915_READ(CURSIZE); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); - dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); + dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); + dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); } /* Pipe & plane A info */ - dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); - dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); + dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); + dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); - dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); - dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); + dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); + dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); + dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); } else { - dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); - dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); - dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); + dev_priv->saveFPA0 = I915_READ(_FPA0); + dev_priv->saveFPA1 = I915_READ(_FPA1); + dev_priv->saveDPLL_A = I915_READ(_DPLL_A); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); - 
dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); - dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); - dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); - dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); - dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); - dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); + dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); + dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); + dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); + dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); + dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); + dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); + dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); if (!HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); + dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); - dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); - dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); - dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); - - dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); - dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); - - dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); - dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); - dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); - - dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); - dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); - dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); - dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); - dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); - dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); - dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); - } - - dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); - dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); - dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); - dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); - dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); + dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); + dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); + dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); + dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); + + dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); + dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); + + dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); + dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); + dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); + + dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); + dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); + dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); + dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); + dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); + dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); + dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); + } + + dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); + dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); + dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); + dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); + dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); if (INTEL_INFO(dev)->gen >= 4) { - dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); - dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); + dev_priv->saveDSPASURF = 
I915_READ(_DSPASURF); + dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); } i915_save_palette(dev, PIPE_A); - dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); + dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); /* Pipe & plane B info */ - dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); - dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); + dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); + dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); - dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); - dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); + dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); + dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); + dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); } else { - dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); - dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); - dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); + dev_priv->saveFPB0 = I915_READ(_FPB0); + dev_priv->saveFPB1 = I915_READ(_FPB1); + dev_priv->saveDPLL_B = I915_READ(_DPLL_B); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); - dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); - dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); - dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); - dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); - dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); - dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); + dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); + dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); + dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); + dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); + dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); + dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); + dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); if (!HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); + dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); - dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); - dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); - dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); - - dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); - dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); - - dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); - dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); - dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); - - dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); - dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); - dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); - dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); - dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); - dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); - dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); - } - - dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); - dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); - dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); - dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); - dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); + dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); + dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); + dev_priv->savePIPEB_LINK_M1 = 
I915_READ(_PIPEB_LINK_M1); + dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); + + dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); + dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); + + dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); + dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); + dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); + + dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); + dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); + dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); + dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); + dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); + dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); + dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); + } + + dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); + dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); + dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); + dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); + dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); if (INTEL_INFO(dev)->gen >= 4) { - dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); - dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); + dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); + dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); } i915_save_palette(dev, PIPE_B); - dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); + dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); /* Fences */ switch (INTEL_INFO(dev)->gen) { case 7: case 6: for (i = 0; i < 16; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); break; case 5: case 4: for (i = 0; i < 16; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); break; case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) - dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); case 2: for (i = 0; i < 8; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); break; } - /* CRT state */ - if (HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); - else - dev_priv->regfile.saveADPA = I915_READ(ADPA); - return; } @@ -418,20 +412,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev) case 7: case 6: for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 5: case 4: for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); break; case 3: case 2: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); break; } @@ -453,164 +447,158 @@ static void i915_restore_modeset_reg(struct drm_device *dev) } if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); - I915_WRITE(DISP_ARB_CTL, 
dev_priv->regfile.saveDISP_ARB_CTL); + I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); + I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); } /* Pipe & plane A info */ /* Prime the clock */ - if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { - I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & + if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { + I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_a_reg); udelay(150); } - I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); - I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); + I915_WRITE(fpa0_reg, dev_priv->saveFPA0); + I915_WRITE(fpa1_reg, dev_priv->saveFPA1); /* Actually enable it */ - I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); + I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); POSTING_READ(dpll_a_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); + I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); POSTING_READ(_DPLL_A_MD); } udelay(150); /* Restore mode */ - I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); - I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); - I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); - I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); - I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); - I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); + I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); + I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); + I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); + I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); + I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); + I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); + I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); - I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); - I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); - I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); + I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); + I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); + I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); + I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); - I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); - I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); + I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); + I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); - I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); - I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); - I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); + I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); + I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); + I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); - I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); - I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); - I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); - I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); - I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); - I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); - I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); + I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); + I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); + I915_WRITE(_TRANS_HBLANK_A, 
dev_priv->saveTRANS_HBLANK_A); + I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); + I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); + I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); + I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); } /* Restore plane info */ - I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); - I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); - I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); - I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); - I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); + I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); + I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); + I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); + I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); + I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); - I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); + I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); + I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); } - I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); + I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); i915_restore_palette(dev, PIPE_A); /* Enable the plane */ - I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); + I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); /* Pipe & plane B info */ - if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { - I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & + if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { + I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE); POSTING_READ(dpll_b_reg); udelay(150); } - I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); - I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); + I915_WRITE(fpb0_reg, dev_priv->saveFPB0); + I915_WRITE(fpb1_reg, dev_priv->saveFPB1); /* Actually enable it */ - I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); + I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); POSTING_READ(dpll_b_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); + I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); POSTING_READ(_DPLL_B_MD); } udelay(150); /* Restore mode */ - I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); - I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); - I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); - I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); - I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); - I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); + I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); + I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); + I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); + I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); + I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); + I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); + I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); - I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); - I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); - I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); + I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); + I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); + I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); + I915_WRITE(_PIPEB_LINK_N1, 
dev_priv->savePIPEB_LINK_N1); - I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); - I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); + I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); + I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); - I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); - I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); - I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); + I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); + I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); + I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); - I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); - I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); - I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); - I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); - I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); - I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); - I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); + I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); + I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); + I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); + I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); + I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); + I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); + I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); } /* Restore plane info */ - I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); - I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); - I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); - I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); - I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); + I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); + I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); + I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); + I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); + I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); - I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); + I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); + I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } - I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); + I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); i915_restore_palette(dev, PIPE_B); /* Enable the plane */ - I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); + I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); /* Cursor state */ - I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); - I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); - I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); - I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); - I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); - I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); + I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); + I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); + I915_WRITE(_CURABASE, dev_priv->saveCURABASE); + I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); + I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); + I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); if (IS_GEN2(dev)) - I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); - - /* CRT state */ - if (HAS_PCH_SPLIT(dev)) - I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA); - else - I915_WRITE(ADPA, dev_priv->regfile.saveADPA); + I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 
return; } @@ -620,84 +608,89 @@ static void i915_save_display(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration control */ - dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); + dev_priv->saveDSPARB = I915_READ(DSPARB); /* This is only meaningful in non-KMS mode */ - /* Don't regfile.save them in KMS mode */ + /* Don't save them in KMS mode */ i915_save_modeset_reg(dev); + /* CRT state */ + if (HAS_PCH_SPLIT(dev)) { + dev_priv->saveADPA = I915_READ(PCH_ADPA); + } else { + dev_priv->saveADPA = I915_READ(ADPA); + } + /* LVDS state */ if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); - dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); - dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); - dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); - dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); - dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); + dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); + dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); + dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); + dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); + dev_priv->saveLVDS = I915_READ(PCH_LVDS); } else { - dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); - dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); - dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); + dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); + dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); + dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); + dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); if (INTEL_INFO(dev)->gen >= 4) - dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); if (IS_MOBILE(dev) && !IS_I830(dev)) - dev_priv->regfile.saveLVDS = I915_READ(LVDS); + dev_priv->saveLVDS = I915_READ(LVDS); } if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); + dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); - dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); - dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); + dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); + dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); + dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); } else { - dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); - dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); - dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); - } - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Display Port state */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - dev_priv->regfile.saveDP_B = I915_READ(DP_B); - dev_priv->regfile.saveDP_C = I915_READ(DP_C); - dev_priv->regfile.saveDP_D = I915_READ(DP_D); - dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); - dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); - dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); - dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); - dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); - 
dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); - dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); - dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); - } - /* FIXME: regfile.save TV & SDVO state */ - } - - /* Only regfile.save FBC state on the platform that supports FBC */ + dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); + dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); + dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); + } + + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + dev_priv->saveDP_B = I915_READ(DP_B); + dev_priv->saveDP_C = I915_READ(DP_C); + dev_priv->saveDP_D = I915_READ(DP_D); + dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); + dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); + dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); + dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); + dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); + dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); + dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); + dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); + } + /* FIXME: save TV & SDVO state */ + + /* Only save FBC state on the platform that supports FBC */ if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); + dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); } else if (IS_GM45(dev)) { - dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); + dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); } else { - dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); - dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); - dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); - dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); + dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); + dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); + dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); + dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); } } /* VGA state */ - dev_priv->regfile.saveVGA0 = I915_READ(VGA0); - dev_priv->regfile.saveVGA1 = I915_READ(VGA1); - dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); + dev_priv->saveVGA0 = I915_READ(VGA0); + dev_priv->saveVGA1 = I915_READ(VGA1); + dev_priv->saveVGA_PD = I915_READ(VGA_PD); if (HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL); + dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); else - dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL); + dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); i915_save_vga(dev); } @@ -707,95 +700,97 @@ static void i915_restore_display(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Display arbitration */ - I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Display port ratios (must be done before clock is set) */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); - I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); - I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); - I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); - I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); - I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); - I915_WRITE(_PIPEA_DP_LINK_N, 
dev_priv->regfile.savePIPEA_DP_LINK_N); - I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); - } + I915_WRITE(DSPARB, dev_priv->saveDSPARB); + + /* Display port ratios (must be done before clock is set) */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); + I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); + I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); + I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); + I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); + I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); + I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); + I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } /* This is only meaningful in non-KMS mode */ /* Don't restore them in KMS mode */ i915_restore_modeset_reg(dev); + /* CRT state */ + if (HAS_PCH_SPLIT(dev)) + I915_WRITE(PCH_ADPA, dev_priv->saveADPA); + else + I915_WRITE(ADPA, dev_priv->saveADPA); + /* LVDS state */ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); + I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS); + I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); } else if (IS_MOBILE(dev) && !IS_I830(dev)) - I915_WRITE(LVDS, dev_priv->regfile.saveLVDS); + I915_WRITE(LVDS, dev_priv->saveLVDS); if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) - I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); + I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); - I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); + I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); + I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; * otherwise we get blank eDP screen after S3 on some machines */ - I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2); - I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL); - I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); - I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); - I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); - I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); + I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); + I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); + I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); + I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); + I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); + I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); I915_WRITE(RSTDBYCTL, - dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); + dev_priv->saveMCHBAR_RENDER_STANDBY); } else { - I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); - I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); - I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); - I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); - I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); - I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); - I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Display Port state */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - I915_WRITE(DP_B, 
dev_priv->regfile.saveDP_B); - I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); - I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); - } - /* FIXME: restore TV & SDVO state */ + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); + I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); + I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); + I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); + I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); + I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); + I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); } + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(DP_B, dev_priv->saveDP_B); + I915_WRITE(DP_C, dev_priv->saveDP_C); + I915_WRITE(DP_D, dev_priv->saveDP_D); + } + /* FIXME: restore TV & SDVO state */ + /* only restore FBC info on the platform that supports FBC*/ intel_disable_fbc(dev); if (I915_HAS_FBC(dev)) { if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); + I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else if (IS_GM45(dev)) { - I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); + I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else { - I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE); - I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE); - I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2); - I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); + I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); + I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); + I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); + I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); } } /* VGA state */ if (HAS_PCH_SPLIT(dev)) - I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL); + I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); else - I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL); + I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); - I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); - I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); - I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); + I915_WRITE(VGA0, dev_priv->saveVGA0); + I915_WRITE(VGA1, dev_priv->saveVGA1); + I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); POSTING_READ(VGA_PD); udelay(150); @@ -807,45 +802,46 @@ int i915_save_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); mutex_lock(&dev->struct_mutex); + /* Hardware status page */ + dev_priv->saveHWS = I915_READ(HWS_PGA); + i915_save_display(dev); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Interrupt state */ - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveDEIER = I915_READ(DEIER); - dev_priv->regfile.saveDEIMR = I915_READ(DEIMR); - dev_priv->regfile.saveGTIER = I915_READ(GTIER); - dev_priv->regfile.saveGTIMR = I915_READ(GTIMR); - dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); - dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); - dev_priv->regfile.saveMCHBAR_RENDER_STANDBY = - I915_READ(RSTDBYCTL); - dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); - } else { - dev_priv->regfile.saveIER = I915_READ(IER); - dev_priv->regfile.saveIMR = I915_READ(IMR); - } + /* Interrupt state */ + if (HAS_PCH_SPLIT(dev)) { + dev_priv->saveDEIER = I915_READ(DEIER); + dev_priv->saveDEIMR = I915_READ(DEIMR); + dev_priv->saveGTIER = I915_READ(GTIER); + dev_priv->saveGTIMR = I915_READ(GTIMR); + 
dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); + dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); + dev_priv->saveMCHBAR_RENDER_STANDBY = + I915_READ(RSTDBYCTL); + dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); + } else { + dev_priv->saveIER = I915_READ(IER); + dev_priv->saveIMR = I915_READ(IMR); } intel_disable_gt_powersave(dev); /* Cache mode state */ - dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); + dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); /* Memory Arbitration state */ - dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); + dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); /* Scratch space */ for (i = 0; i < 16; i++) { - dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2)); - dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2)); + dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); + dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); } for (i = 0; i < 3; i++) - dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); mutex_unlock(&dev->struct_mutex); @@ -857,40 +853,41 @@ int i915_restore_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); + pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); mutex_lock(&dev->struct_mutex); + /* Hardware status page */ + I915_WRITE(HWS_PGA, dev_priv->saveHWS); + i915_restore_display(dev); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Interrupt state */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(DEIER, dev_priv->regfile.saveDEIER); - I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR); - I915_WRITE(GTIER, dev_priv->regfile.saveGTIER); - I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR); - I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR); - I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR); - I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG); - } else { - I915_WRITE(IER, dev_priv->regfile.saveIER); - I915_WRITE(IMR, dev_priv->regfile.saveIMR); - } + /* Interrupt state */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(DEIER, dev_priv->saveDEIER); + I915_WRITE(DEIMR, dev_priv->saveDEIMR); + I915_WRITE(GTIER, dev_priv->saveGTIER); + I915_WRITE(GTIMR, dev_priv->saveGTIMR); + I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); + I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); + I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); + } else { + I915_WRITE(IER, dev_priv->saveIER); + I915_WRITE(IMR, dev_priv->saveIMR); } /* Cache mode state */ - I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); + I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); /* Memory arbitration state */ - I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); + I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); for (i = 0; i < 16; i++) { - I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]); - I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]); + I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); + I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); } for (i = 0; i < 3; i++) - I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]); + I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); mutex_unlock(&dev->struct_mutex); diff --git a/trunk/drivers/gpu/drm/i915/i915_sysfs.c b/trunk/drivers/gpu/drm/i915/i915_sysfs.c index 3bf51d58319d..903eebd2117a 100644 --- 
a/trunk/drivers/gpu/drm/i915/i915_sysfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_sysfs.c @@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, if (ret) return ret; - if (!dev_priv->l3_parity.remap_info) { + if (!dev_priv->mm.l3_remap_info) { temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); if (!temp) { mutex_unlock(&drm_dev->struct_mutex); @@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj, * at this point it is left as a TODO. */ if (temp) - dev_priv->l3_parity.remap_info = temp; + dev_priv->mm.l3_remap_info = temp; - memcpy(dev_priv->l3_parity.remap_info + (offset/4), + memcpy(dev_priv->mm.l3_remap_info + (offset/4), buf + (offset/4), count); @@ -211,9 +211,12 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, struct drm_i915_private *dev_priv = dev->dev_private; int ret; - mutex_lock(&dev_priv->rps.hw_lock); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return snprintf(buf, PAGE_SIZE, "%d", ret); } @@ -225,9 +228,12 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute struct drm_i915_private *dev_priv = dev->dev_private; int ret; - mutex_lock(&dev_priv->rps.hw_lock); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return snprintf(buf, PAGE_SIZE, "%d", ret); } @@ -248,14 +254,16 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, val /= GT_FREQUENCY_MULTIPLIER; - mutex_lock(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); hw_max = (rp_state_cap & 0xff); hw_min = ((rp_state_cap & 0xff0000) >> 16); if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return -EINVAL; } @@ -264,7 +272,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, dev_priv->rps.max_delay = val; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return count; } @@ -276,9 +284,12 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute struct drm_i915_private *dev_priv = dev->dev_private; int ret; - mutex_lock(&dev_priv->rps.hw_lock); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return snprintf(buf, PAGE_SIZE, "%d", ret); } @@ -299,14 +310,16 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, val /= GT_FREQUENCY_MULTIPLIER; - mutex_lock(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); hw_max = (rp_state_cap & 0xff); hw_min = ((rp_state_cap & 0xff0000) >> 16); if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return -EINVAL; } @@ -315,7 +328,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, dev_priv->rps.min_delay = val; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev->struct_mutex); return count; diff --git a/trunk/drivers/gpu/drm/i915/i915_trace.h b/trunk/drivers/gpu/drm/i915/i915_trace.h index 
3db4a6817713..8134421b89a6 100644 --- a/trunk/drivers/gpu/drm/i915/i915_trace.h +++ b/trunk/drivers/gpu/drm/i915/i915_trace.h @@ -229,26 +229,24 @@ TRACE_EVENT(i915_gem_evict_everything, ); TRACE_EVENT(i915_gem_ring_dispatch, - TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), - TP_ARGS(ring, seqno, flags), + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno), TP_STRUCT__entry( __field(u32, dev) __field(u32, ring) __field(u32, seqno) - __field(u32, flags) ), TP_fast_assign( __entry->dev = ring->dev->primary->index; __entry->ring = ring->id; __entry->seqno = seqno; - __entry->flags = flags; i915_trace_irq_get(ring, seqno); ), - TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", - __entry->dev, __entry->ring, __entry->seqno, __entry->flags) + TP_printk("dev=%u, ring=%u, seqno=%u", + __entry->dev, __entry->ring, __entry->seqno) ); TRACE_EVENT(i915_gem_ring_flush, diff --git a/trunk/drivers/gpu/drm/i915/intel_bios.c b/trunk/drivers/gpu/drm/i915/intel_bios.c index 55ffba1f5818..56846ed5ee55 100644 --- a/trunk/drivers/gpu/drm/i915/intel_bios.c +++ b/trunk/drivers/gpu/drm/i915/intel_bios.c @@ -755,8 +755,7 @@ void intel_setup_bios(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* Set the Panel Power On/Off timings if uninitialized. */ - if (!HAS_PCH_SPLIT(dev) && - I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) { + if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { /* Set T2 to 40ms and T5 to 200ms */ I915_WRITE(PP_ON_DELAYS, 0x019007d0); diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 331af3bc6894..6345878ae1e7 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -221,20 +221,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; u32 adpa; - if (HAS_PCH_SPLIT(dev)) - adpa = ADPA_HOTPLUG_BITS; - else - adpa = 0; - + adpa = ADPA_HOTPLUG_BITS; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) adpa |= ADPA_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) adpa |= ADPA_VSYNC_ACTIVE_HIGH; /* For CPT allow 3 pipe config, for others just use A or B */ - if (HAS_PCH_LPT(dev)) - ; /* Those bits don't exist here */ - else if (HAS_PCH_CPT(dev)) + if (HAS_PCH_CPT(dev)) adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); else if (intel_crtc->pipe == 0) adpa |= ADPA_PIPE_A_SELECT; @@ -407,16 +401,12 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid; - int ret; edid = intel_crt_get_edid(connector, adapter); if (!edid) return 0; - ret = intel_connector_update_modes(connector, edid); - kfree(edid); - - return ret; + return intel_connector_update_modes(connector, edid); } static bool intel_crt_detect_ddc(struct drm_connector *connector) @@ -654,22 +644,10 @@ static int intel_crt_set_property(struct drm_connector *connector, static void intel_crt_reset(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crt *crt = intel_attached_crt(connector); - if (HAS_PCH_SPLIT(dev)) { - u32 adpa; - - adpa = I915_READ(PCH_ADPA); - adpa &= ~ADPA_CRT_HOTPLUG_MASK; - adpa |= ADPA_HOTPLUG_BITS; - I915_WRITE(PCH_ADPA, adpa); - POSTING_READ(PCH_ADPA); - - DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); + if (HAS_PCH_SPLIT(dev)) crt->force_hotplug_required = 1; - } - } /* @@ -751,7 +729,7 @@ void 
intel_crt_init(struct drm_device *dev) crt->base.type = INTEL_OUTPUT_ANALOG; crt->base.cloneable = true; - if (IS_I830(dev)) + if (IS_HASWELL(dev) || IS_I830(dev)) crt->base.crtc_mask = (1 << 0); else crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); @@ -771,10 +749,7 @@ void intel_crt_init(struct drm_device *dev) crt->base.disable = intel_disable_crt; crt->base.enable = intel_enable_crt; - if (IS_HASWELL(dev)) - crt->base.get_hw_state = intel_ddi_get_hw_state; - else - crt->base.get_hw_state = intel_crt_get_hw_state; + crt->base.get_hw_state = intel_crt_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state; drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); @@ -791,6 +766,18 @@ void intel_crt_init(struct drm_device *dev) * Configure the automatic hotplug detection stuff */ crt->force_hotplug_required = 0; + if (HAS_PCH_SPLIT(dev)) { + u32 adpa; + + adpa = I915_READ(PCH_ADPA); + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + adpa |= ADPA_HOTPLUG_BITS; + I915_WRITE(PCH_ADPA, adpa); + POSTING_READ(PCH_ADPA); + + DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); + crt->force_hotplug_required = 1; + } dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; } diff --git a/trunk/drivers/gpu/drm/i915/intel_ddi.c b/trunk/drivers/gpu/drm/i915/intel_ddi.c index 852012b6fc5b..bfe375466a0e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ddi.c +++ b/trunk/drivers/gpu/drm/i915/intel_ddi.c @@ -58,26 +58,6 @@ static const u32 hsw_ddi_translations_fdi[] = { 0x00FFFFFF, 0x00040006 /* HDMI parameters */ }; -static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) -{ - struct drm_encoder *encoder = &intel_encoder->base; - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || - type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { - struct intel_digital_port *intel_dig_port = - enc_to_dig_port(encoder); - return intel_dig_port->port; - - } else if (type == INTEL_OUTPUT_ANALOG) { - return PORT_E; - - } else { - DRM_ERROR("Invalid DDI encoder type %d\n", type); - BUG(); - } -} - /* On Haswell, DDI port buffers must be programmed with correct values * in advance. The buffer values are different for FDI and DP modes, * but the HDMI/DVI fields are shared among those. So we program the DDI @@ -153,34 +133,25 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - u32 temp, i, rx_ctl_val; + int pipe = intel_crtc->pipe; + u32 reg, temp, i; - /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the - * mode set "sequence for CRT port" document: - * - TP1 to TP2 time with the default value - * - FDI delay to 90h - */ - I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) | - FDI_RX_PWRDN_LANE0_VAL(2) | - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - - /* Enable the PCH Receiver FDI PLL */ - rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE | - ((intel_crtc->fdi_lanes - 1) << 19); - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); - POSTING_READ(_FDI_RXA_CTL); - udelay(220); - - /* Switch from Rawclk to PCDclk */ - rx_ctl_val |= FDI_PCDCLK; - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); - - /* Configure Port Clock Select */ - I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); - - /* Start the training iterating through available voltages and emphasis, - * testing each value twice. 
*/ - for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) { + /* Configure CPU PLL, wait for warmup */ + I915_WRITE(SPLL_CTL, + SPLL_PLL_ENABLE | + SPLL_PLL_FREQ_1350MHz | + SPLL_PLL_SCC); + + /* Use SPLL to drive the output when in FDI mode */ + I915_WRITE(PORT_CLK_SEL(PORT_E), + PORT_CLK_SEL_SPLL); + I915_WRITE(PIPE_CLK_SEL(pipe), + PIPE_CLK_SEL_PORT(PORT_E)); + + udelay(20); + + /* Start the training iterating through available voltages and emphasis */ + for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) { /* Configure DP_TP_CTL with auto-training */ I915_WRITE(DP_TP_CTL(PORT_E), DP_TP_CTL_FDI_AUTOTRAIN | @@ -189,63 +160,103 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) DP_TP_CTL_ENABLE); /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ + temp = I915_READ(DDI_BUF_CTL(PORT_E)); + temp = (temp & ~DDI_BUF_EMP_MASK); I915_WRITE(DDI_BUF_CTL(PORT_E), - DDI_BUF_CTL_ENABLE | - ((intel_crtc->fdi_lanes - 1) << 1) | - hsw_ddi_buf_ctl_values[i / 2]); - POSTING_READ(DDI_BUF_CTL(PORT_E)); + temp | + DDI_BUF_CTL_ENABLE | + DDI_PORT_WIDTH_X2 | + hsw_ddi_buf_ctl_values[i]); udelay(600); - /* Program PCH FDI Receiver TU */ - I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64)); - - /* Enable PCH FDI Receiver with auto-training */ - rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); - POSTING_READ(_FDI_RXA_CTL); - - /* Wait for FDI receiver lane calibration */ - udelay(30); - - /* Unset FDI_RX_MISC pwrdn lanes */ - temp = I915_READ(_FDI_RXA_MISC); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - I915_WRITE(_FDI_RXA_MISC, temp); - POSTING_READ(_FDI_RXA_MISC); - - /* Wait for FDI auto training time */ - udelay(5); + /* We need to program FDI_RX_MISC with the default TP1 to TP2 + * values before enabling the receiver, and configure the delay + * for the FDI timing generator to 90h. Luckily, all the other + * bits are supposed to be zeroed, so we can write those values + * directly. 
+ */ + I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | + FDI_RX_FDI_DELAY_90); + + /* Enable CPU FDI Receiver with auto-training */ + reg = FDI_RX_CTL(pipe); + I915_WRITE(reg, + I915_READ(reg) | + FDI_LINK_TRAIN_AUTO | + FDI_RX_ENABLE | + FDI_LINK_TRAIN_PATTERN_1_CPT | + FDI_RX_ENHANCE_FRAME_ENABLE | + FDI_PORT_WIDTH_2X_LPT | + FDI_RX_PLL_ENABLE); + POSTING_READ(reg); + udelay(100); temp = I915_READ(DP_TP_STATUS(PORT_E)); if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { - DRM_DEBUG_KMS("FDI link training done on step %d\n", i); + DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i); /* Enable normal pixel sending for FDI */ I915_WRITE(DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | - DP_TP_CTL_LINK_TRAIN_NORMAL | - DP_TP_CTL_ENHANCED_FRAME_ENABLE | - DP_TP_CTL_ENABLE); + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_LINK_TRAIN_NORMAL | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_ENABLE); + + /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */ + temp = I915_READ(DDI_FUNC_CTL(pipe)); + temp &= ~PIPE_DDI_PORT_MASK; + temp |= PIPE_DDI_SELECT_PORT(PORT_E) | + PIPE_DDI_MODE_SELECT_FDI | + PIPE_DDI_FUNC_ENABLE | + PIPE_DDI_PORT_WIDTH_X2; + I915_WRITE(DDI_FUNC_CTL(pipe), + temp); + break; + } else { + DRM_ERROR("Error training BUF_CTL %d\n", i); - return; + /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */ + I915_WRITE(DP_TP_CTL(PORT_E), + I915_READ(DP_TP_CTL(PORT_E)) & + ~DP_TP_CTL_ENABLE); + I915_WRITE(FDI_RX_CTL(pipe), + I915_READ(FDI_RX_CTL(pipe)) & + ~FDI_RX_PLL_ENABLE); + continue; } + } - /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ - I915_WRITE(DP_TP_CTL(PORT_E), - I915_READ(DP_TP_CTL(PORT_E)) & ~DP_TP_CTL_ENABLE); + DRM_DEBUG_KMS("FDI train done.\n"); +} - rx_ctl_val &= ~FDI_RX_ENABLE; - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); +/* For DDI connections, it is possible to support different outputs over the + * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by + * the time the output is detected what exactly is on the other end of it. This + * function aims at providing support for this detection and proper output + * configuration. 
+ */ +void intel_ddi_init(struct drm_device *dev, enum port port) +{ + /* For now, we don't do any proper output detection and assume that we + * handle HDMI only */ - /* Reset FDI_RX_MISC pwrdn lanes */ - temp = I915_READ(_FDI_RXA_MISC); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - I915_WRITE(_FDI_RXA_MISC, temp); + switch(port){ + case PORT_A: + /* We don't handle eDP and DP yet */ + DRM_DEBUG_DRIVER("Found digital output on DDI port A\n"); + break; + /* Assume that the ports B, C and D are working in HDMI mode for now */ + case PORT_B: + case PORT_C: + case PORT_D: + intel_hdmi_init(dev, DDI_BUF_CTL(port), port); + break; + default: + DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", + port); + break; } - - DRM_ERROR("FDI link training failed!\n"); } /* WRPLL clock dividers */ @@ -634,435 +645,116 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {298000, 2, 21, 19}, }; -static void intel_ddi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +void intel_ddi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - int port = intel_ddi_get_encoder_port(intel_encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + int port = intel_hdmi->ddi_port; int pipe = intel_crtc->pipe; - int type = intel_encoder->type; - - DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", - port_name(port), pipe_name(pipe)); - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; - switch (intel_dp->lane_count) { - case 1: - intel_dp->DP |= DDI_PORT_WIDTH_X1; - break; - case 2: - intel_dp->DP |= DDI_PORT_WIDTH_X2; - break; - case 4: - intel_dp->DP |= DDI_PORT_WIDTH_X4; - break; - default: - intel_dp->DP |= DDI_PORT_WIDTH_X4; - WARN(1, "Unexpected DP lane count %d\n", - intel_dp->lane_count); - break; - } - - if (intel_dp->has_audio) { - DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n", - pipe_name(intel_crtc->pipe)); - - /* write eld */ - DRM_DEBUG_DRIVER("DP audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); - } - - intel_dp_init_link_config(intel_dp); - - } else if (type == INTEL_OUTPUT_HDMI) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - - if (intel_hdmi->has_audio) { - /* Proper support for digital audio needs a new logic - * and a new set of registers, so we leave it for future - * patch bombing. 
- */ - DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", - pipe_name(intel_crtc->pipe)); - - /* write eld */ - DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); - } - - intel_hdmi->set_infoframes(encoder, adjusted_mode); - } -} - -static struct intel_encoder * -intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder, *ret = NULL; - int num_encoders = 0; - - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - ret = intel_encoder; - num_encoders++; - } - - if (num_encoders != 1) - WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders, - intel_crtc->pipe); - - BUG_ON(ret == NULL); - return ret; -} - -void intel_ddi_put_crtc_pll(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - uint32_t val; - - switch (intel_crtc->ddi_pll_sel) { - case PORT_CLK_SEL_SPLL: - plls->spll_refcount--; - if (plls->spll_refcount == 0) { - DRM_DEBUG_KMS("Disabling SPLL\n"); - val = I915_READ(SPLL_CTL); - WARN_ON(!(val & SPLL_PLL_ENABLE)); - I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); - POSTING_READ(SPLL_CTL); - } - break; - case PORT_CLK_SEL_WRPLL1: - plls->wrpll1_refcount--; - if (plls->wrpll1_refcount == 0) { - DRM_DEBUG_KMS("Disabling WRPLL 1\n"); - val = I915_READ(WRPLL_CTL1); - WARN_ON(!(val & WRPLL_PLL_ENABLE)); - I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL1); - } - break; - case PORT_CLK_SEL_WRPLL2: - plls->wrpll2_refcount--; - if (plls->wrpll2_refcount == 0) { - DRM_DEBUG_KMS("Disabling WRPLL 2\n"); - val = I915_READ(WRPLL_CTL2); - WARN_ON(!(val & WRPLL_PLL_ENABLE)); - I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL2); - } - break; - } - - WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n"); - WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n"); - WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n"); - - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; -} + int p, n2, r2; + u32 temp, i; -static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2) -{ - u32 i; + /* On Haswell, we need to enable the clocks and prepare DDI function to + * work in HDMI mode for this pipe. 
+ */ + DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) - if (clock <= wrpll_tmds_clock_table[i].clock) + if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock) break; if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) i--; - *p = wrpll_tmds_clock_table[i].p; - *n2 = wrpll_tmds_clock_table[i].n2; - *r2 = wrpll_tmds_clock_table[i].r2; - - if (wrpll_tmds_clock_table[i].clock != clock) - DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n", - wrpll_tmds_clock_table[i].clock, clock); - - DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", - clock, *p, *n2, *r2); -} - -bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; - int type = intel_encoder->type; - enum pipe pipe = intel_crtc->pipe; - uint32_t reg, val; - - /* TODO: reuse PLLs when possible (compare values) */ - - intel_ddi_put_crtc_pll(crtc); - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - switch (intel_dp->link_bw) { - case DP_LINK_BW_1_62: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; - break; - case DP_LINK_BW_2_7: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; - break; - case DP_LINK_BW_5_4: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; - break; - default: - DRM_ERROR("Link bandwidth %d unsupported\n", - intel_dp->link_bw); - return false; - } - - /* We don't need to turn any PLL on because we'll use LCPLL. 
*/ - return true; - - } else if (type == INTEL_OUTPUT_HDMI) { - int p, n2, r2; - - if (plls->wrpll1_refcount == 0) { - DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", - pipe_name(pipe)); - plls->wrpll1_refcount++; - reg = WRPLL_CTL1; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; - } else if (plls->wrpll2_refcount == 0) { - DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", - pipe_name(pipe)); - plls->wrpll2_refcount++; - reg = WRPLL_CTL2; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; - } else { - DRM_ERROR("No WRPLLs available!\n"); - return false; - } - - WARN(I915_READ(reg) & WRPLL_PLL_ENABLE, - "WRPLL already enabled\n"); - - intel_ddi_calculate_wrpll(clock, &p, &n2, &r2); - - val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | - WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | - WRPLL_DIVIDER_POST(p); + p = wrpll_tmds_clock_table[i].p; + n2 = wrpll_tmds_clock_table[i].n2; + r2 = wrpll_tmds_clock_table[i].r2; - } else if (type == INTEL_OUTPUT_ANALOG) { - if (plls->spll_refcount == 0) { - DRM_DEBUG_KMS("Using SPLL on pipe %c\n", - pipe_name(pipe)); - plls->spll_refcount++; - reg = SPLL_CTL; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; - } + if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock) + DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n", + wrpll_tmds_clock_table[i].clock, crtc->mode.clock); - WARN(I915_READ(reg) & SPLL_PLL_ENABLE, - "SPLL already enabled\n"); + DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", + crtc->mode.clock, p, n2, r2); - val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; + /* Enable LCPLL if disabled */ + temp = I915_READ(LCPLL_CTL); + if (temp & LCPLL_PLL_DISABLE) + I915_WRITE(LCPLL_CTL, + temp & ~LCPLL_PLL_DISABLE); - } else { - WARN(1, "Invalid DDI encoder type %d\n", type); - return false; - } + /* Configure WR PLL 1, program the correct divider values for + * the desired frequency and wait for warmup */ + I915_WRITE(WRPLL_CTL1, + WRPLL_PLL_ENABLE | + WRPLL_PLL_SELECT_LCPLL_2700 | + WRPLL_DIVIDER_REFERENCE(r2) | + WRPLL_DIVIDER_FEEDBACK(n2) | + WRPLL_DIVIDER_POST(p)); - I915_WRITE(reg, val); udelay(20); - return true; -} + /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use + * this port for connection. + */ + I915_WRITE(PORT_CLK_SEL(port), + PORT_CLK_SEL_WRPLL1); + I915_WRITE(PIPE_CLK_SEL(pipe), + PIPE_CLK_SEL_PORT(port)); -void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - int type = intel_encoder->type; - uint32_t temp; + udelay(20); - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { + if (intel_hdmi->has_audio) { + /* Proper support for digital audio needs a new logic and a new set + * of registers, so we leave it for future patch bombing. 
+ */ + DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", + pipe_name(intel_crtc->pipe)); - temp = TRANS_MSA_SYNC_CLK; - switch (intel_crtc->bpp) { - case 18: - temp |= TRANS_MSA_6_BPC; - break; - case 24: - temp |= TRANS_MSA_8_BPC; - break; - case 30: - temp |= TRANS_MSA_10_BPC; - break; - case 36: - temp |= TRANS_MSA_12_BPC; - break; - default: - temp |= TRANS_MSA_8_BPC; - WARN(1, "%d bpp unsupported by DDI function\n", - intel_crtc->bpp); - } - I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); + /* write eld */ + DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); + intel_write_eld(encoder, adjusted_mode); } -} -void intel_ddi_enable_pipe_func(struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - enum pipe pipe = intel_crtc->pipe; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - uint32_t temp; - - /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ - temp = TRANS_DDI_FUNC_ENABLE; - temp |= TRANS_DDI_SELECT_PORT(port); + /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ + temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port); switch (intel_crtc->bpp) { case 18: - temp |= TRANS_DDI_BPC_6; + temp |= PIPE_DDI_BPC_6; break; case 24: - temp |= TRANS_DDI_BPC_8; + temp |= PIPE_DDI_BPC_8; break; case 30: - temp |= TRANS_DDI_BPC_10; + temp |= PIPE_DDI_BPC_10; break; case 36: - temp |= TRANS_DDI_BPC_12; + temp |= PIPE_DDI_BPC_12; break; default: - WARN(1, "%d bpp unsupported by transcoder DDI function\n", + WARN(1, "%d bpp unsupported by pipe DDI function\n", intel_crtc->bpp); } - if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) - temp |= TRANS_DDI_PVSYNC; - if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) - temp |= TRANS_DDI_PHSYNC; - - if (cpu_transcoder == TRANSCODER_EDP) { - switch (pipe) { - case PIPE_A: - temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; - break; - case PIPE_B: - temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; - break; - case PIPE_C: - temp |= TRANS_DDI_EDP_INPUT_C_ONOFF; - break; - default: - BUG(); - break; - } - } - - if (type == INTEL_OUTPUT_HDMI) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - - if (intel_hdmi->has_hdmi_sink) - temp |= TRANS_DDI_MODE_SELECT_HDMI; - else - temp |= TRANS_DDI_MODE_SELECT_DVI; - - } else if (type == INTEL_OUTPUT_ANALOG) { - temp |= TRANS_DDI_MODE_SELECT_FDI; - temp |= (intel_crtc->fdi_lanes - 1) << 1; - - } else if (type == INTEL_OUTPUT_DISPLAYPORT || - type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - temp |= TRANS_DDI_MODE_SELECT_DP_SST; - - switch (intel_dp->lane_count) { - case 1: - temp |= TRANS_DDI_PORT_WIDTH_X1; - break; - case 2: - temp |= TRANS_DDI_PORT_WIDTH_X2; - break; - case 4: - temp |= TRANS_DDI_PORT_WIDTH_X4; - break; - default: - temp |= TRANS_DDI_PORT_WIDTH_X4; - WARN(1, "Unsupported lane count %d\n", - intel_dp->lane_count); - } - - } else { - WARN(1, "Invalid encoder type %d for pipe %d\n", - intel_encoder->type, pipe); - } - - I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); -} - -void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); - uint32_t val = I915_READ(reg); - - val &= ~(TRANS_DDI_FUNC_ENABLE | 
TRANS_DDI_PORT_MASK); - val |= TRANS_DDI_PORT_NONE; - I915_WRITE(reg, val); -} - -bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) -{ - struct drm_device *dev = intel_connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder = intel_connector->encoder; - int type = intel_connector->base.connector_type; - enum port port = intel_ddi_get_encoder_port(intel_encoder); - enum pipe pipe = 0; - enum transcoder cpu_transcoder; - uint32_t tmp; - - if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) - return false; - - if (port == PORT_A) - cpu_transcoder = TRANSCODER_EDP; + if (intel_hdmi->has_hdmi_sink) + temp |= PIPE_DDI_MODE_SELECT_HDMI; else - cpu_transcoder = pipe; + temp |= PIPE_DDI_MODE_SELECT_DVI; - tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + temp |= PIPE_DDI_PVSYNC; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + temp |= PIPE_DDI_PHSYNC; - switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { - case TRANS_DDI_MODE_SELECT_HDMI: - case TRANS_DDI_MODE_SELECT_DVI: - return (type == DRM_MODE_CONNECTOR_HDMIA); + I915_WRITE(DDI_FUNC_CTL(pipe), temp); - case TRANS_DDI_MODE_SELECT_DP_SST: - if (type == DRM_MODE_CONNECTOR_eDP) - return true; - case TRANS_DDI_MODE_SELECT_DP_MST: - return (type == DRM_MODE_CONNECTOR_DisplayPort); - - case TRANS_DDI_MODE_SELECT_FDI: - return (type == DRM_MODE_CONNECTOR_VGA); - - default: - return false; - } + intel_hdmi->set_infoframes(encoder, adjusted_mode); } bool intel_ddi_get_hw_state(struct intel_encoder *encoder, @@ -1070,432 +762,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_ddi_get_encoder_port(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 tmp; int i; - tmp = I915_READ(DDI_BUF_CTL(port)); + tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port)); if (!(tmp & DDI_BUF_CTL_ENABLE)) return false; - if (port == PORT_A) { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); + for_each_pipe(i) { + tmp = I915_READ(DDI_FUNC_CTL(i)); - switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { - case TRANS_DDI_EDP_INPUT_A_ON: - case TRANS_DDI_EDP_INPUT_A_ONOFF: - *pipe = PIPE_A; - break; - case TRANS_DDI_EDP_INPUT_B_ONOFF: - *pipe = PIPE_B; - break; - case TRANS_DDI_EDP_INPUT_C_ONOFF: - *pipe = PIPE_C; - break; - } - - return true; - } else { - for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(i)); - - if ((tmp & TRANS_DDI_PORT_MASK) - == TRANS_DDI_SELECT_PORT(port)) { - *pipe = i; - return true; - } + if ((tmp & PIPE_DDI_PORT_MASK) + == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) { + *pipe = i; + return true; } } - DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port); + DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port); return true; } -static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - uint32_t temp, ret; - enum port port; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - int i; - - if (cpu_transcoder == TRANSCODER_EDP) { - port = PORT_A; - } else { - temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - temp &= TRANS_DDI_PORT_MASK; - - for (i = PORT_B; i <= PORT_E; i++) - if (temp == TRANS_DDI_SELECT_PORT(i)) - port = i; - } - - ret = I915_READ(PORT_CLK_SEL(port)); - - DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 
0x%08x\n", - pipe_name(pipe), port_name(port), ret); - - return ret; -} - -void intel_ddi_setup_hw_pll_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe; - struct intel_crtc *intel_crtc; - - for_each_pipe(pipe) { - intel_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - if (!intel_crtc->active) - continue; - - intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, - pipe); - - switch (intel_crtc->ddi_pll_sel) { - case PORT_CLK_SEL_SPLL: - dev_priv->ddi_plls.spll_refcount++; - break; - case PORT_CLK_SEL_WRPLL1: - dev_priv->ddi_plls.wrpll1_refcount++; - break; - case PORT_CLK_SEL_WRPLL2: - dev_priv->ddi_plls.wrpll2_refcount++; - break; - } - } -} - -void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) -{ - struct drm_crtc *crtc = &intel_crtc->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - enum port port = intel_ddi_get_encoder_port(intel_encoder); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - - if (cpu_transcoder != TRANSCODER_EDP) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_PORT(port)); -} - -void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) -{ - struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - - if (cpu_transcoder != TRANSCODER_EDP) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_DISABLED); -} - -static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) -{ - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_crtc *crtc = encoder->crtc; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - ironlake_edp_panel_vdd_on(intel_dp); - ironlake_edp_panel_on(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, true); - } - - WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); - I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel); - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); - intel_dp_start_link_train(intel_dp); - intel_dp_complete_link_train(intel_dp); - } -} - -static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, - enum port port) -{ - uint32_t reg = DDI_BUF_CTL(port); - int i; - - for (i = 0; i < 8; i++) { - udelay(1); - if (I915_READ(reg) & DDI_BUF_IS_IDLE) - return; - } - DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); -} - -static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) -{ - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - uint32_t val; - bool wait = false; - - val = I915_READ(DDI_BUF_CTL(port)); - if (val & DDI_BUF_CTL_ENABLE) { - val &= ~DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), val); - wait = true; - } - - val = I915_READ(DP_TP_CTL(port)); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); - - if (wait) - 
intel_wait_ddi_buf_idle(dev_priv, port); - - if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - ironlake_edp_panel_vdd_on(intel_dp); - ironlake_edp_panel_off(intel_dp); - } - - I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); -} - -static void intel_enable_ddi(struct intel_encoder *intel_encoder) +void intel_enable_ddi(struct intel_encoder *encoder) { - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_ddi_get_encoder_port(intel_encoder); - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_HDMI) { - /* In HDMI/DVI mode, the port width, and swing/emphasis values - * are ignored so nothing special needs to be done besides - * enabling the port. - */ - I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE); - } else if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - ironlake_edp_backlight_on(intel_dp); - } -} - -static void intel_disable_ddi(struct intel_encoder *intel_encoder) -{ - struct drm_encoder *encoder = &intel_encoder->base; - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - ironlake_edp_backlight_off(intel_dp); - } -} - -int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) -{ - if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) - return 450; - else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == - LCPLL_CLK_FREQ_450) - return 450; - else if (IS_ULT(dev_priv->dev)) - return 338; - else - return 540; -} + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + int port = intel_hdmi->ddi_port; + u32 temp; -void intel_ddi_pll_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t val = I915_READ(LCPLL_CTL); + temp = I915_READ(DDI_BUF_CTL(port)); + temp |= DDI_BUF_CTL_ENABLE; - /* The LCPLL register should be turned on by the BIOS. For now let's - * just check its state and print errors in case something is wrong. - * Don't even try to turn it on. + /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, + * and swing/emphasis values are ignored so nothing special needs + * to be done besides enabling the port. 
*/ - - DRM_DEBUG_KMS("CDCLK running at %dMHz\n", - intel_ddi_get_cdclk_freq(dev_priv)); - - if (val & LCPLL_CD_SOURCE_FCLK) - DRM_ERROR("CDCLK source is not LCPLL\n"); - - if (val & LCPLL_PLL_DISABLE) - DRM_ERROR("LCPLL is disabled\n"); -} - -void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) -{ - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_i915_private *dev_priv = encoder->dev->dev_private; - enum port port = intel_dig_port->port; - bool wait; - uint32_t val; - - if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { - val = I915_READ(DDI_BUF_CTL(port)); - if (val & DDI_BUF_CTL_ENABLE) { - val &= ~DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), val); - wait = true; - } - - val = I915_READ(DP_TP_CTL(port)); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); - - if (wait) - intel_wait_ddi_buf_idle(dev_priv, port); - } - - val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | - DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; - if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) - val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); - - intel_dp->DP |= DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); - POSTING_READ(DDI_BUF_CTL(port)); - - udelay(600); + I915_WRITE(DDI_BUF_CTL(port), temp); } -void intel_ddi_fdi_disable(struct drm_crtc *crtc) +void intel_disable_ddi(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - uint32_t val; - - intel_ddi_post_disable(intel_encoder); - - val = I915_READ(_FDI_RXA_CTL); - val &= ~FDI_RX_ENABLE; - I915_WRITE(_FDI_RXA_CTL, val); - - val = I915_READ(_FDI_RXA_MISC); - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - I915_WRITE(_FDI_RXA_MISC, val); - - val = I915_READ(_FDI_RXA_CTL); - val &= ~FDI_PCDCLK; - I915_WRITE(_FDI_RXA_CTL, val); - - val = I915_READ(_FDI_RXA_CTL); - val &= ~FDI_RX_PLL_ENABLE; - I915_WRITE(_FDI_RXA_CTL, val); -} - -static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) -{ - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - int type = intel_encoder->type; - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) - intel_dp_check_link_status(intel_dp); -} - -static void intel_ddi_destroy(struct drm_encoder *encoder) -{ - /* HDMI has nothing special to destroy, so we can go with this. 
*/ - intel_dp_encoder_destroy(encoder); -} - -static bool intel_ddi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - int type = intel_encoder->type; - - WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n"); - - if (type == INTEL_OUTPUT_HDMI) - return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode); - else - return intel_dp_mode_fixup(encoder, mode, adjusted_mode); -} - -static const struct drm_encoder_funcs intel_ddi_funcs = { - .destroy = intel_ddi_destroy, -}; - -static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { - .mode_fixup = intel_ddi_mode_fixup, - .mode_set = intel_ddi_mode_set, - .disable = intel_encoder_noop, -}; - -void intel_ddi_init(struct drm_device *dev, enum port port) -{ - struct intel_digital_port *intel_dig_port; - struct intel_encoder *intel_encoder; - struct drm_encoder *encoder; - struct intel_connector *hdmi_connector = NULL; - struct intel_connector *dp_connector = NULL; - - intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); - if (!intel_dig_port) - return; - - dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); - if (!dp_connector) { - kfree(intel_dig_port); - return; - } - - if (port != PORT_A) { - hdmi_connector = kzalloc(sizeof(struct intel_connector), - GFP_KERNEL); - if (!hdmi_connector) { - kfree(dp_connector); - kfree(intel_dig_port); - return; - } - } - - intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; - - drm_encoder_init(dev, encoder, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs); - - intel_encoder->enable = intel_enable_ddi; - intel_encoder->pre_enable = intel_ddi_pre_enable; - intel_encoder->disable = intel_disable_ddi; - intel_encoder->post_disable = intel_ddi_post_disable; - intel_encoder->get_hw_state = intel_ddi_get_hw_state; - - intel_dig_port->port = port; - if (hdmi_connector) - intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); - else - intel_dig_port->hdmi.sdvox_reg = 0; - intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + int port = intel_hdmi->ddi_port; + u32 temp; - intel_encoder->type = INTEL_OUTPUT_UNKNOWN; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; - intel_encoder->hot_plug = intel_ddi_hot_plug; + temp = I915_READ(DDI_BUF_CTL(port)); + temp &= ~DDI_BUF_CTL_ENABLE; - if (hdmi_connector) - intel_hdmi_init_connector(intel_dig_port, hdmi_connector); - intel_dp_init_connector(intel_dig_port, dp_connector); + I915_WRITE(DDI_BUF_CTL(port), temp); } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index de51489de23c..b426d44a2b05 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -41,6 +41,8 @@ #include #include +#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + bool intel_pipe_has_type(struct drm_crtc *crtc, int type); static void intel_increase_pllclock(struct drm_crtc *crtc); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); @@ -78,16 +80,6 @@ struct intel_limit { /* FDI */ #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ -int -intel_pch_rawclk(struct drm_device 
*dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - WARN_ON(!HAS_PCH_SPLIT(dev)); - - return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; -} - static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, @@ -388,7 +380,7 @@ static const intel_limit_t intel_limits_vlv_dac = { static const intel_limit_t intel_limits_vlv_hdmi = { .dot = { .min = 20000, .max = 165000 }, - .vco = { .min = 4000000, .max = 5994000}, + .vco = { .min = 5994000, .max = 4000000 }, .n = { .min = 1, .max = 7 }, .m = { .min = 60, .max = 300 }, /* guess */ .m1 = { .min = 2, .max = 3 }, @@ -401,10 +393,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = { }; static const intel_limit_t intel_limits_vlv_dp = { - .dot = { .min = 25000, .max = 270000 }, - .vco = { .min = 4000000, .max = 6000000 }, + .dot = { .min = 162000, .max = 270000 }, + .vco = { .min = 5994000, .max = 4000000 }, .n = { .min = 1, .max = 7 }, - .m = { .min = 22, .max = 450 }, + .m = { .min = 60, .max = 300 }, /* guess */ .m1 = { .min = 2, .max = 3 }, .m2 = { .min = 11, .max = 156 }, .p = { .min = 10, .max = 30 }, @@ -539,7 +531,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, limit = &intel_limits_ironlake_single_lvds; } } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + HAS_eDP) limit = &intel_limits_ironlake_display_port; else limit = &intel_limits_ironlake_dac; @@ -935,15 +927,6 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, return true; } -enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - return intel_crtc->cpu_transcoder; -} - static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1016,11 +999,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); if (INTEL_INFO(dev)->gen >= 4) { - int reg = PIPECONF(cpu_transcoder); + int reg = PIPECONF(pipe); /* Wait for the Pipe State to go off */ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, @@ -1122,14 +1103,12 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, int reg; u32 val; bool cur_state; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); if (IS_HASWELL(dev_priv->dev)) { /* On Haswell, DDI is used instead of FDI_TX_CTL */ - reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); + reg = DDI_FUNC_CTL(pipe); val = I915_READ(reg); - cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); + cur_state = !!(val & PIPE_DDI_FUNC_ENABLE); } else { reg = FDI_TX_CTL(pipe); val = I915_READ(reg); @@ -1149,9 +1128,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv, u32 val; bool cur_state; - reg = FDI_RX_CTL(pipe); - val = I915_READ(reg); - cur_state = !!(val & FDI_RX_ENABLE); + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n"); + return; + } else { + reg = FDI_RX_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & FDI_RX_ENABLE); + } WARN(cur_state != state, "FDI RX state assertion failure (expected %s, current %s)\n", 
state_string(state), state_string(cur_state)); @@ -1184,6 +1168,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, int reg; u32 val; + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n"); + return; + } reg = FDI_RX_CTL(pipe); val = I915_READ(reg); WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); @@ -1224,14 +1212,12 @@ void assert_pipe(struct drm_i915_private *dev_priv, int reg; u32 val; bool cur_state; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); /* if we need the pipe A quirk it must be always on */ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) state = true; - reg = PIPECONF(cpu_transcoder); + reg = PIPECONF(pipe); val = I915_READ(reg); cur_state = !!(val & PIPECONF_ENABLE); WARN(cur_state != state, @@ -1568,14 +1554,14 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) } /** - * ironlake_enable_pch_pll - enable PCH PLL + * intel_enable_pch_pll - enable PCH PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to enable * * The PCH PLL needs to be enabled before the PCH transcoder, since it * drives the transcoder clock. */ -static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) +static void intel_enable_pch_pll(struct intel_crtc *intel_crtc) { struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; struct intel_pch_pll *pll; @@ -1659,12 +1645,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) pll->on = false; } -static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void intel_enable_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; + int reg; + u32 val, pipeconf_val; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - uint32_t reg, val, pipeconf_val; /* PCH only available on ILK+ */ BUG_ON(dev_priv->info->gen < 5); @@ -1678,15 +1664,10 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, assert_fdi_tx_enabled(dev_priv, pipe); assert_fdi_rx_enabled(dev_priv, pipe); - if (HAS_PCH_CPT(dev)) { - /* Workaround: Set the timing override bit before enabling the - * pch transcoder. */ - reg = TRANS_CHICKEN2(pipe); - val = I915_READ(reg); - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(reg, val); + if (IS_HASWELL(dev_priv->dev) && pipe > 0) { + DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n"); + return; } - reg = TRANSCONF(pipe); val = I915_READ(reg); pipeconf_val = I915_READ(PIPECONF(pipe)); @@ -1715,42 +1696,11 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, DRM_ERROR("failed to enable transcoder %d\n", pipe); } -static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - u32 val, pipeconf_val; - - /* PCH only available on ILK+ */ - BUG_ON(dev_priv->info->gen < 5); - - /* FDI must be feeding us bits for PCH ports */ - assert_fdi_tx_enabled(dev_priv, cpu_transcoder); - assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); - - /* Workaround: set timing override bit. 
*/ - val = I915_READ(_TRANSA_CHICKEN2); - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(_TRANSA_CHICKEN2, val); - - val = TRANS_ENABLE; - pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); - - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == - PIPECONF_INTERLACED_ILK) - val |= TRANS_INTERLACED; - else - val |= TRANS_PROGRESSIVE; - - I915_WRITE(TRANSCONF(TRANSCODER_A), val); - if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) - DRM_ERROR("Failed to enable PCH transcoder\n"); -} - -static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void intel_disable_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_device *dev = dev_priv->dev; - uint32_t reg, val; + int reg; + u32 val; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); @@ -1766,31 +1716,6 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, /* wait for PCH transcoder off, transcoder state */ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) DRM_ERROR("failed to disable transcoder %d\n", pipe); - - if (!HAS_PCH_IBX(dev)) { - /* Workaround: Clear the timing override chicken bit again. */ - reg = TRANS_CHICKEN2(pipe); - val = I915_READ(reg); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(reg, val); - } -} - -static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = I915_READ(_TRANSACONF); - val &= ~TRANS_ENABLE; - I915_WRITE(_TRANSACONF, val); - /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) - DRM_ERROR("Failed to disable PCH transcoder\n"); - - /* Workaround: clear timing override bit. */ - val = I915_READ(_TRANSA_CHICKEN2); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(_TRANSA_CHICKEN2, val); } /** @@ -1810,17 +1735,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool pch_port) { - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - enum transcoder pch_transcoder; int reg; u32 val; - if (IS_HASWELL(dev_priv->dev)) - pch_transcoder = TRANSCODER_A; - else - pch_transcoder = pipe; - /* * A pipe without a PLL won't actually be able to drive bits from * a plane. 
On ILK+ the pipe PLLs are integrated, so we don't @@ -1831,13 +1748,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, else { if (pch_port) { /* if driving the PCH, we need FDI enabled */ - assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); - assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder); + assert_fdi_rx_pll_enabled(dev_priv, pipe); + assert_fdi_tx_pll_enabled(dev_priv, pipe); } /* FIXME: assert CPU port conditions for SNB+ */ } - reg = PIPECONF(cpu_transcoder); + reg = PIPECONF(pipe); val = I915_READ(reg); if (val & PIPECONF_ENABLE) return; @@ -1861,8 +1778,6 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, static void intel_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); int reg; u32 val; @@ -1876,7 +1791,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) return; - reg = PIPECONF(cpu_transcoder); + reg = PIPECONF(pipe); val = I915_READ(reg); if ((val & PIPECONF_ENABLE) == 0) return; @@ -1892,10 +1807,8 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, void intel_flush_display_plane(struct drm_i915_private *dev_priv, enum plane plane) { - if (dev_priv->info->gen >= 4) - I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); - else - I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); + I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); + I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); } /** @@ -2013,9 +1926,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel * is assumed to be a power-of-two. 
*/ -unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, - unsigned int bpp, - unsigned int pitch) +static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y, + unsigned int bpp, + unsigned int pitch) { int tile_rows, tiles; @@ -2056,38 +1969,24 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - switch (fb->pixel_format) { - case DRM_FORMAT_C8: + switch (fb->bits_per_pixel) { + case 8: dspcntr |= DISPPLANE_8BPP; break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_ARGB1555: - dspcntr |= DISPPLANE_BGRX555; - break; - case DRM_FORMAT_RGB565: - dspcntr |= DISPPLANE_BGRX565; - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - dspcntr |= DISPPLANE_BGRX888; - break; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - dspcntr |= DISPPLANE_RGBX888; - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - dspcntr |= DISPPLANE_BGRX101010; + case 16: + if (fb->depth == 15) + dspcntr |= DISPPLANE_15_16BPP; + else + dspcntr |= DISPPLANE_16BPP; break; - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: - dspcntr |= DISPPLANE_RGBX101010; + case 24: + case 32: + dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break; default: - DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); + DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); return -EINVAL; } - if (INTEL_INFO(dev)->gen >= 4) { if (obj->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; @@ -2101,9 +2000,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, if (INTEL_INFO(dev)->gen >= 4) { intel_crtc->dspaddr_offset = - intel_gen4_compute_offset_xtiled(&x, &y, - fb->bits_per_pixel / 8, - fb->pitches[0]); + gen4_compute_dspaddr_offset_xtiled(&x, &y, + fb->bits_per_pixel / 8, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; } else { intel_crtc->dspaddr_offset = linear_offset; @@ -2154,31 +2053,27 @@ static int ironlake_update_plane(struct drm_crtc *crtc, dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - switch (fb->pixel_format) { - case DRM_FORMAT_C8: + switch (fb->bits_per_pixel) { + case 8: dspcntr |= DISPPLANE_8BPP; break; - case DRM_FORMAT_RGB565: - dspcntr |= DISPPLANE_BGRX565; - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - dspcntr |= DISPPLANE_BGRX888; - break; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - dspcntr |= DISPPLANE_RGBX888; - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - dspcntr |= DISPPLANE_BGRX101010; + case 16: + if (fb->depth != 16) + return -EINVAL; + + dspcntr |= DISPPLANE_16BPP; break; - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: - dspcntr |= DISPPLANE_RGBX101010; + case 24: + case 32: + if (fb->depth == 24) + dspcntr |= DISPPLANE_32BPP_NO_ALPHA; + else if (fb->depth == 30) + dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; + else + return -EINVAL; break; default: - DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); + DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); return -EINVAL; } @@ -2194,9 +2089,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc, linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); intel_crtc->dspaddr_offset = - intel_gen4_compute_offset_xtiled(&x, &y, - fb->bits_per_pixel / 8, - fb->pitches[0]); + gen4_compute_dspaddr_offset_xtiled(&x, &y, + 
fb->bits_per_pixel / 8, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", @@ -2204,12 +2099,8 @@ static int ironlake_update_plane(struct drm_crtc *crtc, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_MODIFY_DISPBASE(DSPSURF(plane), obj->gtt_offset + intel_crtc->dspaddr_offset); - if (IS_HASWELL(dev)) { - I915_WRITE(DSPOFFSET(plane), (y << 16) | x); - } else { - I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); - I915_WRITE(DSPLINOFF(plane), linear_offset); - } + I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); + I915_WRITE(DSPLINOFF(plane), linear_offset); POSTING_READ(reg); return 0; @@ -2257,39 +2148,13 @@ intel_finish_fb(struct drm_framebuffer *old_fb) return ret; } -static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_master_private *master_priv; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - if (!dev->primary->master) - return; - - master_priv = dev->primary->master->driver_priv; - if (!master_priv->sarea_priv) - return; - - switch (intel_crtc->pipe) { - case 0: - master_priv->sarea_priv->pipeA_x = x; - master_priv->sarea_priv->pipeA_y = y; - break; - case 1: - master_priv->sarea_priv->pipeB_x = x; - master_priv->sarea_priv->pipeB_y = y; - break; - default: - break; - } -} - static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_framebuffer *old_fb; int ret; @@ -2341,7 +2206,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); - intel_crtc_update_sarea_pos(crtc, x, y); + if (!dev->primary->master) + return 0; + + master_priv = dev->primary->master->driver_priv; + if (!master_priv->sarea_priv) + return 0; + + if (intel_crtc->pipe) { + master_priv->sarea_priv->pipeB_x = x; + master_priv->sarea_priv->pipeB_y = y; + } else { + master_priv->sarea_priv->pipeA_x = x; + master_priv->sarea_priv->pipeA_y = y; + } return 0; } @@ -2436,29 +2314,6 @@ static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) POSTING_READ(SOUTH_CHICKEN1); } -static void ivb_modeset_global_resources(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *pipe_B_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); - struct intel_crtc *pipe_C_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); - uint32_t temp; - - /* When everything is off disable fdi C so that we could enable fdi B - * with all lanes. XXX: This misses the case where a pipe is not using - * any pch resources and so doesn't need any fdi lanes. */ - if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - - temp = I915_READ(SOUTH_CHICKEN1); - temp &= ~FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("disabling fdi C rx\n"); - I915_WRITE(SOUTH_CHICKEN1, temp); - } -} - /* The FDI link training functions for ILK/Ibexpeak. 
*/ static void ironlake_fdi_link_train(struct drm_crtc *crtc) { @@ -2502,9 +2357,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) udelay(150); /* Ironlake workaround, enable clock pointer after FDI enable*/ - I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); - I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | - FDI_RX_PHASE_SYNC_POINTER_EN); + if (HAS_PCH_IBX(dev)) { + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | + FDI_RX_PHASE_SYNC_POINTER_EN); + } reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { @@ -2593,9 +2450,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; I915_WRITE(reg, temp | FDI_TX_ENABLE); - I915_WRITE(FDI_RX_MISC(pipe), - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { @@ -2610,7 +2464,8 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); - cpt_phase_pointer_enable(dev, pipe); + if (HAS_PCH_CPT(dev)) + cpt_phase_pointer_enable(dev, pipe); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); @@ -2715,9 +2570,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); - DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", - I915_READ(FDI_RX_IIR(pipe))); - /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); @@ -2730,9 +2582,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) temp |= FDI_COMPOSITE_SYNC; I915_WRITE(reg, temp | FDI_TX_ENABLE); - I915_WRITE(FDI_RX_MISC(pipe), - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_AUTO; @@ -2744,7 +2593,8 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); - cpt_phase_pointer_enable(dev, pipe); + if (HAS_PCH_CPT(dev)) + cpt_phase_pointer_enable(dev, pipe); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); @@ -2763,7 +2613,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) if (temp & FDI_RX_BIT_LOCK || (I915_READ(reg) & FDI_RX_BIT_LOCK)) { I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); + DRM_DEBUG_KMS("FDI train 1 done.\n"); break; } } @@ -2804,7 +2654,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) if (temp & FDI_RX_SYMBOL_LOCK) { I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); + DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } @@ -2821,6 +2671,9 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) int pipe = intel_crtc->pipe; u32 reg, temp; + /* Write the TU size bits so error detection works */ + I915_WRITE(FDI_RX_TUSIZE1(pipe), + I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ reg = FDI_RX_CTL(pipe); @@ -2921,6 +2774,9 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) /* Ironlake workaround, disable clock pointer after downing FDI */ if (HAS_PCH_IBX(dev)) { I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + I915_WRITE(FDI_RX_CHICKEN(pipe), + I915_READ(FDI_RX_CHICKEN(pipe) & + ~FDI_RX_PHASE_SYNC_POINTER_EN)); } else if (HAS_PCH_CPT(dev)) { cpt_phase_pointer_disable(dev, pipe); } @@ -2983,7 +2839,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) mutex_unlock(&dev->struct_mutex); } -static bool 
ironlake_crtc_driving_pch(struct drm_crtc *crtc) +static bool intel_crtc_driving_pch(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct intel_encoder *intel_encoder; @@ -2993,6 +2849,23 @@ static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) * must be driven by its own crtc; no sharing is possible. */ for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + + /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell + * CPU handles all others */ + if (IS_HASWELL(dev)) { + /* It is still unclear how this will work on PPT, so throw up a warning */ + WARN_ON(!HAS_PCH_LPT(dev)); + + if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { + DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); + return true; + } else { + DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", + intel_encoder->type); + return false; + } + } + switch (intel_encoder->type) { case INTEL_OUTPUT_EDP: if (!intel_encoder_is_pch_edp(&intel_encoder->base)) @@ -3004,11 +2877,6 @@ static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) return true; } -static bool haswell_crtc_driving_pch(struct drm_crtc *crtc) -{ - return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG); -} - /* Program iCLKIP clock to the desired frequency */ static void lpt_program_iclkip(struct drm_crtc *crtc) { @@ -3118,24 +2986,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) assert_transcoder_disabled(dev_priv, pipe); - /* Write the TU size bits before fdi link training, so that error - * detection works. */ - I915_WRITE(FDI_RX_TUSIZE1(pipe), - I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); - /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc); - /* XXX: pch pll's can be enabled any time before we enable the PCH - * transcoder, and we actually should do this to not upset any PCH - * transcoder that already use the clock when we share it. - * - * Note that enable_pch_pll tries to do the right thing, but get_pch_pll - * unconditionally resets the pll - we need that to have the right LVDS - * enable sequence. */ - ironlake_enable_pch_pll(intel_crtc); + intel_enable_pch_pll(intel_crtc); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_LPT(dev)) { + DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n"); + lpt_program_iclkip(crtc); + } else if (HAS_PCH_CPT(dev)) { u32 sel; temp = I915_READ(PCH_DPLL_SEL); @@ -3172,7 +3031,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); - intel_fdi_normal_train(crtc); + if (!IS_HASWELL(dev)) + intel_fdi_normal_train(crtc); /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && @@ -3204,37 +3064,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) temp |= TRANS_DP_PORT_SEL_D; break; default: - BUG(); + DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); + temp |= TRANS_DP_PORT_SEL_B; + break; } I915_WRITE(reg, temp); } - ironlake_enable_pch_transcoder(dev_priv, pipe); -} - -static void lpt_pch_enable(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - - assert_transcoder_disabled(dev_priv, TRANSCODER_A); - - lpt_program_iclkip(crtc); - - /* Set transcoder timing. 
*/ - I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder))); - I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder))); - I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder))); - - I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder))); - I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder))); - I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder))); - I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder))); - - lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); + intel_enable_transcoder(dev_priv, pipe); } static void intel_put_pch_pll(struct intel_crtc *intel_crtc) @@ -3327,12 +3165,16 @@ static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u3 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - int dslreg = PIPEDSL(pipe); + int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); u32 temp; temp = I915_READ(dslreg); udelay(500); if (wait_for(I915_READ(dslreg) != temp, 5)) { + /* Without this, mode sets may fail silently on FDI */ + I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); + udelay(250); + I915_WRITE(tc2reg, 0); if (wait_for(I915_READ(dslreg) != temp, 5)) DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); } @@ -3363,12 +3205,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); } - is_pch_port = ironlake_crtc_driving_pch(crtc); + is_pch_port = intel_crtc_driving_pch(crtc); if (is_pch_port) { - /* Note: FDI PLL enabling _must_ be done before we enable the - * cpu pipes, hence this is separate from all the other fdi/pch - * enabling. */ ironlake_fdi_pll_enable(intel_crtc); } else { assert_fdi_tx_disabled(dev_priv, pipe); @@ -3381,17 +3220,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && - (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { /* Force use of hard-coded filter coefficients * as some pre-programmed values are broken, * e.g. x201. 
*/ - if (IS_IVYBRIDGE(dev)) - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | - PF_PIPE_SEL_IVB(pipe)); - else - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); } @@ -3431,83 +3265,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_wait_for_vblank(dev, intel_crtc->pipe); } -static void haswell_crtc_enable(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - bool is_pch_port; - - WARN_ON(!crtc->enabled); - - if (intel_crtc->active) - return; - - intel_crtc->active = true; - intel_update_watermarks(dev); - - is_pch_port = haswell_crtc_driving_pch(crtc); - - if (is_pch_port) - dev_priv->display.fdi_link_train(crtc); - - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_enable) - encoder->pre_enable(encoder); - - intel_ddi_enable_pipe_clock(intel_crtc); - - /* Enable panel fitting for eDP */ - if (dev_priv->pch_pf_size && - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { - /* Force use of hard-coded filter coefficients - * as some pre-programmed values are broken, - * e.g. x201. - */ - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | - PF_PIPE_SEL_IVB(pipe)); - I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); - I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); - } - - /* - * On ILK+ LUT must be loaded before the pipe is running but with - * clocks enabled - */ - intel_crtc_load_lut(crtc); - - intel_ddi_set_pipe_settings(crtc); - intel_ddi_enable_pipe_func(crtc); - - intel_enable_pipe(dev_priv, pipe, is_pch_port); - intel_enable_plane(dev_priv, plane, pipe); - - if (is_pch_port) - lpt_pch_enable(crtc); - - mutex_lock(&dev->struct_mutex); - intel_update_fbc(dev); - mutex_unlock(&dev->struct_mutex); - - intel_crtc_update_cursor(crtc, true); - - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); - - /* - * There seems to be a race in PCH platform hw (at least on some - * outputs) where an enabled pipe still completes any pageflip right - * away (as if the pipe is off) instead of waiting for vblank. As soon - * as the first vblank happend, everything works as expected. Hence just - * wait for one vblank before returning to avoid strange things - * happening. 
- */ - intel_wait_for_vblank(dev, intel_crtc->pipe); -} - static void ironlake_crtc_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -3546,7 +3303,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) ironlake_fdi_disable(crtc); - ironlake_disable_pch_transcoder(dev_priv, pipe); + intel_disable_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ @@ -3588,78 +3345,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) mutex_unlock(&dev->struct_mutex); } -static void haswell_crtc_disable(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - bool is_pch_port; - - if (!intel_crtc->active) - return; - - is_pch_port = haswell_crtc_driving_pch(crtc); - - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->disable(encoder); - - intel_crtc_wait_for_pending_flips(crtc); - drm_vblank_off(dev, pipe); - intel_crtc_update_cursor(crtc, false); - - intel_disable_plane(dev_priv, plane, pipe); - - if (dev_priv->cfb_plane == plane) - intel_disable_fbc(dev); - - intel_disable_pipe(dev_priv, pipe); - - intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); - - /* Disable PF */ - I915_WRITE(PF_CTL(pipe), 0); - I915_WRITE(PF_WIN_SZ(pipe), 0); - - intel_ddi_disable_pipe_clock(intel_crtc); - - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_disable) - encoder->post_disable(encoder); - - if (is_pch_port) { - lpt_disable_pch_transcoder(dev_priv); - intel_ddi_fdi_disable(crtc); - } - - intel_crtc->active = false; - intel_update_watermarks(dev); - - mutex_lock(&dev->struct_mutex); - intel_update_fbc(dev); - mutex_unlock(&dev->struct_mutex); -} - -static void ironlake_crtc_off(struct drm_crtc *crtc) +static void ironlake_crtc_off(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_put_pch_pll(intel_crtc); } -static void haswell_crtc_off(struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - /* Stop saying we're using TRANSCODER_EDP because some other CRTC might - * start using it. 
*/ - intel_crtc->cpu_transcoder = intel_crtc->pipe; - - intel_ddi_put_crtc_pll(crtc); -} - static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) { if (!enable && intel_crtc->overlay) { @@ -4370,7 +4061,7 @@ static void vlv_update_pll(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, intel_clock_t *clock, intel_clock_t *reduced_clock, - int num_connectors) + int refclk, int num_connectors) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4378,19 +4069,9 @@ static void vlv_update_pll(struct drm_crtc *crtc, int pipe = intel_crtc->pipe; u32 dpll, mdiv, pdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; - bool is_sdvo; - u32 temp; - - is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || - intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); - - dpll = DPLL_VGA_MODE_DIS; - dpll |= DPLL_EXT_BUFFER_ENABLE_VLV; - dpll |= DPLL_REFA_CLK_ENABLE_VLV; - dpll |= DPLL_INTEGRATED_CLOCK_VLV; + bool is_hdmi; - I915_WRITE(DPLL(pipe), dpll); - POSTING_READ(DPLL(pipe)); + is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); bestn = clock->n; bestm1 = clock->m1; @@ -4398,10 +4079,12 @@ static void vlv_update_pll(struct drm_crtc *crtc, bestp1 = clock->p1; bestp2 = clock->p2; - /* - * In Valleyview PLL and program lane counter registers are exposed - * through DPIO interface - */ + /* Enable DPIO clock input */ + dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | + DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; + I915_WRITE(DPLL(pipe), dpll); + POSTING_READ(DPLL(pipe)); + mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); mdiv |= ((bestn << DPIO_N_SHIFT)); @@ -4412,13 +4095,12 @@ static void vlv_update_pll(struct drm_crtc *crtc, intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); - pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) | + pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) | (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | - (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) | - (5 << DPIO_CLK_BIAS_CTL_SHIFT); + (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT); intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); - intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b); + intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051); dpll |= DPLL_VCO_ENABLE; I915_WRITE(DPLL(pipe), dpll); @@ -4426,44 +4108,19 @@ static void vlv_update_pll(struct drm_crtc *crtc, if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) DRM_ERROR("DPLL %d failed to lock\n", pipe); - intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) - intel_dp_set_m_n(crtc, mode, adjusted_mode); + if (is_hdmi) { + u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode); - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. 
*/ - POSTING_READ(DPLL(pipe)); - udelay(150); - - temp = 0; - if (is_sdvo) { - temp = intel_mode_get_pixel_multiplier(adjusted_mode); if (temp > 1) temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; else temp = 0; - } - I915_WRITE(DPLL_MD(pipe), temp); - POSTING_READ(DPLL_MD(pipe)); - /* Now program lane control registers */ - if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) - || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) - { - temp = 0x1000C4; - if(pipe == 1) - temp |= (1 << 21); - intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp); - } - if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP)) - { - temp = 0x1000C4; - if(pipe == 1) - temp |= (1 << 21); - intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); + I915_WRITE(DPLL_MD(pipe), temp); + POSTING_READ(DPLL_MD(pipe)); } + + intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */ } static void i9xx_update_pll(struct drm_crtc *crtc, @@ -4479,8 +4136,6 @@ static void i9xx_update_pll(struct drm_crtc *crtc, u32 dpll; bool is_sdvo; - i9xx_update_pll_dividers(crtc, clock, reduced_clock); - is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); @@ -4581,7 +4236,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc, static void i8xx_update_pll(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, - intel_clock_t *clock, intel_clock_t *reduced_clock, + intel_clock_t *clock, int num_connectors) { struct drm_device *dev = crtc->dev; @@ -4590,8 +4245,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc, int pipe = intel_crtc->pipe; u32 dpll; - i9xx_update_pll_dividers(crtc, clock, reduced_clock); - dpll = DPLL_VGA_MODE_DIS; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { @@ -4641,64 +4294,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc, I915_WRITE(DPLL(pipe), dpll); } -static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe = intel_crtc->pipe; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - uint32_t vsyncshift; - - if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { - /* the chip adds 2 halflines automatically */ - adjusted_mode->crtc_vtotal -= 1; - adjusted_mode->crtc_vblank_end -= 1; - vsyncshift = adjusted_mode->crtc_hsync_start - - adjusted_mode->crtc_htotal / 2; - } else { - vsyncshift = 0; - } - - if (INTEL_INFO(dev)->gen > 3) - I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); - - I915_WRITE(HTOTAL(cpu_transcoder), - (adjusted_mode->crtc_hdisplay - 1) | - ((adjusted_mode->crtc_htotal - 1) << 16)); - I915_WRITE(HBLANK(cpu_transcoder), - (adjusted_mode->crtc_hblank_start - 1) | - ((adjusted_mode->crtc_hblank_end - 1) << 16)); - I915_WRITE(HSYNC(cpu_transcoder), - (adjusted_mode->crtc_hsync_start - 1) | - ((adjusted_mode->crtc_hsync_end - 1) << 16)); - - I915_WRITE(VTOTAL(cpu_transcoder), - (adjusted_mode->crtc_vdisplay - 1) | - ((adjusted_mode->crtc_vtotal - 1) << 16)); - I915_WRITE(VBLANK(cpu_transcoder), - (adjusted_mode->crtc_vblank_start - 1) | - ((adjusted_mode->crtc_vblank_end - 1) << 16)); - I915_WRITE(VSYNC(cpu_transcoder), - (adjusted_mode->crtc_vsync_start - 1) | - ((adjusted_mode->crtc_vsync_end - 1) << 16)); - - /* Workaround: when the EDP input selection is B, the VTOTAL_B must be - * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. 
This is - * documented on the DDI_FUNC_CTL register description, EDP Input Select - * bits. */ - if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && - (pipe == PIPE_B || pipe == PIPE_C)) - I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); - - /* pipesrc controls the size that is scaled from, which should - * always be the user's requested size. - */ - I915_WRITE(PIPESRC(pipe), - ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); -} - static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -4712,7 +4307,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, int plane = intel_crtc->plane; int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; - u32 dspcntr, pipeconf; + u32 dspcntr, pipeconf, vsyncshift; bool ok, has_reduced_clock = false, is_sdvo = false; bool is_lvds = false, is_tv = false, is_dp = false; struct intel_encoder *encoder; @@ -4776,14 +4371,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, if (is_sdvo && is_tv) i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); + i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? + &reduced_clock : NULL); + if (IS_GEN2(dev)) - i8xx_update_pll(crtc, adjusted_mode, &clock, - has_reduced_clock ? &reduced_clock : NULL, - num_connectors); + i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); else if (IS_VALLEYVIEW(dev)) - vlv_update_pll(crtc, mode, adjusted_mode, &clock, - has_reduced_clock ? &reduced_clock : NULL, - num_connectors); + vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL, + refclk, num_connectors); else i9xx_update_pll(crtc, mode, adjusted_mode, &clock, has_reduced_clock ? &reduced_clock : NULL, @@ -4824,14 +4419,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, } } - if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { - if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { - pipeconf |= PIPECONF_BPP_6 | - PIPECONF_ENABLE | - I965_PIPECONF_ACTIVE; - } - } - DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 
'A' : 'B'); drm_mode_debug_printmodeline(mode); @@ -4847,12 +4434,40 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, pipeconf &= ~PIPECONF_INTERLACE_MASK; if (!IS_GEN2(dev) && - adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; - else + /* the chip adds 2 halflines automatically */ + adjusted_mode->crtc_vtotal -= 1; + adjusted_mode->crtc_vblank_end -= 1; + vsyncshift = adjusted_mode->crtc_hsync_start + - adjusted_mode->crtc_htotal/2; + } else { pipeconf |= PIPECONF_PROGRESSIVE; + vsyncshift = 0; + } + + if (!IS_GEN3(dev)) + I915_WRITE(VSYNCSHIFT(pipe), vsyncshift); + + I915_WRITE(HTOTAL(pipe), + (adjusted_mode->crtc_hdisplay - 1) | + ((adjusted_mode->crtc_htotal - 1) << 16)); + I915_WRITE(HBLANK(pipe), + (adjusted_mode->crtc_hblank_start - 1) | + ((adjusted_mode->crtc_hblank_end - 1) << 16)); + I915_WRITE(HSYNC(pipe), + (adjusted_mode->crtc_hsync_start - 1) | + ((adjusted_mode->crtc_hsync_end - 1) << 16)); - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + I915_WRITE(VTOTAL(pipe), + (adjusted_mode->crtc_vdisplay - 1) | + ((adjusted_mode->crtc_vtotal - 1) << 16)); + I915_WRITE(VBLANK(pipe), + (adjusted_mode->crtc_vblank_start - 1) | + ((adjusted_mode->crtc_vblank_end - 1) << 16)); + I915_WRITE(VSYNC(pipe), + (adjusted_mode->crtc_vsync_start - 1) | + ((adjusted_mode->crtc_vsync_end - 1) << 16)); /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. @@ -4861,6 +4476,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); I915_WRITE(DSPPOS(plane), 0); + I915_WRITE(PIPESRC(pipe), + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); I915_WRITE(PIPECONF(pipe), pipeconf); POSTING_READ(PIPECONF(pipe)); @@ -5051,8 +4668,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc, val |= PIPE_12BPC; break; default: - /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ - BUG(); + val |= PIPE_8BPC; + break; } val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); @@ -5069,31 +4686,6 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc, POSTING_READ(PIPECONF(pipe)); } -static void haswell_set_pipeconf(struct drm_crtc *crtc, - struct drm_display_mode *adjusted_mode, - bool dither) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - uint32_t val; - - val = I915_READ(PIPECONF(cpu_transcoder)); - - val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); - if (dither) - val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - - val &= ~PIPECONF_INTERLACE_MASK_HSW; - if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACED_ILK; - else - val |= PIPECONF_PROGRESSIVE; - - I915_WRITE(PIPECONF(cpu_transcoder), val); - POSTING_READ(PIPECONF(cpu_transcoder)); -} - static bool ironlake_compute_clocks(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, intel_clock_t *clock, @@ -5157,115 +4749,74 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, return true; } -static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t temp; - - temp = I915_READ(SOUTH_CHICKEN1); - if (temp & FDI_BC_BIFURCATION_SELECT) - return; - - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - - temp |= FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("enabling fdi C rx\n"); - I915_WRITE(SOUTH_CHICKEN1, temp); - POSTING_READ(SOUTH_CHICKEN1); -} - -static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc) -{ - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *pipe_B_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); - - DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n", - intel_crtc->pipe, intel_crtc->fdi_lanes); - if (intel_crtc->fdi_lanes > 4) { - DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n", - intel_crtc->pipe, intel_crtc->fdi_lanes); - /* Clamp lanes to avoid programming the hw with bogus values. */ - intel_crtc->fdi_lanes = 4; - - return false; - } - - if (dev_priv->num_pipe == 2) - return true; - - switch (intel_crtc->pipe) { - case PIPE_A: - return true; - case PIPE_B: - if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled && - intel_crtc->fdi_lanes > 2) { - DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", - intel_crtc->pipe, intel_crtc->fdi_lanes); - /* Clamp lanes to avoid programming the hw with bogus values. */ - intel_crtc->fdi_lanes = 2; - - return false; - } - - if (intel_crtc->fdi_lanes > 2) - WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); - else - cpt_enable_fdi_bc_bifurcation(dev); - - return true; - case PIPE_C: - if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) { - if (intel_crtc->fdi_lanes > 2) { - DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", - intel_crtc->pipe, intel_crtc->fdi_lanes); - /* Clamp lanes to avoid programming the hw with bogus values. 
*/ - intel_crtc->fdi_lanes = 2; - - return false; - } - } else { - DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); - return false; - } - - cpt_enable_fdi_bc_bifurcation(dev); - - return true; - default: - BUG(); - } -} - -static void ironlake_set_m_n(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static int ironlake_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; - struct intel_encoder *intel_encoder, *edp_encoder = NULL; + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + int num_connectors = 0; + intel_clock_t clock, reduced_clock; + u32 dpll, fp = 0, fp2 = 0; + bool ok, has_reduced_clock = false, is_sdvo = false; + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + struct intel_encoder *encoder, *edp_encoder = NULL; + int ret; struct fdi_m_n m_n = {0}; - int target_clock, pixel_multiplier, lane, link_bw; - bool is_dp = false, is_cpu_edp = false; + u32 temp; + int target_clock, pixel_multiplier, lane, link_bw, factor; + unsigned int pipe_bpp; + bool dither; + bool is_cpu_edp = false, is_pch_edp = false; - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - switch (intel_encoder->type) { + for_each_encoder_on_crtc(dev, crtc, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + case INTEL_OUTPUT_HDMI: + is_sdvo = true; + if (encoder->needs_tv_clock) + is_tv = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; + break; + case INTEL_OUTPUT_ANALOG: + is_crt = true; + break; case INTEL_OUTPUT_DISPLAYPORT: is_dp = true; break; case INTEL_OUTPUT_EDP: is_dp = true; - if (!intel_encoder_is_pch_edp(&intel_encoder->base)) + if (intel_encoder_is_pch_edp(&encoder->base)) + is_pch_edp = true; + else is_cpu_edp = true; - edp_encoder = intel_encoder; + edp_encoder = encoder; break; } + + num_connectors++; + } + + ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, + &has_reduced_clock, &reduced_clock); + if (!ok) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; } + /* Ensure that the cursor is valid for the new mode before changing... 
*/ + intel_crtc_update_cursor(crtc, true); + /* FDI link */ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); lane = 0; @@ -5292,6 +4843,20 @@ static void ironlake_set_m_n(struct drm_crtc *crtc, else target_clock = adjusted_mode->clock; + /* determine panel color depth */ + dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, + adjusted_mode); + if (is_lvds && dev_priv->lvds_dither) + dither = true; + + if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 && + pipe_bpp != 36) { + WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", + pipe_bpp); + pipe_bpp = 24; + } + intel_crtc->bpp = pipe_bpp; + if (!lane) { /* * Account for spread spectrum to avoid @@ -5309,51 +4874,10 @@ static void ironlake_set_m_n(struct drm_crtc *crtc, ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n); - I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); - I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); - I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); -} - -static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, - struct drm_display_mode *adjusted_mode, - intel_clock_t *clock, u32 fp) -{ - struct drm_crtc *crtc = &intel_crtc->base; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder; - uint32_t dpll; - int factor, pixel_multiplier, num_connectors = 0; - bool is_lvds = false, is_sdvo = false, is_tv = false; - bool is_dp = false, is_cpu_edp = false; - - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - switch (intel_encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_SDVO: - case INTEL_OUTPUT_HDMI: - is_sdvo = true; - if (intel_encoder->needs_tv_clock) - is_tv = true; - break; - case INTEL_OUTPUT_TVOUT: - is_tv = true; - break; - case INTEL_OUTPUT_DISPLAYPORT: - is_dp = true; - break; - case INTEL_OUTPUT_EDP: - is_dp = true; - if (!intel_encoder_is_pch_edp(&intel_encoder->base)) - is_cpu_edp = true; - break; - } - - num_connectors++; - } + fp = clock.n << 16 | clock.m1 << 8 | clock.m2; + if (has_reduced_clock) + fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | + reduced_clock.m2; /* Enable autotuning of the PLL clock (if permissible) */ factor = 21; @@ -5365,7 +4889,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, } else if (is_sdvo && is_tv) factor = 20; - if (clock->m < factor * clock->n) + if (clock.m < factor * clock.n) fp |= FP_CB_TUNE; dpll = 0; @@ -5375,119 +4899,55 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, else dpll |= DPLLB_MODE_DAC_SERIAL; if (is_sdvo) { - pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); if (pixel_multiplier > 1) { dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } - dpll |= DPLL_DVO_HIGH_SPEED; - } - if (is_dp && !is_cpu_edp) - dpll |= DPLL_DVO_HIGH_SPEED; - - /* compute bitmask from p1 value */ - dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; - /* also FPA1 */ - dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; - - switch (clock->p2) { - case 5: - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; - break; - case 7: - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; - break; - case 10: - dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; - break; - case 14: - dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; - break; - } - - if (is_sdvo && is_tv) - dpll |= 
PLL_REF_INPUT_TVCLKINBC; - else if (is_tv) - /* XXX: just matching BIOS for now */ - /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ - dpll |= 3; - else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) - dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; - else - dpll |= PLL_REF_INPUT_DREFCLK; - - return dpll; -} - -static int ironlake_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - int num_connectors = 0; - intel_clock_t clock, reduced_clock; - u32 dpll, fp = 0, fp2 = 0; - bool ok, has_reduced_clock = false; - bool is_lvds = false, is_dp = false, is_cpu_edp = false; - struct intel_encoder *encoder; - u32 temp; - int ret; - bool dither, fdi_config_ok; - - for_each_encoder_on_crtc(dev, crtc, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_DISPLAYPORT: - is_dp = true; - break; - case INTEL_OUTPUT_EDP: - is_dp = true; - if (!intel_encoder_is_pch_edp(&encoder->base)) - is_cpu_edp = true; - break; - } - - num_connectors++; + dpll |= DPLL_DVO_HIGH_SPEED; } + if (is_dp && !is_cpu_edp) + dpll |= DPLL_DVO_HIGH_SPEED; - WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), - "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); + /* compute bitmask from p1 value */ + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + /* also FPA1 */ + dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; - ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, - &has_reduced_clock, &reduced_clock); - if (!ok) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); - return -EINVAL; + switch (clock.p2) { + case 5: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; + break; + case 7: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; + break; + case 10: + dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; + break; + case 14: + dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; + break; } - /* Ensure that the cursor is valid for the new mode before changing... */ - intel_crtc_update_cursor(crtc, true); - - /* determine panel color depth */ - dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, - adjusted_mode); - if (is_lvds && dev_priv->lvds_dither) - dither = true; - - fp = clock.n << 16 | clock.m1 << 8 | clock.m2; - if (has_reduced_clock) - fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | - reduced_clock.m2; - - dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp); + if (is_sdvo && is_tv) + dpll |= PLL_REF_INPUT_TVCLKINBC; + else if (is_tv) + /* XXX: just matching BIOS for now */ + /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ + dpll |= 3; + else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) + dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; + else + dpll |= PLL_REF_INPUT_DREFCLK; DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); drm_mode_debug_printmodeline(mode); - /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
*/ - if (!is_cpu_edp) { + /* CPU eDP is the only output that doesn't need a PCH PLL of its own on + * pre-Haswell/LPT generation */ + if (HAS_PCH_LPT(dev)) { + DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n", + pipe); + } else if (!is_cpu_edp) { struct intel_pch_pll *pll; pll = intel_get_pch_pll(intel_crtc, dpll, fp); @@ -5573,13 +5033,47 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, } } - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + /* the chip adds 2 halflines automatically */ + adjusted_mode->crtc_vtotal -= 1; + adjusted_mode->crtc_vblank_end -= 1; + I915_WRITE(VSYNCSHIFT(pipe), + adjusted_mode->crtc_hsync_start + - adjusted_mode->crtc_htotal/2); + } else { + I915_WRITE(VSYNCSHIFT(pipe), 0); + } + + I915_WRITE(HTOTAL(pipe), + (adjusted_mode->crtc_hdisplay - 1) | + ((adjusted_mode->crtc_htotal - 1) << 16)); + I915_WRITE(HBLANK(pipe), + (adjusted_mode->crtc_hblank_start - 1) | + ((adjusted_mode->crtc_hblank_end - 1) << 16)); + I915_WRITE(HSYNC(pipe), + (adjusted_mode->crtc_hsync_start - 1) | + ((adjusted_mode->crtc_hsync_end - 1) << 16)); + + I915_WRITE(VTOTAL(pipe), + (adjusted_mode->crtc_vdisplay - 1) | + ((adjusted_mode->crtc_vtotal - 1) << 16)); + I915_WRITE(VBLANK(pipe), + (adjusted_mode->crtc_vblank_start - 1) | + ((adjusted_mode->crtc_vblank_end - 1) << 16)); + I915_WRITE(VSYNC(pipe), + (adjusted_mode->crtc_vsync_start - 1) | + ((adjusted_mode->crtc_vsync_end - 1) << 16)); - /* Note, this also computes intel_crtc->fdi_lanes which is used below in - * ironlake_check_fdi_lanes. */ - ironlake_set_m_n(crtc, mode, adjusted_mode); + /* pipesrc controls the size that is scaled from, which should + * always be the user's requested size. + */ + I915_WRITE(PIPESRC(pipe), + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); - fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); + I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); + I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); + I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); + I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); if (is_cpu_edp) ironlake_set_pll_edp(crtc, adjusted_mode->clock); @@ -5598,217 +5092,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, intel_update_linetime_watermarks(dev, pipe, adjusted_mode); - return fdi_config_ok ? ret : -EINVAL; -} - -static int haswell_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - int num_connectors = 0; - intel_clock_t clock, reduced_clock; - u32 dpll = 0, fp = 0, fp2 = 0; - bool ok, has_reduced_clock = false; - bool is_lvds = false, is_dp = false, is_cpu_edp = false; - struct intel_encoder *encoder; - u32 temp; - int ret; - bool dither; - - for_each_encoder_on_crtc(dev, crtc, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_DISPLAYPORT: - is_dp = true; - break; - case INTEL_OUTPUT_EDP: - is_dp = true; - if (!intel_encoder_is_pch_edp(&encoder->base)) - is_cpu_edp = true; - break; - } - - num_connectors++; - } - - if (is_cpu_edp) - intel_crtc->cpu_transcoder = TRANSCODER_EDP; - else - intel_crtc->cpu_transcoder = pipe; - - /* We are not sure yet this won't happen. 
*/ - WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", - INTEL_PCH_TYPE(dev)); - - WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", - num_connectors, pipe_name(pipe)); - - WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & - (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); - - WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); - - if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) - return -EINVAL; - - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, - &has_reduced_clock, - &reduced_clock); - if (!ok) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); - return -EINVAL; - } - } - - /* Ensure that the cursor is valid for the new mode before changing... */ - intel_crtc_update_cursor(crtc, true); - - /* determine panel color depth */ - dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, - adjusted_mode); - if (is_lvds && dev_priv->lvds_dither) - dither = true; - - DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); - drm_mode_debug_printmodeline(mode); - - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - fp = clock.n << 16 | clock.m1 << 8 | clock.m2; - if (has_reduced_clock) - fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | - reduced_clock.m2; - - dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, - fp); - - /* CPU eDP is the only output that doesn't need a PCH PLL of its - * own on pre-Haswell/LPT generation */ - if (!is_cpu_edp) { - struct intel_pch_pll *pll; - - pll = intel_get_pch_pll(intel_crtc, dpll, fp); - if (pll == NULL) { - DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", - pipe); - return -EINVAL; - } - } else - intel_put_pch_pll(intel_crtc); - - /* The LVDS pin pair needs to be on before the DPLLs are - * enabled. This is an exception to the general rule that - * mode_set doesn't turn things on. - */ - if (is_lvds) { - temp = I915_READ(PCH_LVDS); - temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; - if (HAS_PCH_CPT(dev)) { - temp &= ~PORT_TRANS_SEL_MASK; - temp |= PORT_TRANS_SEL_CPT(pipe); - } else { - if (pipe == 1) - temp |= LVDS_PIPEB_SELECT; - else - temp &= ~LVDS_PIPEB_SELECT; - } - - /* set the corresponsding LVDS_BORDER bit */ - temp |= dev_priv->lvds_border_bits; - /* Set the B0-B3 data pairs corresponding to whether - * we're going to set the DPLLs for dual-channel mode or - * not. - */ - if (clock.p2 == 7) - temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; - else - temp &= ~(LVDS_B0B3_POWER_UP | - LVDS_CLKB_POWER_UP); - - /* It would be nice to set 24 vs 18-bit mode - * (LVDS_A3_POWER_UP) appropriately here, but we need to - * look more thoroughly into how panels behave in the - * two modes. - */ - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); - if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) - temp |= LVDS_HSYNC_POLARITY; - if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) - temp |= LVDS_VSYNC_POLARITY; - I915_WRITE(PCH_LVDS, temp); - } - } - - if (is_dp && !is_cpu_edp) { - intel_dp_set_m_n(crtc, mode, adjusted_mode); - } else { - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - /* For non-DP output, clear any trans DP clock recovery - * setting.*/ - I915_WRITE(TRANSDATA_M1(pipe), 0); - I915_WRITE(TRANSDATA_N1(pipe), 0); - I915_WRITE(TRANSDPLINK_M1(pipe), 0); - I915_WRITE(TRANSDPLINK_N1(pipe), 0); - } - } - - intel_crtc->lowfreq_avail = false; - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - if (intel_crtc->pch_pll) { - I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); - - /* Wait for the clocks to stabilize. 
*/ - POSTING_READ(intel_crtc->pch_pll->pll_reg); - udelay(150); - - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); - } - - if (intel_crtc->pch_pll) { - if (is_lvds && has_reduced_clock && i915_powersave) { - I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); - intel_crtc->lowfreq_avail = true; - } else { - I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); - } - } - } - - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); - - if (!is_dp || is_cpu_edp) - ironlake_set_m_n(crtc, mode, adjusted_mode); - - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - if (is_cpu_edp) - ironlake_set_pll_edp(crtc, adjusted_mode->clock); - - haswell_set_pipeconf(crtc, adjusted_mode, dither); - - /* Set up the display plane register */ - I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); - POSTING_READ(DSPCNTR(plane)); - - ret = intel_pipe_set_base(crtc, x, y, fb); - - intel_update_watermarks(dev); - - intel_update_linetime_watermarks(dev, pipe, adjusted_mode); - return ret; } @@ -5820,8 +5103,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_encoder_helper_funcs *encoder_funcs; - struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int ret; @@ -5832,19 +5113,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, x, y, fb); drm_vblank_post_modeset(dev, pipe); - if (ret != 0) - return ret; - - for_each_encoder_on_crtc(dev, crtc, encoder) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", - encoder->base.base.id, - drm_get_encoder_name(&encoder->base), - mode->base.id, mode->name); - encoder_funcs = encoder->base.helper_private; - encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); - } - - return 0; + return ret; } static bool intel_eld_uptodate(struct drm_connector *connector, @@ -6480,7 +5749,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, int depth, int bpp) { struct drm_i915_gem_object *obj; - struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + struct drm_mode_fb_cmd2 mode_cmd; obj = i915_gem_alloc_object(dev, intel_framebuffer_size_for_mode(mode, bpp)); @@ -6610,19 +5879,24 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); - return false; + goto fail; } if (!intel_set_mode(crtc, mode, 0, 0, fb)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); - return false; + goto fail; } /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); + return true; +fail: + connector->encoder = NULL; + encoder->crtc = NULL; + return false; } void intel_release_load_detect_pipe(struct drm_connector *connector, @@ -6747,12 +6021,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; + int pipe = intel_crtc->pipe; struct drm_display_mode *mode; - int htot = I915_READ(HTOTAL(cpu_transcoder)); - int hsync = I915_READ(HSYNC(cpu_transcoder)); - int vtot = I915_READ(VTOTAL(cpu_transcoder)); - int vsync = 
I915_READ(VSYNC(cpu_transcoder)); + int htot = I915_READ(HTOTAL(pipe)); + int hsync = I915_READ(HSYNC(pipe)); + int vtot = I915_READ(VTOTAL(pipe)); + int vsync = I915_READ(VSYNC(pipe)); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -6909,19 +6183,14 @@ static void intel_unpin_work_fn(struct work_struct *__work) { struct intel_unpin_work *work = container_of(__work, struct intel_unpin_work, work); - struct drm_device *dev = work->crtc->dev; - mutex_lock(&dev->struct_mutex); + mutex_lock(&work->dev->struct_mutex); intel_unpin_fb_obj(work->old_fb_obj); drm_gem_object_unreference(&work->pending_flip_obj->base); drm_gem_object_unreference(&work->old_fb_obj->base); - intel_update_fbc(dev); - mutex_unlock(&dev->struct_mutex); - - BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); - atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); - + intel_update_fbc(work->dev); + mutex_unlock(&work->dev->struct_mutex); kfree(work); } @@ -6932,6 +6201,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; struct drm_i915_gem_object *obj; + struct drm_pending_vblank_event *e; + struct timeval tvbl; unsigned long flags; /* Ignore early vblank irqs */ @@ -6947,8 +6218,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev, intel_crtc->unpin_work = NULL; - if (work->event) - drm_send_vblank_event(dev, intel_crtc->pipe, work->event); + if (work->event) { + e = work->event; + e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); + + e->event.tv_sec = tvbl.tv_sec; + e->event.tv_usec = tvbl.tv_usec; + + list_add_tail(&e->base.link, + &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } drm_vblank_put(dev, intel_crtc->pipe); @@ -6958,9 +6238,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev, atomic_clear_mask(1 << intel_crtc->plane, &obj->pending_flip.counter); - wake_up(&dev_priv->pending_flip_queue); - queue_work(dev_priv->wq, &work->work); + wake_up(&dev_priv->pending_flip_queue); + schedule_work(&work->work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); } @@ -7261,7 +6541,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return -ENOMEM; work->event = event; - work->crtc = crtc; + work->dev = crtc->dev; intel_fb = to_intel_framebuffer(crtc->fb); work->old_fb_obj = intel_fb->obj; INIT_WORK(&work->work, intel_unpin_work_fn); @@ -7286,9 +6566,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - if (atomic_read(&intel_crtc->unpin_work_count) >= 2) - flush_workqueue(dev_priv->wq); - ret = i915_mutex_lock_interruptible(dev); if (ret) goto cleanup; @@ -7307,7 +6584,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, * the flip occurs and the object is no longer visible. 
*/ atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); - atomic_inc(&intel_crtc->unpin_work_count); ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); if (ret) @@ -7322,7 +6598,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_pending: - atomic_dec(&intel_crtc->unpin_work_count); atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); @@ -7618,7 +6893,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) dev->mode_config.dpms_property; connector->dpms = DRM_MODE_DPMS_ON; - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, dpms_property, DRM_MODE_DPMS_ON); @@ -7740,6 +7015,8 @@ bool intel_set_mode(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; + struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_encoder *encoder; struct intel_crtc *intel_crtc; unsigned disable_pipes, prepare_pipes, modeset_pipes; bool ret = true; @@ -7784,9 +7061,6 @@ bool intel_set_mode(struct drm_crtc *crtc, * update the the output configuration. */ intel_modeset_update_state(dev, prepare_pipes); - if (dev_priv->display.modeset_global_resources) - dev_priv->display.modeset_global_resources(dev); - /* Set up the DPLL and any encoders state that needs to adjust or depend * on the DPLL. */ @@ -7796,6 +7070,18 @@ bool intel_set_mode(struct drm_crtc *crtc, x, y, fb); if (!ret) goto done; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + + if (encoder->crtc != &intel_crtc->base) + continue; + + DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", + encoder->base.id, drm_get_encoder_name(encoder), + mode->base.id, mode->name); + encoder_funcs = encoder->helper_private; + encoder_funcs->mode_set(encoder, mode, adjusted_mode); + } } /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ @@ -8134,12 +7420,6 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { .page_flip = intel_crtc_page_flip, }; -static void intel_cpu_pll_init(struct drm_device *dev) -{ - if (IS_HASWELL(dev)) - intel_ddi_pll_init(dev); -} - static void intel_pch_pll_init(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -8179,7 +7459,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) /* Swap pipes & planes for FBC on pre-965 */ intel_crtc->pipe = pipe; intel_crtc->plane = pipe; - intel_crtc->cpu_transcoder = pipe; if (IS_MOBILE(dev) && IS_GEN3(dev)) { DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); intel_crtc->plane = !pipe; @@ -8272,9 +7551,17 @@ static void intel_setup_outputs(struct drm_device *dev) I915_WRITE(PFIT_CONTROL, 0); } - if (!(IS_HASWELL(dev) && - (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) - intel_crt_init(dev); + if (HAS_PCH_SPLIT(dev)) { + dpd_is_edp = intel_dpd_is_edp(dev); + + if (has_edp_a(dev)) + intel_dp_init(dev, DP_A, PORT_A); + + if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_D, PORT_D); + } + + intel_crt_init(dev); if (IS_HASWELL(dev)) { int found; @@ -8297,10 +7584,6 @@ static void intel_setup_outputs(struct drm_device *dev) intel_ddi_init(dev, PORT_D); } else if (HAS_PCH_SPLIT(dev)) { int found; - dpd_is_edp = intel_dpd_is_edp(dev); - - if (has_edp_a(dev)) - intel_dp_init(dev, DP_A, PORT_A); if (I915_READ(HDMIB) & PORT_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ @@ -8320,15 +7603,11 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_C) & DP_DETECTED) intel_dp_init(dev, PCH_DP_C, PORT_C); - if (I915_READ(PCH_DP_D) & DP_DETECTED) + if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev)) { int found; - /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ - if (I915_READ(DP_C) & DP_DETECTED) - intel_dp_init(dev, DP_C, PORT_C); - if (I915_READ(SDVOB) & PORT_DETECTED) { /* SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, SDVOB, true); @@ -8341,6 +7620,9 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(SDVOC) & PORT_DETECTED) intel_hdmi_init(dev, SDVOC, PORT_C); + /* Shares lanes with HDMI on SDVOC */ + if (I915_READ(DP_C) & DP_DETECTED) + intel_dp_init(dev, DP_C, PORT_C); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; @@ -8396,8 +7678,6 @@ static void intel_setup_outputs(struct drm_device *dev) if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) ironlake_init_pch_refclk(dev); - - drm_helper_move_panel_connectors_to_head(dev); } static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) @@ -8438,51 +7718,27 @@ int intel_framebuffer_init(struct drm_device *dev, if (mode_cmd->pitches[0] & 63) return -EINVAL; - /* FIXME <= Gen4 stride limits are bit unclear */ - if (mode_cmd->pitches[0] > 32768) - return -EINVAL; - - if (obj->tiling_mode != I915_TILING_NONE && - mode_cmd->pitches[0] != obj->stride) - return -EINVAL; - - /* Reject formats not supported by any plane early. 
*/ switch (mode_cmd->pixel_format) { - case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_ARGB1555: - if (INTEL_INFO(dev)->gen > 3) - return -EINVAL; - break; case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: - if (INTEL_INFO(dev)->gen < 4) - return -EINVAL; + /* RGB formats are common across chipsets */ break; case DRM_FORMAT_YUYV: case DRM_FORMAT_UYVY: case DRM_FORMAT_YVYU: case DRM_FORMAT_VYUY: - if (INTEL_INFO(dev)->gen < 6) - return -EINVAL; break; default: - DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); + DRM_DEBUG_KMS("unsupported pixel format %u\n", + mode_cmd->pixel_format); return -EINVAL; } - /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ - if (mode_cmd->offsets[0] != 0) - return -EINVAL; - ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { DRM_ERROR("framebuffer init failed %d\n", ret); @@ -8520,13 +7776,7 @@ static void intel_init_display(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* We always want a DPMS function */ - if (IS_HASWELL(dev)) { - dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; - dev_priv->display.crtc_enable = haswell_crtc_enable; - dev_priv->display.crtc_disable = haswell_crtc_disable; - dev_priv->display.off = haswell_crtc_off; - dev_priv->display.update_plane = ironlake_update_plane; - } else if (HAS_PCH_SPLIT(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = ironlake_crtc_disable; @@ -8577,8 +7827,6 @@ static void intel_init_display(struct drm_device *dev) /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; dev_priv->display.write_eld = ironlake_write_eld; - dev_priv->display.modeset_global_resources = - ivb_modeset_global_resources; } else if (IS_HASWELL(dev)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; dev_priv->display.write_eld = haswell_write_eld; @@ -8810,7 +8058,6 @@ void intel_modeset_init(struct drm_device *dev) DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); } - intel_cpu_pll_init(dev); intel_pch_pll_init(dev); /* Just disable it once at startup */ @@ -8880,7 +8127,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) u32 reg; /* Clear any frame start delays used for debugging left by the BIOS */ - reg = PIPECONF(crtc->cpu_transcoder); + reg = PIPECONF(crtc->pipe); I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); /* We need to sanitize the plane -> pipe mapping first because this will @@ -9008,35 +8255,10 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) struct intel_encoder *encoder; struct intel_connector *connector; - if (IS_HASWELL(dev)) { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); - - if (tmp & TRANS_DDI_FUNC_ENABLE) { - switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { - case TRANS_DDI_EDP_INPUT_A_ON: - case TRANS_DDI_EDP_INPUT_A_ONOFF: - pipe = PIPE_A; - break; - case TRANS_DDI_EDP_INPUT_B_ONOFF: - pipe = PIPE_B; - break; - case TRANS_DDI_EDP_INPUT_C_ONOFF: - pipe = PIPE_C; - break; - } - - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - crtc->cpu_transcoder = TRANSCODER_EDP; - - 
DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", - pipe_name(pipe)); - } - } - for_each_pipe(pipe) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); + tmp = I915_READ(PIPECONF(pipe)); if (tmp & PIPECONF_ENABLE) crtc->active = true; else @@ -9049,9 +8271,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) crtc->active ? "enabled" : "disabled"); } - if (IS_HASWELL(dev)) - intel_ddi_setup_hw_pll_state(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { pipe = 0; @@ -9101,8 +8320,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev) intel_modeset_update_staged_output_state(dev); intel_modeset_check_state(dev); - - drm_mode_config_reset(dev); } void intel_modeset_gem_init(struct drm_device *dev) @@ -9230,7 +8447,6 @@ intel_display_capture_error_state(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_display_error_state *error; - enum transcoder cpu_transcoder; int i; error = kmalloc(sizeof(*error), GFP_ATOMIC); @@ -9238,8 +8454,6 @@ intel_display_capture_error_state(struct drm_device *dev) return NULL; for_each_pipe(i) { - cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); - error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); error->cursor[i].base = I915_READ(CURBASE(i)); @@ -9254,14 +8468,14 @@ intel_display_capture_error_state(struct drm_device *dev) error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } - error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); + error->pipe[i].conf = I915_READ(PIPECONF(i)); error->pipe[i].source = I915_READ(PIPESRC(i)); - error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); - error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); - error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); - error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); - error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); - error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); + error->pipe[i].htotal = I915_READ(HTOTAL(i)); + error->pipe[i].hblank = I915_READ(HBLANK(i)); + error->pipe[i].hsync = I915_READ(HSYNC(i)); + error->pipe[i].vtotal = I915_READ(VTOTAL(i)); + error->pipe[i].vblank = I915_READ(VBLANK(i)); + error->pipe[i].vsync = I915_READ(VSYNC(i)); } return error; diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index 1b63d55318a0..368ed8ef1600 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -36,6 +36,8 @@ #include #include "i915_drv.h" +#define DP_RECEIVER_CAP_SIZE 0xf +#define DP_LINK_STATUS_SIZE 6 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) /** @@ -47,9 +49,7 @@ */ static bool is_edp(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - - return intel_dig_port->base.type == INTEL_OUTPUT_EDP; + return intel_dp->base.type == INTEL_OUTPUT_EDP; } /** @@ -76,16 +76,15 @@ static bool is_cpu_edp(struct intel_dp *intel_dp) return is_edp(intel_dp) && !is_pch_edp(intel_dp); } -static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) +static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - - return intel_dig_port->base.base.dev; + return container_of(encoder, struct intel_dp, base.base); } static struct intel_dp *intel_attached_dp(struct drm_connector *connector) { - return 
enc_to_intel_dp(&intel_attached_encoder(connector)->base); + return container_of(intel_attached_encoder(connector), + struct intel_dp, base); } /** @@ -107,31 +106,48 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) return is_pch_edp(intel_dp); } +static void intel_dp_start_link_train(struct intel_dp *intel_dp); +static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); void intel_edp_link_config(struct intel_encoder *intel_encoder, int *lane_num, int *link_bw) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); *lane_num = intel_dp->lane_count; - *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + if (intel_dp->link_bw == DP_LINK_BW_1_62) + *link_bw = 162000; + else if (intel_dp->link_bw == DP_LINK_BW_2_7) + *link_bw = 270000; } int intel_edp_target_clock(struct intel_encoder *intel_encoder, struct drm_display_mode *mode) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); - if (intel_connector->panel.fixed_mode) - return intel_connector->panel.fixed_mode->clock; + if (intel_dp->panel_fixed_mode) + return intel_dp->panel_fixed_mode->clock; else return mode->clock; } +static int +intel_dp_max_lane_count(struct intel_dp *intel_dp) +{ + int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; + switch (max_lane_count) { + case 1: case 2: case 4: + break; + default: + max_lane_count = 4; + } + return max_lane_count; +} + static int intel_dp_max_link_bw(struct intel_dp *intel_dp) { @@ -192,7 +208,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp, bool adjust_mode) { int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); - int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); + int max_lanes = intel_dp_max_lane_count(intel_dp); int max_rate, mode_rate; mode_rate = intel_dp_link_required(mode->clock, 24); @@ -218,14 +234,12 @@ intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; - if (is_edp(intel_dp) && fixed_mode) { - if (mode->hdisplay > fixed_mode->hdisplay) + if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { + if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) + if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) return MODE_PANEL; } @@ -271,10 +285,6 @@ intel_hrawclk(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t clkcfg; - /* There is no CLKCFG reg in Valleyview. 
VLV hrawclk is 200 MHz */ - if (IS_VALLEYVIEW(dev)) - return 200; - clkcfg = I915_READ(CLKCFG); switch (clkcfg & CLKCFG_FSB_MASK) { case CLKCFG_FSB_400: @@ -300,7 +310,7 @@ intel_hrawclk(struct drm_device *dev) static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; @@ -308,7 +318,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; @@ -317,7 +327,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) static void intel_dp_check_edp(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; if (!is_edp(intel_dp)) @@ -336,8 +346,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *recv, int recv_size) { uint32_t output_reg = intel_dp->output_reg; - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_data = ch_ctl + 4; @@ -347,29 +356,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint32_t aux_clock_divider; int try, precharge; - if (IS_HASWELL(dev)) { - switch (intel_dig_port->port) { - case PORT_A: - ch_ctl = DPA_AUX_CH_CTL; - ch_data = DPA_AUX_CH_DATA1; - break; - case PORT_B: - ch_ctl = PCH_DPB_AUX_CH_CTL; - ch_data = PCH_DPB_AUX_CH_DATA1; - break; - case PORT_C: - ch_ctl = PCH_DPC_AUX_CH_CTL; - ch_data = PCH_DPC_AUX_CH_DATA1; - break; - case PORT_D: - ch_ctl = PCH_DPD_AUX_CH_CTL; - ch_data = PCH_DPD_AUX_CH_DATA1; - break; - default: - BUG(); - } - } - intel_dp_check_edp(intel_dp); /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the @@ -379,16 +365,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, * clock divider. 
*/ if (is_cpu_edp(intel_dp)) { - if (IS_HASWELL(dev)) - aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; - else if (IS_VALLEYVIEW(dev)) - aux_clock_divider = 100; - else if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev) || IS_GEN7(dev)) aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ else aux_clock_divider = 225; /* eDP input clock at 450Mhz */ } else if (HAS_PCH_SPLIT(dev)) - aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ else aux_clock_divider = intel_hrawclk(dev) / 2; @@ -660,6 +642,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, return -EREMOTEIO; } +static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); +static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); + static int intel_dp_i2c_init(struct intel_dp *intel_dp, struct intel_connector *intel_connector, const char *name) @@ -685,25 +670,22 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, return ret; } -bool +static bool intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_connector *intel_connector = intel_dp->attached_connector; int lane_count, clock; - int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + int max_lane_count = intel_dp_max_lane_count(intel_dp); int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; int bpp, mode_rate; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { - intel_fixed_panel_mode(intel_connector->panel.fixed_mode, - adjusted_mode); - intel_pch_panel_fitting(dev, - intel_connector->panel.fitting_mode, + if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { + intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); + intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); } @@ -780,23 +762,21 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; - struct intel_encoder *intel_encoder; - struct intel_dp *intel_dp; + struct intel_encoder *encoder; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int lane_count = 4; struct intel_dp_m_n m_n; int pipe = intel_crtc->pipe; - enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; /* * Find the lane count in the intel_encoder private */ - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - intel_dp = enc_to_intel_dp(&intel_encoder->base); + for_each_encoder_on_crtc(dev, crtc, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || + intel_dp->base.type == INTEL_OUTPUT_EDP) { lane_count = intel_dp->lane_count; break; @@ -811,46 +791,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp_compute_m_n(intel_crtc->bpp, lane_count, mode->clock, adjusted_mode->clock, &m_n); - if (IS_HASWELL(dev)) { - I915_WRITE(PIPE_DATA_M1(cpu_transcoder), - TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); - I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); - I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); - } 
else if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(TRANSDATA_M1(pipe), + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); - } else if (IS_VALLEYVIEW(dev)) { - I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); - I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); - I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); } else { I915_WRITE(PIPE_GMCH_DATA_M(pipe), - TU_SIZE(m_n.tu) | m_n.gmch_m); + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); } } -void intel_dp_init_link_config(struct intel_dp *intel_dp) -{ - memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); - intel_dp->link_configuration[0] = intel_dp->link_bw; - intel_dp->link_configuration[1] = intel_dp->lane_count; - intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; - /* - * Check for DPCD version > 1.1 and enhanced framing support - */ - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && - (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { - intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - } -} - static void intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -858,7 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_crtc *crtc = encoder->crtc; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); /* @@ -903,12 +860,21 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_write_eld(encoder, adjusted_mode); } - - intel_dp_init_link_config(intel_dp); + memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + intel_dp->link_configuration[0] = intel_dp->link_bw; + intel_dp->link_configuration[1] = intel_dp->lane_count; + intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; + /* + * Check for DPCD version > 1.1 and enhanced framing support + */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && + (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { + intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + } /* Split out the IBX/CPU vs CPT settings */ - if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { + if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) @@ -965,7 +931,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", @@ -1012,9 +978,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) return control; } -void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) +static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) { - 
struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; @@ -1053,7 +1019,7 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; @@ -1075,14 +1041,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work) { struct intel_dp *intel_dp = container_of(to_delayed_work(__work), struct intel_dp, panel_vdd_work); - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; mutex_lock(&dev->mode_config.mutex); ironlake_panel_vdd_off_sync(intel_dp); mutex_unlock(&dev->mode_config.mutex); } -void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) +static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { if (!is_edp(intel_dp)) return; @@ -1105,9 +1071,9 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) } } -void ironlake_edp_panel_on(struct intel_dp *intel_dp) +static void ironlake_edp_panel_on(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; @@ -1147,9 +1113,9 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp) } } -void ironlake_edp_panel_off(struct intel_dp *intel_dp) +static void ironlake_edp_panel_off(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; @@ -1172,12 +1138,10 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp) ironlake_wait_panel_off(intel_dp); } -void ironlake_edp_backlight_on(struct intel_dp *intel_dp) +static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; u32 pp; if (!is_edp(intel_dp)) @@ -1195,21 +1159,17 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp) pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - - intel_panel_enable_backlight(dev, pipe); } -void ironlake_edp_backlight_off(struct intel_dp *intel_dp) +static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; if (!is_edp(intel_dp)) return; - intel_panel_disable_backlight(dev); - DRM_DEBUG_KMS("\n"); pp = ironlake_get_pp_control(dev_priv); pp &= ~EDP_BLC_ENABLE; @@ -1220,9 +1180,8 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp) static void ironlake_edp_pll_on(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - struct drm_device *dev = crtc->dev; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; @@ 
-1246,9 +1205,8 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp) static void ironlake_edp_pll_off(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - struct drm_device *dev = crtc->dev; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; @@ -1270,7 +1228,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp) } /* If the sink supports it, try to set the power state appropriately */ -void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) +static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) { int ret, i; @@ -1340,11 +1298,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, return true; } } - - DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", - intel_dp->output_reg); } + DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg); + return true; } @@ -1439,6 +1396,38 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ DP_LINK_STATUS_SIZE); } +static uint8_t +intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static uint8_t +intel_get_adjust_request_voltage(uint8_t adjust_request[2], + int lane) +{ + int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + uint8_t l = adjust_request[lane>>1]; + + return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static uint8_t +intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], + int lane) +{ + int s = ((lane & 1) ? + DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + uint8_t l = adjust_request[lane>>1]; + + return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + + #if 0 static char *voltage_names[] = { "0.4V", "0.6V", "0.8V", "1.2V" @@ -1459,7 +1448,7 @@ static char *link_train_names[] = { static uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) return DP_TRAIN_VOLTAGE_SWING_800; @@ -1472,21 +1461,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) static uint8_t intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; - if (IS_HASWELL(dev)) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_400: - return DP_TRAIN_PRE_EMPHASIS_9_5; - case DP_TRAIN_VOLTAGE_SWING_600: - return DP_TRAIN_PRE_EMPHASIS_6; - case DP_TRAIN_VOLTAGE_SWING_800: - return DP_TRAIN_PRE_EMPHASIS_3_5; - case DP_TRAIN_VOLTAGE_SWING_1200: - default: - return DP_TRAIN_PRE_EMPHASIS_0; - } - } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { + if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: return DP_TRAIN_PRE_EMPHASIS_6; @@ -1517,12 +1494,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST uint8_t v = 0; uint8_t p = 0; int lane; + uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); uint8_t voltage_max; uint8_t preemph_max; for (lane = 0; lane < intel_dp->lane_count; lane++) { - uint8_t this_v = 
drm_dp_get_adjust_request_voltage(link_status, lane); - uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); + uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); if (this_v > v) v = this_v; @@ -1639,38 +1617,52 @@ intel_gen7_edp_signal_levels(uint8_t train_set) } } -/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ -static uint32_t -intel_dp_signal_levels_hsw(uint8_t train_set) +static uint8_t +intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) { - int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | - DP_TRAIN_PRE_EMPHASIS_MASK); - switch (signal_levels) { - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_400MV_0DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_400MV_3_5DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: - return DDI_BUF_EMP_400MV_6DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: - return DDI_BUF_EMP_400MV_9_5DB_HSW; + int s = (lane & 1) * 4; + uint8_t l = link_status[lane>>1]; - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_600MV_0DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_600MV_3_5DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: - return DDI_BUF_EMP_600MV_6DB_HSW; + return (l >> s) & 0xf; +} - case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_800MV_0DB_HSW; - case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_800MV_3_5DB_HSW; - default: - DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" - "0x%x\n", signal_levels); - return DDI_BUF_EMP_400MV_0DB_HSW; +/* Check for clock recovery is done on all channels */ +static bool +intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) +{ + int lane; + uint8_t lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = intel_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +/* Check to see if channel eq is done on all channels */ +#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ + DP_LANE_CHANNEL_EQ_DONE|\ + DP_LANE_SYMBOL_LOCKED) +static bool +intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) +{ + uint8_t lane_align; + uint8_t lane_status; + int lane; + + lane_align = intel_dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < intel_dp->lane_count; lane++) { + lane_status = intel_get_lane_status(link_status, lane); + if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) + return false; } + return true; } static bool @@ -1678,49 +1670,11 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_dig_port->port; int ret; - uint32_t temp; - - if (IS_HASWELL(dev)) { - temp = I915_READ(DP_TP_CTL(port)); - - if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) - temp |= DP_TP_CTL_SCRAMBLE_DISABLE; - else - temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; - - 
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { - case DP_TRAINING_PATTERN_DISABLE: - temp |= DP_TP_CTL_LINK_TRAIN_IDLE; - I915_WRITE(DP_TP_CTL(port), temp); - - if (wait_for((I915_READ(DP_TP_STATUS(port)) & - DP_TP_STATUS_IDLE_DONE), 1)) - DRM_ERROR("Timed out waiting for DP idle patterns\n"); - temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; - temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; - - break; - case DP_TRAINING_PATTERN_1: - temp |= DP_TP_CTL_LINK_TRAIN_PAT1; - break; - case DP_TRAINING_PATTERN_2: - temp |= DP_TP_CTL_LINK_TRAIN_PAT2; - break; - case DP_TRAINING_PATTERN_3: - temp |= DP_TP_CTL_LINK_TRAIN_PAT3; - break; - } - I915_WRITE(DP_TP_CTL(port), temp); - - } else if (HAS_PCH_CPT(dev) && - (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { @@ -1780,20 +1734,16 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, } /* Enable corresponding port and start training pattern 1 */ -void +static void intel_dp_start_link_train(struct intel_dp *intel_dp) { - struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; - struct drm_device *dev = encoder->dev; + struct drm_device *dev = intel_dp->base.base.dev; int i; uint8_t voltage; bool clock_recovery = false; int voltage_tries, loop_tries; uint32_t DP = intel_dp->DP; - if (IS_HASWELL(dev)) - intel_ddi_prepare_link_retrain(encoder); - /* Write the link configuration data */ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, intel_dp->link_configuration, @@ -1811,11 +1761,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) uint8_t link_status[DP_LINK_STATUS_SIZE]; uint32_t signal_levels; - if (IS_HASWELL(dev)) { - signal_levels = intel_dp_signal_levels_hsw( - intel_dp->train_set[0]); - DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; - } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { + + if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { @@ -1823,24 +1770,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); + DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", - signal_levels); - /* Set training pattern 1 */ if (!intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) break; + /* Set training pattern 1 */ - drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); + udelay(100); if (!intel_dp_get_link_status(intel_dp, link_status)) { DRM_ERROR("failed to get link status\n"); break; } - if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { DRM_DEBUG_KMS("clock recovery OK\n"); clock_recovery = true; break; @@ -1879,10 +1825,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) intel_dp->DP = DP; } -void +static void intel_dp_complete_link_train(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; bool channel_eq = false; int tries, cr_tries; uint32_t DP = intel_dp->DP; @@ 
-1902,10 +1848,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) break; } - if (IS_HASWELL(dev)) { - signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); - DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; - } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { + if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { @@ -1922,18 +1865,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP_LINK_SCRAMBLING_DISABLE)) break; - drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); + udelay(400); if (!intel_dp_get_link_status(intel_dp, link_status)) break; /* Make sure clock is still ok */ - if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { intel_dp_start_link_train(intel_dp); cr_tries++; continue; } - if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { + if (intel_channel_eq_ok(intel_dp, link_status)) { channel_eq = true; break; } @@ -1952,38 +1895,16 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) ++tries; } - if (channel_eq) - DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); - intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); } static void intel_dp_link_down(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; - /* - * DDI code has a strict mode set sequence and we should try to respect - * it, otherwise we might hang the machine in many different ways. So we - * really should be disabling the port only on a complete crtc_disable - * sequence. This function is just called under two conditions on DDI - * code: - * - Link train failed while doing crtc_enable, and on this case we - * really should respect the mode set sequence and wait for a - * crtc_disable. - * - Someone turned the monitor off and intel_dp_check_link_status - * called us. We don't need to disable the whole port on this case, so - * when someone turns the monitor on again, - * intel_ddi_prepare_link_retrain will take care of redoing the link - * train. - */ - if (IS_HASWELL(dev)) - return; - if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) return; @@ -2002,7 +1923,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (HAS_PCH_IBX(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; + struct drm_crtc *crtc = intel_dp->base.base.crtc; /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the @@ -2103,7 +2024,7 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { /* NAK by default */ - intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK); + intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); } /* @@ -2115,17 +2036,16 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp) * 4. 
Check link status on receipt of hot-plug interrupt */ -void +static void intel_dp_check_link_status(struct intel_dp *intel_dp) { - struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; u8 sink_irq_vector; u8 link_status[DP_LINK_STATUS_SIZE]; - if (!intel_encoder->connectors_active) + if (!intel_dp->base.connectors_active) return; - if (WARN_ON(!intel_encoder->base.crtc)) + if (WARN_ON(!intel_dp->base.base.crtc)) return; /* Try to read receiver status if the link appears to be up */ @@ -2154,9 +2074,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); } - if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { + if (!intel_channel_eq_ok(intel_dp, link_status)) { DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", - drm_get_encoder_name(&intel_encoder->base)); + drm_get_encoder_name(&intel_dp->base.base)); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); } @@ -2205,12 +2125,11 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) static enum drm_connector_status ironlake_dp_detect(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); enum drm_connector_status status; /* Can't disconnect eDP, but you can close the lid... */ if (is_edp(intel_dp)) { - status = intel_panel_detect(dev); + status = intel_panel_detect(intel_dp->base.base.dev); if (status == connector_status_unknown) status = connector_status_connected; return status; @@ -2222,7 +2141,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp) static enum drm_connector_status g4x_dp_detect(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t bit; @@ -2249,45 +2168,44 @@ g4x_dp_detect(struct intel_dp *intel_dp) static struct edid * intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { - struct intel_connector *intel_connector = to_intel_connector(connector); - - /* use cached edid if we have one */ - if (intel_connector->edid) { - struct edid *edid; - int size; + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct edid *edid; + int size; - /* invalid edid */ - if (IS_ERR(intel_connector->edid)) + if (is_edp(intel_dp)) { + if (!intel_dp->edid) return NULL; - size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; + size = (intel_dp->edid->extensions + 1) * EDID_LENGTH; edid = kmalloc(size, GFP_KERNEL); if (!edid) return NULL; - memcpy(edid, intel_connector->edid, size); + memcpy(edid, intel_dp->edid, size); return edid; } - return drm_get_edid(connector, adapter); + edid = drm_get_edid(connector, adapter); + return edid; } static int intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { - struct intel_connector *intel_connector = to_intel_connector(connector); - - /* use cached edid if we have one */ - if (intel_connector->edid) { - /* invalid edid */ - if (IS_ERR(intel_connector->edid)) - return 0; + struct intel_dp *intel_dp = intel_attached_dp(connector); + int ret; - return intel_connector_update_modes(connector, - intel_connector->edid); + if (is_edp(intel_dp)) { + drm_mode_connector_update_edid_property(connector, + intel_dp->edid); + ret = drm_add_edid_modes(connector, intel_dp->edid); + drm_edid_to_eld(connector, + intel_dp->edid); + return intel_dp->edid_mode_count; } - return intel_ddc_get_modes(connector, adapter); + ret = intel_ddc_get_modes(connector, adapter); + 
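The intel_dp_get_edid() hunk above goes back to handing out a private copy of the EDID cached at init time for eDP panels, sized as the base block plus one 128-byte block per extension. Below is a hedged, userspace-style sketch of that duplication; the structure is a simplified stand-in laid out like an EDID base block, not the kernel's struct edid, and the cached pointer is assumed to cover the full blob including extensions.

/* Illustrative sketch only; not part of the patch. */
#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128

struct fake_edid {
	unsigned char header[126];
	unsigned char extensions;	/* number of 128-byte extension blocks */
	unsigned char checksum;
};

/* Return a heap copy of a cached EDID (base block + extensions), or NULL. */
static void *dup_cached_edid(const struct fake_edid *cached)
{
	size_t size;
	void *copy;

	if (!cached)
		return NULL;
	size = (size_t)(cached->extensions + 1) * EDID_LENGTH;
	copy = malloc(size);
	if (copy)
		memcpy(copy, cached, size);	/* cached spans the whole blob */
	return copy;
}

int main(void)
{
	struct fake_edid *blob = calloc(1, 2 * EDID_LENGTH);

	if (!blob)
		return 1;
	blob->extensions = 1;		/* one CEA extension block, 256 bytes */
	free(dup_cached_edid(blob));
	free(blob);
	return 0;
}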
return ret; } @@ -2301,12 +2219,9 @@ static enum drm_connector_status intel_dp_detect(struct drm_connector *connector, bool force) { struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct intel_encoder *intel_encoder = &intel_dig_port->base; - struct drm_device *dev = connector->dev; + struct drm_device *dev = intel_dp->base.base.dev; enum drm_connector_status status; struct edid *edid = NULL; - char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; intel_dp->has_audio = false; @@ -2315,9 +2230,10 @@ intel_dp_detect(struct drm_connector *connector, bool force) else status = g4x_dp_detect(intel_dp); - hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), - 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); - DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); + DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", + intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], + intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], + intel_dp->dpcd[6], intel_dp->dpcd[7]); if (status != connector_status_connected) return status; @@ -2334,31 +2250,49 @@ intel_dp_detect(struct drm_connector *connector, bool force) } } - if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; return connector_status_connected; } static int intel_dp_get_modes(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_device *dev = connector->dev; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; /* We should parse the EDID data and find out if it has an audio sink */ ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); - if (ret) + if (ret) { + if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) { + struct drm_display_mode *newmode; + list_for_each_entry(newmode, &connector->probed_modes, + head) { + if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) { + intel_dp->panel_fixed_mode = + drm_mode_duplicate(dev, newmode); + break; + } + } + } return ret; + } - /* if eDP has no EDID, fall back to fixed mode */ - if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { - struct drm_display_mode *mode; - mode = drm_mode_duplicate(dev, - intel_connector->panel.fixed_mode); - if (mode) { + /* if eDP has no EDID, try to use fixed panel mode from VBT */ + if (is_edp(intel_dp)) { + /* initialize panel mode from VBT if available for eDP */ + if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { + intel_dp->panel_fixed_mode = + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (intel_dp->panel_fixed_mode) { + intel_dp->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; + } + } + if (intel_dp->panel_fixed_mode) { + struct drm_display_mode *mode; + mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); drm_mode_probed_add(connector, mode); return 1; } @@ -2388,12 +2322,10 @@ intel_dp_set_property(struct drm_connector *connector, uint64_t val) { struct drm_i915_private *dev_priv = connector->dev->dev_private; - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_encoder *intel_encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = intel_attached_dp(connector); int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret 
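The intel_dp_get_modes() change restores the on-the-fly derivation of the eDP fixed panel mode: take the EDID's preferred timing if probing produced one, otherwise fall back to the VBT's LFP mode and tag it preferred. A short sketch of that precedence follows, with hypothetical mode structures in place of drm_display_mode.

/* Illustrative sketch only; not part of the patch. */
#include <stddef.h>
#include <stdio.h>

#define MODE_TYPE_PREFERRED 0x1

struct mode { int hdisplay, vdisplay; unsigned type; };

/* Preferred probed (EDID) mode wins; otherwise fall back to the VBT mode. */
static const struct mode *pick_fixed_mode(const struct mode *probed, size_t n,
					  const struct mode *vbt_mode)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (probed[i].type & MODE_TYPE_PREFERRED)
			return &probed[i];
	return vbt_mode;	/* may be NULL when the VBT has no LFP mode */
}

int main(void)
{
	struct mode probed[] = { { 1024, 768, 0 },
				 { 1366, 768, MODE_TYPE_PREFERRED } };
	struct mode vbt = { 1280, 800, MODE_TYPE_PREFERRED };
	const struct mode *m = pick_fixed_mode(probed, 2, &vbt);

	printf("fixed mode %dx%d\n", m->hdisplay, m->vdisplay);
	return 0;
}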
= drm_connector_property_set_value(connector, property, val); if (ret) return ret; @@ -2426,27 +2358,11 @@ intel_dp_set_property(struct drm_connector *connector, goto done; } - if (is_edp(intel_dp) && - property == connector->dev->mode_config.scaling_mode_property) { - if (val == DRM_MODE_SCALE_NONE) { - DRM_DEBUG_KMS("no scaling not supported\n"); - return -EINVAL; - } - - if (intel_connector->panel.fitting_mode == val) { - /* the eDP scaling property is not changed */ - return 0; - } - intel_connector->panel.fitting_mode = val; - - goto done; - } - return -EINVAL; done: - if (intel_encoder->base.crtc) { - struct drm_crtc *crtc = intel_encoder->base.crtc; + if (intel_dp->base.base.crtc) { + struct drm_crtc *crtc = intel_dp->base.base.crtc; intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } @@ -2459,33 +2375,27 @@ intel_dp_destroy(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_connector *intel_connector = to_intel_connector(connector); - if (!IS_ERR_OR_NULL(intel_connector->edid)) - kfree(intel_connector->edid); - - if (is_edp(intel_dp)) { + if (is_edp(intel_dp)) intel_panel_destroy_backlight(dev); - intel_panel_fini(&intel_connector->panel); - } drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } -void intel_dp_encoder_destroy(struct drm_encoder *encoder) +static void intel_dp_encoder_destroy(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); i2c_del_adapter(&intel_dp->adapter); drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { + kfree(intel_dp->edid); cancel_delayed_work_sync(&intel_dp->panel_vdd_work); ironlake_panel_vdd_off_sync(intel_dp); } - kfree(intel_dig_port); + kfree(intel_dp); } static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { @@ -2515,7 +2425,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { static void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); intel_dp_check_link_status(intel_dp); } @@ -2525,14 +2435,13 @@ int intel_trans_dp_port_sel(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct intel_encoder *intel_encoder; - struct intel_dp *intel_dp; + struct intel_encoder *encoder; - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - intel_dp = enc_to_intel_dp(&intel_encoder->base); + for_each_encoder_on_crtc(dev, crtc, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || + intel_dp->base.type == INTEL_OUTPUT_EDP) return intel_dp->output_reg; } @@ -2562,191 +2471,78 @@ bool intel_dpd_is_edp(struct drm_device *dev) static void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); - intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); - - if (is_edp(intel_dp)) { - drm_mode_create_scaling_mode_property(connector->dev); - drm_object_attach_property( - &connector->base, - connector->dev->mode_config.scaling_mode_property, - 
DRM_MODE_SCALE_ASPECT); - intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; - } -} - -static void -intel_dp_init_panel_power_sequencer(struct drm_device *dev, - struct intel_dp *intel_dp) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct edp_power_seq cur, vbt, spec, final; - u32 pp_on, pp_off, pp_div, pp; - - /* Workaround: Need to write PP_CONTROL with the unlock key as - * the very first thing. */ - pp = ironlake_get_pp_control(dev_priv); - I915_WRITE(PCH_PP_CONTROL, pp); - - pp_on = I915_READ(PCH_PP_ON_DELAYS); - pp_off = I915_READ(PCH_PP_OFF_DELAYS); - pp_div = I915_READ(PCH_PP_DIVISOR); - - /* Pull timing values out of registers */ - cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> - PANEL_POWER_UP_DELAY_SHIFT; - - cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> - PANEL_LIGHT_ON_DELAY_SHIFT; - - cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> - PANEL_LIGHT_OFF_DELAY_SHIFT; - - cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> - PANEL_POWER_DOWN_DELAY_SHIFT; - - cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> - PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; - - DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); - - vbt = dev_priv->edp.pps; - - /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of - * our hw here, which are all in 100usec. */ - spec.t1_t3 = 210 * 10; - spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ - spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ - spec.t10 = 500 * 10; - /* This one is special and actually in units of 100ms, but zero - * based in the hw (so we need to add 100 ms). But the sw vbt - * table multiplies it with 1000 to make it in units of 100usec, - * too. */ - spec.t11_t12 = (510 + 100) * 10; - - DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); - - /* Use the max of the register settings and vbt. If both are - * unset, fall back to the spec limits. */ -#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ - spec.field : \ - max(cur.field, vbt.field)) - assign_final(t1_t3); - assign_final(t8); - assign_final(t9); - assign_final(t10); - assign_final(t11_t12); -#undef assign_final - -#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) - intel_dp->panel_power_up_delay = get_delay(t1_t3); - intel_dp->backlight_on_delay = get_delay(t8); - intel_dp->backlight_off_delay = get_delay(t9); - intel_dp->panel_power_down_delay = get_delay(t10); - intel_dp->panel_power_cycle_delay = get_delay(t11_t12); -#undef get_delay - - /* And finally store the new values in the power sequencer. */ - pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | - (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); - pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | - (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); - /* Compute the divisor for the pp clock, simply match the Bspec - * formula. */ - pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) - << PP_REFERENCE_DIVIDER_SHIFT; - pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) - << PANEL_POWER_CYCLE_DELAY_SHIFT); - - /* Haswell doesn't have any port selection bits for the panel - * power sequencer any more. 
*/ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { - if (is_cpu_edp(intel_dp)) - pp_on |= PANEL_POWER_PORT_DP_A; - else - pp_on |= PANEL_POWER_PORT_DP_D; - } - - I915_WRITE(PCH_PP_ON_DELAYS, pp_on); - I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); - I915_WRITE(PCH_PP_DIVISOR, pp_div); - - - DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", - intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, - intel_dp->panel_power_cycle_delay); - - DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", - intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); - - DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", - I915_READ(PCH_PP_ON_DELAYS), - I915_READ(PCH_PP_OFF_DELAYS), - I915_READ(PCH_PP_DIVISOR)); } void -intel_dp_init_connector(struct intel_digital_port *intel_dig_port, - struct intel_connector *intel_connector) +intel_dp_init(struct drm_device *dev, int output_reg, enum port port) { - struct drm_connector *connector = &intel_connector->base; - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct intel_encoder *intel_encoder = &intel_dig_port->base; - struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_display_mode *fixed_mode = NULL; - enum port port = intel_dig_port->port; + struct drm_connector *connector; + struct intel_dp *intel_dp; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; const char *name = NULL; int type; + intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); + if (!intel_dp) + return; + + intel_dp->output_reg = output_reg; + intel_dp->port = port; /* Preserve the current hw state. */ intel_dp->DP = I915_READ(intel_dp->output_reg); - intel_dp->attached_connector = intel_connector; - if (HAS_PCH_SPLIT(dev) && port == PORT_D) + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_dp); + return; + } + intel_encoder = &intel_dp->base; + + if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) if (intel_dpd_is_edp(dev)) intel_dp->is_pch_edp = true; - /* - * FIXME : We need to initialize built-in panels before external panels. - * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup - */ - if (IS_VALLEYVIEW(dev) && port == PORT_C) { - type = DRM_MODE_CONNECTOR_eDP; - intel_encoder->type = INTEL_OUTPUT_EDP; - } else if (port == PORT_A || is_pch_edp(intel_dp)) { + if (output_reg == DP_A || is_pch_edp(intel_dp)) { type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; } else { - /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for - * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't - * rewrite it. 
- */ type = DRM_MODE_CONNECTOR_DisplayPort; + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; } + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); connector->polled = DRM_CONNECTOR_POLL_HPD; - connector->interlace_allowed = true; - connector->doublescan_allowed = 0; + + intel_encoder->cloneable = false; INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, ironlake_panel_vdd_work); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + + connector->interlace_allowed = true; + connector->doublescan_allowed = 0; + + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); + intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); - if (IS_HASWELL(dev)) - intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; - else - intel_connector->get_hw_state = intel_connector_get_hw_state; - + intel_encoder->enable = intel_enable_dp; + intel_encoder->pre_enable = intel_pre_enable_dp; + intel_encoder->disable = intel_disable_dp; + intel_encoder->post_disable = intel_post_disable_dp; + intel_encoder->get_hw_state = intel_dp_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; /* Set up the DDC bus. */ switch (port) { @@ -2770,15 +2566,66 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, break; } - if (is_edp(intel_dp)) - intel_dp_init_panel_power_sequencer(dev, intel_dp); + /* Cache some DPCD data in the eDP case */ + if (is_edp(intel_dp)) { + struct edp_power_seq cur, vbt; + u32 pp_on, pp_off, pp_div; + + pp_on = I915_READ(PCH_PP_ON_DELAYS); + pp_off = I915_READ(PCH_PP_OFF_DELAYS); + pp_div = I915_READ(PCH_PP_DIVISOR); + + if (!pp_on || !pp_off || !pp_div) { + DRM_INFO("bad panel power sequencing delays, disabling panel\n"); + intel_dp_encoder_destroy(&intel_dp->base.base); + intel_dp_destroy(&intel_connector->base); + return; + } + + /* Pull timing values out of registers */ + cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> + PANEL_POWER_UP_DELAY_SHIFT; + + cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> + PANEL_LIGHT_ON_DELAY_SHIFT; + + cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> + PANEL_LIGHT_OFF_DELAY_SHIFT; + + cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> + PANEL_POWER_DOWN_DELAY_SHIFT; + + cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> + PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; + + DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); + + vbt = dev_priv->edp.pps; + + DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); + +#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) + + intel_dp->panel_power_up_delay = get_delay(t1_t3); + intel_dp->backlight_on_delay = get_delay(t8); + intel_dp->backlight_off_delay = get_delay(t9); + intel_dp->panel_power_down_delay = get_delay(t10); + intel_dp->panel_power_cycle_delay = get_delay(t11_t12); + + DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", + intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, + intel_dp->panel_power_cycle_delay); + + DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", + intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + } intel_dp_i2c_init(intel_dp, intel_connector, name); - /* Cache DPCD and 
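The power-sequencing block restored above reads the panel delays out of the PCH_PP_* registers in units of 100 microseconds (the power-cycle delay is scaled up to the same units), takes the larger of the register value and the VBT value, and converts to milliseconds with get_delay's rounding-up division, so a non-zero programmed delay never collapses to a zero-millisecond wait. A tiny worked example of that arithmetic:

/* Illustrative sketch only; not part of the patch. */
#include <assert.h>

/* Convert a delay in 100 us units to whole milliseconds, rounding up. */
static unsigned int delay_100us_to_ms(unsigned int v)
{
	return (v + 9) / 10;
}

int main(void)
{
	assert(delay_100us_to_ms(0)    == 0);	/* no delay stays no delay */
	assert(delay_100us_to_ms(1)    == 1);	/* 100 us still waits 1 ms */
	assert(delay_100us_to_ms(2100) == 210);	/* a 210 ms t1+t3 value    */
	return 0;
}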
EDID for edp. */ if (is_edp(intel_dp)) { bool ret; - struct drm_display_mode *scan; struct edid *edid; ironlake_edp_panel_vdd_on(intel_dp); @@ -2793,47 +2640,29 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, } else { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); - intel_dp_encoder_destroy(&intel_encoder->base); - intel_dp_destroy(connector); + intel_dp_encoder_destroy(&intel_dp->base.base); + intel_dp_destroy(&intel_connector->base); return; } ironlake_edp_panel_vdd_on(intel_dp); edid = drm_get_edid(connector, &intel_dp->adapter); if (edid) { - if (drm_add_edid_modes(connector, edid)) { - drm_mode_connector_update_edid_property(connector, edid); - drm_edid_to_eld(connector, edid); - } else { - kfree(edid); - edid = ERR_PTR(-EINVAL); - } - } else { - edid = ERR_PTR(-ENOENT); - } - intel_connector->edid = edid; - - /* prefer fixed mode from EDID if available */ - list_for_each_entry(scan, &connector->probed_modes, head) { - if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { - fixed_mode = drm_mode_duplicate(dev, scan); - break; - } + drm_mode_connector_update_edid_property(connector, + edid); + intel_dp->edid_mode_count = + drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); + intel_dp->edid = edid; } - - /* fallback to VBT if available for eDP */ - if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) { - fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - if (fixed_mode) - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - } - ironlake_edp_panel_vdd_off(intel_dp, false); } + intel_encoder->hot_plug = intel_dp_hot_plug; + if (is_edp(intel_dp)) { - intel_panel_init(&intel_connector->panel, fixed_mode); - intel_panel_setup_backlight(connector); + dev_priv->int_edp_connector = connector; + intel_panel_setup_backlight(dev); } intel_dp_add_properties(intel_dp, connector); @@ -2847,45 +2676,3 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } } - -void -intel_dp_init(struct drm_device *dev, int output_reg, enum port port) -{ - struct intel_digital_port *intel_dig_port; - struct intel_encoder *intel_encoder; - struct drm_encoder *encoder; - struct intel_connector *intel_connector; - - intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); - if (!intel_dig_port) - return; - - intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); - if (!intel_connector) { - kfree(intel_dig_port); - return; - } - - intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; - - drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); - - intel_encoder->enable = intel_enable_dp; - intel_encoder->pre_enable = intel_pre_enable_dp; - intel_encoder->disable = intel_disable_dp; - intel_encoder->post_disable = intel_post_disable_dp; - intel_encoder->get_hw_state = intel_dp_get_hw_state; - - intel_dig_port->port = port; - intel_dig_port->dp.output_reg = output_reg; - - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; - intel_encoder->hot_plug = intel_dp_hot_plug; - - intel_dp_init_connector(intel_dig_port, intel_connector); -} diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index 522061ca0685..fe7142502f43 100644 --- 
a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -94,7 +94,6 @@ #define INTEL_OUTPUT_HDMI 6 #define INTEL_OUTPUT_DISPLAYPORT 7 #define INTEL_OUTPUT_EDP 8 -#define INTEL_OUTPUT_UNKNOWN 9 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -164,11 +163,6 @@ struct intel_encoder { int crtc_mask; }; -struct intel_panel { - struct drm_display_mode *fixed_mode; - int fitting_mode; -}; - struct intel_connector { struct drm_connector base; /* @@ -185,19 +179,12 @@ struct intel_connector { /* Reads out the current hw, returning true if the connector is enabled * and active (i.e. dpms ON state). */ bool (*get_hw_state)(struct intel_connector *); - - /* Panel info for eDP and LVDS */ - struct intel_panel panel; - - /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ - struct edid *edid; }; struct intel_crtc { struct drm_crtc base; enum pipe pipe; enum plane plane; - enum transcoder cpu_transcoder; u8 lut_r[256], lut_g[256], lut_b[256]; /* * Whether the crtc and the connected output pipeline is active. Implies @@ -211,8 +198,6 @@ struct intel_crtc { struct intel_unpin_work *unpin_work; int fdi_lanes; - atomic_t unpin_work_count; - /* Display surface base address adjustement for pageflips. Note that on * gen4+ this only adjusts up to a tile, offsets within a tile are * handled in the hw itself (with the TILEOFF register). */ @@ -227,14 +212,12 @@ struct intel_crtc { /* We can share PLLs across outputs if the timings match */ struct intel_pch_pll *pch_pll; - uint32_t ddi_pll_sel; }; struct intel_plane { struct drm_plane base; enum pipe pipe; struct drm_i915_gem_object *obj; - bool can_scale; int max_downscale; u32 lut_r[1024], lut_g[1024], lut_b[1024]; void (*update_plane)(struct drm_plane *plane, @@ -334,8 +317,10 @@ struct dip_infoframe { } __attribute__((packed)); struct intel_hdmi { + struct intel_encoder base; u32 sdvox_reg; int ddc_bus; + int ddi_port; uint32_t color_range; bool has_hdmi_sink; bool has_audio; @@ -346,15 +331,18 @@ struct intel_hdmi { struct drm_display_mode *adjusted_mode); }; +#define DP_RECEIVER_CAP_SIZE 0xf #define DP_MAX_DOWNSTREAM_PORTS 0x10 #define DP_LINK_CONFIGURATION_SIZE 9 struct intel_dp { + struct intel_encoder base; uint32_t output_reg; uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; enum hdmi_force_audio force_audio; + enum port port; uint32_t color_range; uint8_t link_bw; uint8_t lane_count; @@ -369,16 +357,11 @@ struct intel_dp { int panel_power_cycle_delay; int backlight_on_delay; int backlight_off_delay; + struct drm_display_mode *panel_fixed_mode; /* for eDP */ struct delayed_work panel_vdd_work; bool want_panel_vdd; - struct intel_connector *attached_connector; -}; - -struct intel_digital_port { - struct intel_encoder base; - enum port port; - struct intel_dp dp; - struct intel_hdmi hdmi; + struct edid *edid; /* cached EDID for eDP */ + int edid_mode_count; }; static inline struct drm_crtc * @@ -397,7 +380,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane) struct intel_unpin_work { struct work_struct work; - struct drm_crtc *crtc; + struct drm_device *dev; struct drm_i915_gem_object *old_fb_obj; struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; @@ -412,8 +395,6 @@ struct intel_fbc_work { int interval; }; -int intel_pch_rawclk(struct drm_device *dev); - int intel_connector_update_modes(struct drm_connector *connector, struct edid *edid); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter 
*adapter); @@ -424,12 +405,7 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port); -extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, - struct intel_connector *intel_connector); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); -extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); @@ -442,27 +418,10 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); extern bool intel_lvds_init(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); -extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, - struct intel_connector *intel_connector); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern void intel_dp_init_link_config(struct intel_dp *intel_dp); -extern void intel_dp_start_link_train(struct intel_dp *intel_dp); -extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); -extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); -extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); -extern void intel_dp_check_link_status(struct intel_dp *intel_dp); -extern bool intel_dp_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); extern bool intel_dpd_is_edp(struct drm_device *dev); -extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp); -extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp); -extern void ironlake_edp_panel_on(struct intel_dp *intel_dp); -extern void ironlake_edp_panel_off(struct intel_dp *intel_dp); -extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); -extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); extern void intel_edp_link_config(struct intel_encoder *, int *, int *); extern int intel_edp_target_clock(struct intel_encoder *, struct drm_display_mode *mode); @@ -472,10 +431,6 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, enum plane plane); /* intel_panel.c */ -extern int intel_panel_init(struct intel_panel *panel, - struct drm_display_mode *fixed_mode); -extern void intel_panel_fini(struct intel_panel *panel); - extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); extern void intel_pch_panel_fitting(struct drm_device *dev, @@ -484,7 +439,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); -extern int intel_panel_setup_backlight(struct drm_connector *connector); +extern int intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe); extern void intel_panel_disable_backlight(struct drm_device *dev); @@ -518,31 +473,6 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector return to_intel_connector(connector)->encoder; } 
-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) -{ - struct intel_digital_port *intel_dig_port = - container_of(encoder, struct intel_digital_port, base.base); - return &intel_dig_port->dp; -} - -static inline struct intel_digital_port * -enc_to_dig_port(struct drm_encoder *encoder) -{ - return container_of(encoder, struct intel_digital_port, base.base); -} - -static inline struct intel_digital_port * -dp_to_dig_port(struct intel_dp *intel_dp) -{ - return container_of(intel_dp, struct intel_digital_port, dp); -} - -static inline struct intel_digital_port * -hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) -{ - return container_of(intel_hdmi, struct intel_digital_port, hdmi); -} - extern void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder); extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); @@ -551,9 +481,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern enum transcoder -intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe); extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); @@ -623,10 +550,6 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, struct drm_display_mode *mode); -extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, - unsigned int bpp, - unsigned int pitch); - extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, @@ -650,22 +573,12 @@ extern void intel_disable_gt_powersave(struct drm_device *dev); extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); extern void ironlake_teardown_rc6(struct drm_device *dev); +extern void intel_enable_ddi(struct intel_encoder *encoder); +extern void intel_disable_ddi(struct intel_encoder *encoder); extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); -extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv); -extern void intel_ddi_pll_init(struct drm_device *dev); -extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc); -extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder); -extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); -extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); -extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev); -extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock); -extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); -extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); -extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); -extern bool -intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); -extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); +extern void intel_ddi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); #endif /* __INTEL_DRV_H__ */ diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c index 5c279b48df97..9ba0aaed7ee8 100644 --- 
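The helpers removed here (enc_to_intel_dp() and friends) and the ones the revert returns to are both instances of the same idiom: the output-specific structure embeds struct intel_encoder as a member, and container_of() recovers the outer structure from a pointer to the embedded one. Below is a self-contained sketch of the idiom with made-up types standing in for the i915 structures.

/* Illustrative sketch only; not part of the patch. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder { int id; };

struct dp_output {
	struct encoder base;	/* embedded base, as intel_dp embeds intel_encoder */
	int output_reg;
};

static struct dp_output *enc_to_dp(struct encoder *enc)
{
	return container_of(enc, struct dp_output, base);
}

int main(void)
{
	struct dp_output dp = { .base = { .id = 7 }, .output_reg = 0x64000 };

	printf("%#x\n", enc_to_dp(&dp.base)->output_reg);
	return 0;
}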
a/trunk/drivers/gpu/drm/i915/intel_hdmi.c +++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c @@ -36,15 +36,10 @@ #include #include "i915_drv.h" -static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) -{ - return hdmi_to_dig_port(intel_hdmi)->base.base.dev; -} - static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { - struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); + struct drm_device *dev = intel_hdmi->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t enabled_bits; @@ -56,14 +51,13 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = - container_of(encoder, struct intel_digital_port, base.base); - return &intel_dig_port->hdmi; + return container_of(encoder, struct intel_hdmi, base.base); } static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) { - return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); + return container_of(intel_attached_encoder(connector), + struct intel_hdmi, base); } void intel_dip_infoframe_csum(struct dip_infoframe *frame) @@ -760,16 +754,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector, return MODE_OK; } -bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { return true; } static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) { - struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); + struct drm_device *dev = intel_hdmi->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t bit; @@ -792,9 +786,6 @@ static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - struct intel_digital_port *intel_dig_port = - hdmi_to_dig_port(intel_hdmi); - struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_i915_private *dev_priv = connector->dev->dev_private; struct edid *edid; enum drm_connector_status status = connector_status_disconnected; @@ -823,7 +814,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) intel_hdmi->has_audio = (intel_hdmi->force_audio == HDMI_AUDIO_ON); - intel_encoder->type = INTEL_OUTPUT_HDMI; } return status; @@ -869,12 +859,10 @@ intel_hdmi_set_property(struct drm_connector *connector, uint64_t val) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - struct intel_digital_port *intel_dig_port = - hdmi_to_dig_port(intel_hdmi); struct drm_i915_private *dev_priv = connector->dev->dev_private; int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; @@ -910,8 +898,8 @@ intel_hdmi_set_property(struct drm_connector *connector, return -EINVAL; done: - if (intel_dig_port->base.base.crtc) { - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; + if (intel_hdmi->base.base.crtc) { + struct drm_crtc *crtc = intel_hdmi->base.base.crtc; intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } @@ -926,6 +914,12 @@ static void intel_hdmi_destroy(struct drm_connector *connector) kfree(connector); } +static const struct drm_encoder_helper_funcs 
intel_hdmi_helper_funcs_hsw = { + .mode_fixup = intel_hdmi_mode_fixup, + .mode_set = intel_ddi_mode_set, + .disable = intel_encoder_noop, +}; + static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { .mode_fixup = intel_hdmi_mode_fixup, .mode_set = intel_hdmi_mode_set, @@ -957,24 +951,43 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_attach_broadcast_rgb_property(connector); } -void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, - struct intel_connector *intel_connector) +void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) { - struct drm_connector *connector = &intel_connector->base; - struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; - struct intel_encoder *intel_encoder = &intel_dig_port->base; - struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum port port = intel_dig_port->port; + struct drm_connector *connector; + struct intel_encoder *intel_encoder; + struct intel_connector *intel_connector; + struct intel_hdmi *intel_hdmi; + intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); + if (!intel_hdmi) + return; + + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_hdmi); + return; + } + + intel_encoder = &intel_hdmi->base; + drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, + DRM_MODE_ENCODER_TMDS); + + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); + intel_encoder->type = INTEL_OUTPUT_HDMI; + connector->polled = DRM_CONNECTOR_POLL_HPD; connector->interlace_allowed = 1; connector->doublescan_allowed = 0; + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + + intel_encoder->cloneable = false; + intel_hdmi->ddi_port = port; switch (port) { case PORT_B: intel_hdmi->ddc_bus = GMBUS_PORT_DPB; @@ -994,6 +1007,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, BUG(); } + intel_hdmi->sdvox_reg = sdvox_reg; + if (!HAS_PCH_SPLIT(dev)) { intel_hdmi->write_infoframe = g4x_write_infoframe; intel_hdmi->set_infoframes = g4x_set_infoframes; @@ -1011,10 +1026,21 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi->set_infoframes = cpt_set_infoframes; } - if (IS_HASWELL(dev)) - intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; - else - intel_connector->get_hw_state = intel_connector_get_hw_state; + if (IS_HASWELL(dev)) { + intel_encoder->enable = intel_enable_ddi; + intel_encoder->disable = intel_disable_ddi; + intel_encoder->get_hw_state = intel_ddi_get_hw_state; + drm_encoder_helper_add(&intel_encoder->base, + &intel_hdmi_helper_funcs_hsw); + } else { + intel_encoder->enable = intel_enable_hdmi; + intel_encoder->disable = intel_disable_hdmi; + intel_encoder->get_hw_state = intel_hdmi_get_hw_state; + drm_encoder_helper_add(&intel_encoder->base, + &intel_hdmi_helper_funcs); + } + intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_hdmi_add_properties(intel_hdmi, connector); @@ -1030,42 +1056,3 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } } - -void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) -{ - struct intel_digital_port *intel_dig_port; - struct intel_encoder *intel_encoder; - struct 
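intel_hdmi_init() above picks its infoframe writers and its enable/disable/get_hw_state hooks per platform (g4x vs. IBX vs. CPT, and Haswell vs. everything else) by filling in function pointers once at init time rather than branching on every call. A tiny sketch of that dispatch pattern follows, with invented platform names and hooks rather than the real i915 callbacks.

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>

enum platform { PLAT_G4X, PLAT_IBX, PLAT_CPT };

struct hdmi_port {
	enum platform plat;
	void (*write_infoframe)(struct hdmi_port *p);
};

static void g4x_write_infoframe(struct hdmi_port *p) { (void)p; puts("g4x write"); }
static void ibx_write_infoframe(struct hdmi_port *p) { (void)p; puts("ibx write"); }
static void cpt_write_infoframe(struct hdmi_port *p) { (void)p; puts("cpt write"); }

/* Bind the per-platform implementation once, at init time. */
static void hdmi_port_init(struct hdmi_port *p, enum platform plat)
{
	p->plat = plat;
	switch (plat) {
	case PLAT_G4X: p->write_infoframe = g4x_write_infoframe; break;
	case PLAT_IBX: p->write_infoframe = ibx_write_infoframe; break;
	case PLAT_CPT: p->write_infoframe = cpt_write_infoframe; break;
	}
}

int main(void)
{
	struct hdmi_port port;

	hdmi_port_init(&port, PLAT_CPT);
	port.write_infoframe(&port);	/* later callers never re-check */
	return 0;
}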
drm_encoder *encoder; - struct intel_connector *intel_connector; - - intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); - if (!intel_dig_port) - return; - - intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); - if (!intel_connector) { - kfree(intel_dig_port); - return; - } - - intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; - - drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); - - intel_encoder->enable = intel_enable_hdmi; - intel_encoder->disable = intel_disable_hdmi; - intel_encoder->get_hw_state = intel_hdmi_get_hw_state; - - intel_encoder->type = INTEL_OUTPUT_HDMI; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; - - intel_dig_port->port = port; - intel_dig_port->hdmi.sdvox_reg = sdvox_reg; - intel_dig_port->dp.output_reg = 0; - - intel_hdmi_init_connector(intel_dig_port, intel_connector); -} diff --git a/trunk/drivers/gpu/drm/i915/intel_i2c.c b/trunk/drivers/gpu/drm/i915/intel_i2c.c index 3ef5af15b812..c2c6dbc0971c 100644 --- a/trunk/drivers/gpu/drm/i915/intel_i2c.c +++ b/trunk/drivers/gpu/drm/i915/intel_i2c.c @@ -432,7 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter, I915_WRITE(GMBUS0 + reg_offset, 0); /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ - bus->force_bit = 1; + bus->force_bit = true; ret = i2c_bit_algo.master_xfer(adapter, msgs, num); out: @@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev) /* gmbus seems to be broken on i830 */ if (IS_I830(dev)) - bus->force_bit = 1; + bus->force_bit = true; intel_gpio_setup(bus, port); @@ -532,10 +532,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - bus->force_bit += force_bit ? 1 : -1; - DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", - force_bit ? 
"en" : "dis", adapter->name, - bus->force_bit); + bus->force_bit = force_bit; } void intel_teardown_gmbus(struct drm_device *dev) diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index 81502e8be26b..edba93b3474b 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -40,30 +40,28 @@ #include /* Private structure for the integrated LVDS support */ -struct intel_lvds_connector { - struct intel_connector base; - - struct notifier_block lid_notifier; -}; - -struct intel_lvds_encoder { +struct intel_lvds { struct intel_encoder base; + struct edid *edid; + + int fitting_mode; u32 pfit_control; u32 pfit_pgm_ratios; bool pfit_dirty; - struct intel_lvds_connector *attached_connector; + struct drm_display_mode *fixed_mode; }; -static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) +static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) { - return container_of(encoder, struct intel_lvds_encoder, base.base); + return container_of(encoder, struct intel_lvds, base.base); } -static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector) +static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) { - return container_of(connector, struct intel_lvds_connector, base.base); + return container_of(intel_attached_encoder(connector), + struct intel_lvds, base); } static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, @@ -98,7 +96,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, static void intel_enable_lvds(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -115,7 +113,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); - if (lvds_encoder->pfit_dirty) { + if (intel_lvds->pfit_dirty) { /* * Enable automatic panel scaling so that non-native modes * fill the screen. The panel fitter should only be @@ -123,12 +121,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder) * register description and PRM. 
*/ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", - lvds_encoder->pfit_control, - lvds_encoder->pfit_pgm_ratios); + intel_lvds->pfit_control, + intel_lvds->pfit_pgm_ratios); - I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios); - I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control); - lvds_encoder->pfit_dirty = false; + I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); + intel_lvds->pfit_dirty = false; } I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); @@ -142,7 +140,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder) static void intel_disable_lvds(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -162,9 +160,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder) if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) DRM_ERROR("timed out waiting for panel to power off\n"); - if (lvds_encoder->pfit_control) { + if (intel_lvds->pfit_control) { I915_WRITE(PFIT_CONTROL, 0); - lvds_encoder->pfit_dirty = true; + intel_lvds->pfit_dirty = true; } I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); @@ -174,8 +172,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder) static int intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); + struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; @@ -251,10 +249,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); - struct intel_connector *intel_connector = - &lvds_encoder->attached_connector->base; - struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); + struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; int pipe; @@ -264,7 +260,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return false; } - if (intel_encoder_check_is_cloned(&lvds_encoder->base)) + if (intel_encoder_check_is_cloned(&intel_lvds->base)) return false; /* @@ -273,12 +269,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ - intel_fixed_panel_mode(intel_connector->panel.fixed_mode, - adjusted_mode); + intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); if (HAS_PCH_SPLIT(dev)) { - intel_pch_panel_fitting(dev, - intel_connector->panel.fitting_mode, + intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, mode, adjusted_mode); return true; } @@ -304,7 +298,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, 0); - switch (intel_connector->panel.fitting_mode) { + switch (intel_lvds->fitting_mode) { case DRM_MODE_SCALE_CENTER: /* * For centered modes, we have to calculate border widths & @@ -402,11 +396,11 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; - if (pfit_control != lvds_encoder->pfit_control || - pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { - lvds_encoder->pfit_control = pfit_control; - lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; - lvds_encoder->pfit_dirty = true; + if (pfit_control != intel_lvds->pfit_control || + pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { + intel_lvds->pfit_control = pfit_control; + intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; + intel_lvds->pfit_dirty = true; } dev_priv->lvds_border_bits = border; @@ -455,15 +449,14 @@ intel_lvds_detect(struct drm_connector *connector, bool force) */ static int intel_lvds_get_modes(struct drm_connector *connector) { - struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; struct drm_display_mode *mode; - /* use cached edid if we have one */ - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) - return drm_add_edid_modes(connector, lvds_connector->base.edid); + if (intel_lvds->edid) + return drm_add_edid_modes(connector, intel_lvds->edid); - mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); + mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); if (mode == NULL) return 0; @@ -503,11 +496,10 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = { static int intel_lid_notify(struct notifier_block *nb, unsigned long val, void *unused) { - struct intel_lvds_connector *lvds_connector = - container_of(nb, struct intel_lvds_connector, lid_notifier); - struct drm_connector *connector = &lvds_connector->base.base; - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = + container_of(nb, struct drm_i915_private, lid_notifier); + struct drm_device *dev = dev_priv->dev; + struct drm_connector *connector = dev_priv->int_lvds_connector; if (dev->switch_power_state != DRM_SWITCH_POWER_ON) return NOTIFY_OK; @@ -516,7 +508,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, * check and update the status of LVDS connector after receiving * the LID nofication event. 
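The LVDS mode_fixup() path above still handles DRM_MODE_SCALE_CENTER, where the requested mode is presented 1:1 in the middle of the native panel timing and the leftover space becomes borders; each border is roughly half the difference between the panel's active size and the requested size on that axis. A worked sketch of that arithmetic is below; how the real code distributes an odd leftover pixel and folds the borders into the sync and blanking timings is not shown here.

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>

struct borders { int left, right, top, bottom; };

/* Centre a width x height mode inside a native panel timing. */
static struct borders centre_mode(int panel_w, int panel_h, int w, int h)
{
	struct borders b;

	b.left   = (panel_w - w + 1) / 2;	/* odd leftover goes left/top */
	b.right  =  panel_w - w - b.left;
	b.top    = (panel_h - h + 1) / 2;
	b.bottom =  panel_h - h - b.top;
	return b;
}

int main(void)
{
	struct borders b = centre_mode(1366, 768, 1024, 768);

	printf("borders L%d R%d T%d B%d\n", b.left, b.right, b.top, b.bottom);
	return 0;
}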
*/ - connector->status = connector->funcs->detect(connector, false); + if (connector) + connector->status = connector->funcs->detect(connector, + false); /* Don't force modeset on machines where it causes a GPU lockup */ if (dmi_check_system(intel_no_modeset_on_lid)) @@ -547,18 +541,13 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, */ static void intel_lvds_destroy(struct drm_connector *connector) { - struct intel_lvds_connector *lvds_connector = - to_lvds_connector(connector); - - if (lvds_connector->lid_notifier.notifier_call) - acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); - - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) - kfree(lvds_connector->base.edid); + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; - intel_panel_destroy_backlight(connector->dev); - intel_panel_fini(&lvds_connector->base.panel); + intel_panel_destroy_backlight(dev); + if (dev_priv->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&dev_priv->lid_notifier); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); @@ -568,24 +557,22 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { - struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; if (property == dev->mode_config.scaling_mode_property) { - struct drm_crtc *crtc; + struct drm_crtc *crtc = intel_lvds->base.base.crtc; if (value == DRM_MODE_SCALE_NONE) { DRM_DEBUG_KMS("no scaling not supported\n"); return -EINVAL; } - if (intel_connector->panel.fitting_mode == value) { + if (intel_lvds->fitting_mode == value) { /* the LVDS scaling property is not changed */ return 0; } - intel_connector->panel.fitting_mode = value; - - crtc = intel_attached_encoder(connector)->base.crtc; + intel_lvds->fitting_mode = value; if (crtc && crtc->enabled) { /* * If the CRTC is enabled, the display will be changed @@ -925,15 +912,12 @@ static bool intel_lvds_supported(struct drm_device *dev) bool intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds_encoder *lvds_encoder; + struct intel_lvds *intel_lvds; struct intel_encoder *intel_encoder; - struct intel_lvds_connector *lvds_connector; struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ - struct drm_display_mode *fixed_mode = NULL; - struct edid *edid; struct drm_crtc *crtc; u32 lvds; int pipe; @@ -961,25 +945,23 @@ bool intel_lvds_init(struct drm_device *dev) } } - lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); - if (!lvds_encoder) + intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); + if (!intel_lvds) { return false; + } - lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); - if (!lvds_connector) { - kfree(lvds_encoder); + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); + if (!intel_connector) { + kfree(intel_lvds); return false; } - lvds_encoder->attached_connector = lvds_connector; - if (!HAS_PCH_SPLIT(dev)) { - lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL); + intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); } - intel_encoder = &lvds_encoder->base; + intel_encoder = &intel_lvds->base; encoder = &intel_encoder->base; - intel_connector = 
&lvds_connector->base; connector = &intel_connector->base; drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); @@ -1011,10 +993,14 @@ bool intel_lvds_init(struct drm_device *dev) /* create the scaling mode property */ drm_mode_create_scaling_mode_property(dev); - drm_object_attach_property(&connector->base, + /* + * the initial panel fitting mode will be FULL_SCREEN. + */ + + drm_connector_attach_property(&intel_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_ASPECT); - intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; + intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; /* * LVDS discovery: * 1) check for EDID on DDC @@ -1029,21 +1015,20 @@ bool intel_lvds_init(struct drm_device *dev) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin)); - if (edid) { - if (drm_add_edid_modes(connector, edid)) { + intel_lvds->edid = drm_get_edid(connector, + intel_gmbus_get_adapter(dev_priv, + pin)); + if (intel_lvds->edid) { + if (drm_add_edid_modes(connector, + intel_lvds->edid)) { drm_mode_connector_update_edid_property(connector, - edid); + intel_lvds->edid); } else { - kfree(edid); - edid = ERR_PTR(-EINVAL); + kfree(intel_lvds->edid); + intel_lvds->edid = NULL; } - } else { - edid = ERR_PTR(-ENOENT); } - lvds_connector->base.edid = edid; - - if (IS_ERR_OR_NULL(edid)) { + if (!intel_lvds->edid) { /* Didn't get an EDID, so * Set wide sync ranges so we get all modes * handed to valid_mode for checking @@ -1056,26 +1041,22 @@ bool intel_lvds_init(struct drm_device *dev) list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { - DRM_DEBUG_KMS("using preferred mode from EDID: "); - drm_mode_debug_printmodeline(scan); - - fixed_mode = drm_mode_duplicate(dev, scan); - if (fixed_mode) { - intel_find_lvds_downclock(dev, fixed_mode, - connector); - goto out; - } + intel_lvds->fixed_mode = + drm_mode_duplicate(dev, scan); + intel_find_lvds_downclock(dev, + intel_lvds->fixed_mode, + connector); + goto out; } } /* Failed to get EDID, what about VBT? */ if (dev_priv->lfp_lvds_vbt_mode) { - DRM_DEBUG_KMS("using mode from VBT: "); - drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode); - - fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - if (fixed_mode) { - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + intel_lvds->fixed_mode = + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (intel_lvds->fixed_mode) { + intel_lvds->fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; goto out; } } @@ -1095,17 +1076,16 @@ bool intel_lvds_init(struct drm_device *dev) crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc && (lvds & LVDS_PORT_EN)) { - fixed_mode = intel_crtc_mode_get(dev, crtc); - if (fixed_mode) { - DRM_DEBUG_KMS("using current (BIOS) mode: "); - drm_mode_debug_printmodeline(fixed_mode); - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); + if (intel_lvds->fixed_mode) { + intel_lvds->fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; goto out; } } /* If we still don't have a mode after all that, give up. 
*/ - if (!fixed_mode) + if (!intel_lvds->fixed_mode) goto failed; out: @@ -1120,15 +1100,16 @@ bool intel_lvds_init(struct drm_device *dev) I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); } - lvds_connector->lid_notifier.notifier_call = intel_lid_notify; - if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { + dev_priv->lid_notifier.notifier_call = intel_lid_notify; + if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { DRM_DEBUG_KMS("lid notifier registration failed\n"); - lvds_connector->lid_notifier.notifier_call = NULL; + dev_priv->lid_notifier.notifier_call = NULL; } + /* keep the LVDS connector */ + dev_priv->int_lvds_connector = connector; drm_sysfs_connector_add(connector); - intel_panel_init(&intel_connector->panel, fixed_mode); - intel_panel_setup_backlight(connector); + intel_panel_setup_backlight(dev); return true; @@ -1136,9 +1117,7 @@ bool intel_lvds_init(struct drm_device *dev) DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); - if (fixed_mode) - drm_mode_destroy(dev, fixed_mode); - kfree(lvds_encoder); - kfree(lvds_connector); + kfree(intel_lvds); + kfree(intel_connector); return false; } diff --git a/trunk/drivers/gpu/drm/i915/intel_modes.c b/trunk/drivers/gpu/drm/i915/intel_modes.c index b00f1c83adce..cabd84bf66eb 100644 --- a/trunk/drivers/gpu/drm/i915/intel_modes.c +++ b/trunk/drivers/gpu/drm/i915/intel_modes.c @@ -45,6 +45,7 @@ int intel_connector_update_modes(struct drm_connector *connector, drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); + kfree(edid); return ret; } @@ -60,16 +61,12 @@ int intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid; - int ret; edid = drm_get_edid(connector, adapter); if (!edid) return 0; - ret = intel_connector_update_modes(connector, edid); - kfree(edid); - - return ret; + return intel_connector_update_modes(connector, edid); } static const struct drm_prop_enum_list force_audio_names[] = { @@ -97,7 +94,7 @@ intel_attach_force_audio_property(struct drm_connector *connector) dev_priv->force_audio_property = prop; } - drm_object_attach_property(&connector->base, prop, 0); + drm_connector_attach_property(connector, prop, 0); } static const struct drm_prop_enum_list broadcast_rgb_names[] = { @@ -124,5 +121,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector) dev_priv->broadcast_rgb_property = prop; } - drm_object_attach_property(&connector->base, prop, 0); + drm_connector_attach_property(connector, prop, 0); } diff --git a/trunk/drivers/gpu/drm/i915/intel_opregion.c b/trunk/drivers/gpu/drm/i915/intel_opregion.c index 7741c22c934c..5530413213d8 100644 --- a/trunk/drivers/gpu/drm/i915/intel_opregion.c +++ b/trunk/drivers/gpu/drm/i915/intel_opregion.c @@ -154,8 +154,6 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) struct opregion_asle __iomem *asle = dev_priv->opregion.asle; u32 max; - DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); - if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAILED; diff --git a/trunk/drivers/gpu/drm/i915/intel_panel.c b/trunk/drivers/gpu/drm/i915/intel_panel.c index c758ad277473..e2aacd329545 100644 --- a/trunk/drivers/gpu/drm/i915/intel_panel.c +++ b/trunk/drivers/gpu/drm/i915/intel_panel.c @@ -138,24 +138,24 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) if (HAS_PCH_SPLIT(dev_priv->dev)) { val = 
I915_READ(BLC_PWM_PCH_CTL2); - if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) { - dev_priv->regfile.saveBLC_PWM_CTL2 = val; + if (dev_priv->saveBLC_PWM_CTL2 == 0) { + dev_priv->saveBLC_PWM_CTL2 = val; } else if (val == 0) { I915_WRITE(BLC_PWM_PCH_CTL2, - dev_priv->regfile.saveBLC_PWM_CTL2); - val = dev_priv->regfile.saveBLC_PWM_CTL2; + dev_priv->saveBLC_PWM_CTL2); + val = dev_priv->saveBLC_PWM_CTL2; } } else { val = I915_READ(BLC_PWM_CTL); - if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { - dev_priv->regfile.saveBLC_PWM_CTL = val; - dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + if (dev_priv->saveBLC_PWM_CTL == 0) { + dev_priv->saveBLC_PWM_CTL = val; + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); } else if (val == 0) { I915_WRITE(BLC_PWM_CTL, - dev_priv->regfile.saveBLC_PWM_CTL); + dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_CTL2, - dev_priv->regfile.saveBLC_PWM_CTL2); - val = dev_priv->regfile.saveBLC_PWM_CTL; + dev_priv->saveBLC_PWM_CTL2); + val = dev_priv->saveBLC_PWM_CTL; } } @@ -275,7 +275,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level } tmp = I915_READ(BLC_PWM_CTL); - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_INFO(dev)->gen < 4) level <<= 1; tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; I915_WRITE(BLC_PWM_CTL, tmp | level); @@ -374,23 +374,26 @@ static void intel_panel_init_backlight(struct drm_device *dev) enum drm_connector_status intel_panel_detect(struct drm_device *dev) { +#if 0 struct drm_i915_private *dev_priv = dev->dev_private; +#endif + if (i915_panel_ignore_lid) + return i915_panel_ignore_lid > 0 ? + connector_status_connected : + connector_status_disconnected; + + /* opregion lid state on HP 2540p is wrong at boot up, + * appears to be either the BIOS or Linux ACPI fault */ +#if 0 /* Assume that the BIOS does not lie through the OpRegion... */ - if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) { + if (dev_priv->opregion.lid_state) return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 
connector_status_connected : connector_status_disconnected; - } +#endif - switch (i915_panel_ignore_lid) { - case -2: - return connector_status_connected; - case -1: - return connector_status_disconnected; - default: - return connector_status_unknown; - } + return connector_status_unknown; } #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE @@ -413,14 +416,21 @@ static const struct backlight_ops intel_panel_bl_ops = { .get_brightness = intel_panel_get_brightness, }; -int intel_panel_setup_backlight(struct drm_connector *connector) +int intel_panel_setup_backlight(struct drm_device *dev) { - struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct backlight_properties props; + struct drm_connector *connector; intel_panel_init_backlight(dev); + if (dev_priv->int_lvds_connector) + connector = dev_priv->int_lvds_connector; + else if (dev_priv->int_edp_connector) + connector = dev_priv->int_edp_connector; + else + return -ENODEV; + memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; props.max_brightness = _intel_panel_get_max_backlight(dev); @@ -450,9 +460,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev) backlight_device_unregister(dev_priv->backlight); } #else -int intel_panel_setup_backlight(struct drm_connector *connector) +int intel_panel_setup_backlight(struct drm_device *dev) { - intel_panel_init_backlight(connector->dev); + intel_panel_init_backlight(dev); return 0; } @@ -461,20 +471,3 @@ void intel_panel_destroy_backlight(struct drm_device *dev) return; } #endif - -int intel_panel_init(struct intel_panel *panel, - struct drm_display_mode *fixed_mode) -{ - panel->fixed_mode = fixed_mode; - - return 0; -} - -void intel_panel_fini(struct intel_panel *panel) -{ - struct intel_connector *intel_connector = - container_of(panel, struct intel_connector, panel); - - if (panel->fixed_mode) - drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); -} diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c index 9e619ada0567..442968f8b201 100644 --- a/trunk/drivers/gpu/drm/i915/intel_pm.c +++ b/trunk/drivers/gpu/drm/i915/intel_pm.c @@ -1468,12 +1468,9 @@ static void i9xx_update_wm(struct drm_device *dev) fifo_size = dev_priv->display.get_fifo_size(dev, 0); crtc = intel_get_crtc_for_plane(dev, 0); if (crtc->enabled && crtc->fb) { - int cpp = crtc->fb->bits_per_pixel / 8; - if (IS_GEN2(dev)) - cpp = 4; - planea_wm = intel_calculate_wm(crtc->mode.clock, - wm_info, fifo_size, cpp, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, latency_ns); enabled = crtc; } else @@ -1482,12 +1479,9 @@ static void i9xx_update_wm(struct drm_device *dev) fifo_size = dev_priv->display.get_fifo_size(dev, 1); crtc = intel_get_crtc_for_plane(dev, 1); if (crtc->enabled && crtc->fb) { - int cpp = crtc->fb->bits_per_pixel / 8; - if (IS_GEN2(dev)) - cpp = 4; - planeb_wm = intel_calculate_wm(crtc->mode.clock, - wm_info, fifo_size, cpp, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, latency_ns); if (enabled == NULL) enabled = crtc; @@ -1577,7 +1571,8 @@ static void i830_update_wm(struct drm_device *dev) planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, dev_priv->display.get_fifo_size(dev, 0), - 4, latency_ns); + crtc->fb->bits_per_pixel / 8, + latency_ns); fwater_lo = I915_READ(FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; @@ -2328,7 +2323,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val) struct drm_i915_private *dev_priv = dev->dev_private; u32 limits = gen6_rps_limits(dev_priv, &val); - 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); WARN_ON(val > dev_priv->rps.max_delay); WARN_ON(val < dev_priv->rps.min_delay); @@ -2403,12 +2398,12 @@ static void gen6_enable_rps(struct drm_device *dev) struct intel_ring_buffer *ring; u32 rp_state_cap; u32 gt_perf_status; - u32 rc6vids, pcu_mbox, rc6_mask = 0; + u32 pcu_mbox, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; - int i, ret; + int i; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); /* Here begins a magic sequence of register writes to enable * auto-downclocking. @@ -2502,16 +2497,30 @@ static void gen6_enable_rps(struct drm_device *dev) GEN6_RP_UP_BUSY_AVG | (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); - ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); - if (!ret) { - pcu_mbox = 0; - ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); - if (ret && pcu_mbox & (1<<31)) { /* OC supported */ - dev_priv->rps.max_delay = pcu_mbox & 0xff; - DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); - } - } else { - DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + + I915_WRITE(GEN6_PCODE_DATA, 0); + I915_WRITE(GEN6_PCODE_MAILBOX, + GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + + /* Check for overclock support */ + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); + pcu_mbox = I915_READ(GEN6_PCODE_DATA); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + if (pcu_mbox & (1<<31)) { /* OC supported */ + dev_priv->rps.max_delay = pcu_mbox & 0xff; + DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); } gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); @@ -2525,20 +2534,6 @@ static void gen6_enable_rps(struct drm_device *dev) /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); - rc6vids = 0; - ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - if (IS_GEN6(dev) && ret) { - DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { - DRM_DEBUG_DRIVER("You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n", - GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); - rc6vids &= 0xffff00; - rc6vids |= GEN6_ENCODE_RC6_VID(450); - ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); - if (ret) - DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); - } - gen6_gt_force_wake_put(dev_priv); } @@ -2546,11 +2541,10 @@ static void gen6_update_ring_freq(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; - int gpu_freq; - unsigned int ia_freq, max_ia_freq; + int gpu_freq, ia_freq, max_ia_freq; int scaling_factor = 180; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); max_ia_freq = cpufreq_quick_get_max(0); /* @@ -2581,11 +2575,17 @@ static void gen6_update_ring_freq(struct drm_device *dev) else ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); - ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT; - sandybridge_pcode_write(dev_priv, - GEN6_PCODE_WRITE_MIN_FREQ_TABLE, - ia_freq | gpu_freq); + I915_WRITE(GEN6_PCODE_DATA, + (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | + gpu_freq); + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & + GEN6_PCODE_READY) == 0, 10)) { + DRM_ERROR("pcode write of freq table timed out\n"); + continue; + } } } @@ -2593,16 +2593,16 @@ void ironlake_teardown_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->ips.renderctx) { - i915_gem_object_unpin(dev_priv->ips.renderctx); - drm_gem_object_unreference(&dev_priv->ips.renderctx->base); - dev_priv->ips.renderctx = NULL; + if (dev_priv->renderctx) { + i915_gem_object_unpin(dev_priv->renderctx); + drm_gem_object_unreference(&dev_priv->renderctx->base); + dev_priv->renderctx = NULL; } - if (dev_priv->ips.pwrctx) { - i915_gem_object_unpin(dev_priv->ips.pwrctx); - drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); - dev_priv->ips.pwrctx = NULL; + if (dev_priv->pwrctx) { + i915_gem_object_unpin(dev_priv->pwrctx); + drm_gem_object_unreference(&dev_priv->pwrctx->base); + dev_priv->pwrctx = NULL; } } @@ -2628,14 +2628,14 @@ static int ironlake_setup_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->ips.renderctx == NULL) - dev_priv->ips.renderctx = intel_alloc_context_page(dev); - if (!dev_priv->ips.renderctx) + if (dev_priv->renderctx == NULL) + dev_priv->renderctx = intel_alloc_context_page(dev); + if (!dev_priv->renderctx) return -ENOMEM; - if (dev_priv->ips.pwrctx == NULL) - dev_priv->ips.pwrctx = intel_alloc_context_page(dev); - if (!dev_priv->ips.pwrctx) { + if (dev_priv->pwrctx == NULL) + dev_priv->pwrctx = intel_alloc_context_page(dev); + if (!dev_priv->pwrctx) { ironlake_teardown_rc6(dev); return -ENOMEM; } @@ -2673,7 +2673,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | + intel_ring_emit(ring, dev_priv->renderctx->gtt_offset | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | @@ -2695,7 +2695,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) return; } - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); + I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); } @@ -3304,72 
+3304,37 @@ static void intel_init_emon(struct drm_device *dev) void intel_disable_gt_powersave(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_IRONLAKE_M(dev)) { ironlake_disable_drps(dev); ironlake_disable_rc6(dev); } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { - cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); - mutex_lock(&dev_priv->rps.hw_lock); gen6_disable_rps(dev); - mutex_unlock(&dev_priv->rps.hw_lock); } } -static void intel_gen6_powersave_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - rps.delayed_resume_work.work); - struct drm_device *dev = dev_priv->dev; - - mutex_lock(&dev_priv->rps.hw_lock); - gen6_enable_rps(dev); - gen6_update_ring_freq(dev); - mutex_unlock(&dev_priv->rps.hw_lock); -} - void intel_enable_gt_powersave(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); ironlake_enable_rc6(dev); intel_init_emon(dev); } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { - /* - * PCU communication is slow and this doesn't need to be - * done at any specific time, so do this out of our fast path - * to make resume and init faster. - */ - schedule_delayed_work(&dev_priv->rps.delayed_resume_work, - round_jiffies_up_relative(HZ)); + gen6_enable_rps(dev); + gen6_update_ring_freq(dev); } } -static void ibx_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); -} - static void ironlake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; /* Required for FBC */ - dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | - ILK_DPFCUNIT_CLOCK_GATE_DISABLE | - ILK_DPFDUNIT_CLOCK_GATE_ENABLE; + dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | + DPFCRUNIT_CLOCK_GATE_DISABLE | + DPFDUNIT_CLOCK_GATE_DISABLE; + /* Required for CxSR */ + dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; I915_WRITE(PCH_3DCGDIS0, MARIUNIT_CLOCK_GATE_DISABLE | @@ -3377,6 +3342,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev) I915_WRITE(PCH_3DCGDIS1, VFMUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + /* * According to the spec the following bits should be set in * order to enable memory self-refresh @@ -3387,7 +3354,9 @@ static void ironlake_init_clock_gating(struct drm_device *dev) I915_WRITE(ILK_DISPLAY_CHICKEN2, (I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); - dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; + I915_WRITE(ILK_DSPCLK_GATE, + (I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE)); I915_WRITE(DISP_ARB_CTL, (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); @@ -3409,51 +3378,28 @@ static void ironlake_init_clock_gating(struct drm_device *dev) I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPFC_DIS1 | + ILK_DPFC_DIS2 | + ILK_CLK_FBC); } - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); - I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); 
I915_WRITE(_3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED << 16 | _3D_CHICKEN2_WM_READ_PIPELINED); - - /* WaDisableRenderCachePipelinedFlush */ - I915_WRITE(CACHE_MODE_0, - _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); - - ibx_init_clock_gating(dev); -} - -static void cpt_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - - /* - * On Ibex Peak and Cougar Point, we need to disable clock - * gating for the panel power sequencer or it will fail to - * start up when no ports are active. - */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | - DPLS_EDP_PPS_FIX_DIS); - /* WADP0ClockGatingDisable */ - for_each_pipe(pipe) { - I915_WRITE(TRANS_CHICKEN1(pipe), - TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); - } } static void gen6_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | @@ -3508,12 +3454,11 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL); - I915_WRITE(ILK_DSPCLK_GATE_D, - I915_READ(ILK_DSPCLK_GATE_D) | - ILK_DPARBUNIT_CLOCK_GATE_ENABLE | - ILK_DPFDUNIT_CLOCK_GATE_ENABLE); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE | + ILK_DPFD_CLK_GATE); - /* WaMbcDriverBootEnable */ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | GEN6_MBCTL_ENABLE_BOOT_FETCH); @@ -3528,8 +3473,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); - - cpt_init_clock_gating(dev); } static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) @@ -3544,24 +3487,13 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_FF_THREAD_MODE, reg); } -static void lpt_init_clock_gating(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* - * TODO: this bit should only be enabled when really needed, then - * disabled when not needed anymore in order to save power. - */ - if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) - I915_WRITE(SOUTH_DSPCLK_GATE_D, - I915_READ(SOUTH_DSPCLK_GATE_D) | - PCH_LP_PARTITION_LEVEL_DISABLE); -} - static void haswell_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); @@ -3572,6 +3504,12 @@ static void haswell_init_clock_gating(struct drm_device *dev) */ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + + I915_WRITE(IVB_CHICKEN3, + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | + CHICKEN3_DGMG_DONE_FIX_DISABLE); + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. 
*/ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); @@ -3600,10 +3538,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); - /* WaMbcDriverBootEnable */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - /* XXX: This is a workaround for early silicon revisions and should be * removed later. */ @@ -3613,38 +3547,27 @@ static void haswell_init_clock_gating(struct drm_device *dev) WM_DBG_DISALLOW_SPRITE | WM_DBG_DISALLOW_MAXFIFO); - lpt_init_clock_gating(dev); } static void ivybridge_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; uint32_t snpcr; + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - /* WaDisableEarlyCull */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); - - /* WaDisableBackToBackFlipFix */ I915_WRITE(IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); - /* WaDisablePSDDualDispatchEnable */ - if (IS_IVB_GT1(dev)) - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - else - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); @@ -3653,18 +3576,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, - GEN7_WA_L3_CHICKEN_MODE); - if (IS_IVB_GT1(dev)) - I915_WRITE(GEN7_ROW_CHICKEN2, - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - else - I915_WRITE(GEN7_ROW_CHICKEN2_GT2, - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - - - /* WaForceL3Serialization */ - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); + GEN7_WA_L3_CHICKEN_MODE); /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock * gating disable must be set. 
Failure to set it results in @@ -3695,7 +3607,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) intel_flush_display_plane(dev_priv, pipe); } - /* WaMbcDriverBootEnable */ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | GEN6_MBCTL_ENABLE_BOOT_FETCH); @@ -3709,59 +3620,39 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) snpcr &= ~GEN6_MBC_SNPCR_MASK; snpcr |= GEN6_MBC_SNPCR_MED; I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); - - cpt_init_clock_gating(dev); } static void valleyview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int pipe; + uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; + + I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); - - /* WaDisableEarlyCull */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - /* WaDisableBackToBackFlipFix */ I915_WRITE(IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ - I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); + I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); - /* WaForceL3Serialization */ - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); - - /* WaDisableDopClockGating */ - I915_WRITE(GEN7_ROW_CHICKEN2, - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - - /* WaForceL3Serialization */ - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); - /* This is required by WaCatErrorRejectionIssue */ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - /* WaMbcDriverBootEnable */ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | GEN6_MBCTL_ENABLE_BOOT_FETCH); @@ -3813,13 +3704,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | PLANEA_FLIPDONE_INT_EN); - - /* - * WaDisableVLVClockGating_VBIIssue - * Disable clock gating on th GCFG unit to prevent a delay - * in the reporting of vblank events. 
- */ - I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); } static void g4x_init_clock_gating(struct drm_device *dev) @@ -3838,10 +3722,6 @@ static void g4x_init_clock_gating(struct drm_device *dev) if (IS_GM45(dev)) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, dspclk_gate); - - /* WaDisableRenderCachePipelinedFlush */ - I915_WRITE(CACHE_MODE_0, - _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); } static void crestline_init_clock_gating(struct drm_device *dev) @@ -3897,11 +3777,44 @@ static void i830_init_clock_gating(struct drm_device *dev) I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); } +static void ibx_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); +} + +static void cpt_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | + DPLS_EDP_PPS_FIX_DIS); + /* Without this, mode sets may fail silently on FDI */ + for_each_pipe(pipe) + I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); +} + void intel_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->display.init_clock_gating(dev); + + if (dev_priv->display.init_pch_clock_gating) + dev_priv->display.init_pch_clock_gating(dev); } /* Starting with Haswell, we have different power wells for @@ -3927,7 +3840,7 @@ void intel_init_power_wells(struct drm_device *dev) if ((well & HSW_PWR_WELL_STATE) == 0) { I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); - if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) + if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20)) DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); } } @@ -3965,6 +3878,11 @@ void intel_init_pm(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { + if (HAS_PCH_IBX(dev)) + dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; + else if (HAS_PCH_CPT(dev)) + dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; + if (IS_GEN5(dev)) { if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) dev_priv->display.update_wm = ironlake_update_wm; @@ -4075,12 +3993,6 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) DRM_ERROR("GT thread status wait timed out\n"); } -static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE, 0); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ -} - static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { u32 forcewake_ack; @@ -4094,7 +4006,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL); + I915_WRITE_NOTRACE(FORCEWAKE, 1); POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), @@ 
-4104,12 +4016,6 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) __gen6_gt_wait_for_thread_c0(dev_priv); } -static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ -} - static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { u32 forcewake_ack; @@ -4123,7 +4029,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), @@ -4167,7 +4073,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } @@ -4205,18 +4111,13 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) return ret; } -static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); -} - static void vlv_force_wake_get(struct drm_i915_private *dev_priv) { if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), FORCEWAKE_ACK_TIMEOUT_MS)) @@ -4227,89 +4128,49 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv) static void vlv_force_wake_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); /* The below doubles as a POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } -void intel_gt_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_VALLEYVIEW(dev)) { - vlv_force_wake_reset(dev_priv); - } else if (INTEL_INFO(dev)->gen >= 6) { - __gen6_gt_force_wake_reset(dev_priv); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - __gen6_gt_force_wake_mt_reset(dev_priv); - } -} - void intel_gt_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; spin_lock_init(&dev_priv->gt_lock); - intel_gt_reset(dev); - if (IS_VALLEYVIEW(dev)) { dev_priv->gt.force_wake_get = vlv_force_wake_get; dev_priv->gt.force_wake_put = vlv_force_wake_put; - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; - } else if (IS_GEN6(dev)) { + } else if (INTEL_INFO(dev)->gen >= 6) { dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; - } - INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, - intel_gen6_powersave_work); -} - -int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) -{ - 
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { - DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); - return -EAGAIN; - } - - I915_WRITE(GEN6_PCODE_DATA, *val); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) { - DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); - return -ETIMEDOUT; + /* IVB configs may use multi-threaded forcewake */ + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + u32 ecobus; + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. + */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + DRM_DEBUG_KMS("Using MT version of forcewake\n"); + dev_priv->gt.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->gt.force_wake_put = + __gen6_gt_force_wake_mt_put; + } + } } - - *val = I915_READ(GEN6_PCODE_DATA); - I915_WRITE(GEN6_PCODE_DATA, 0); - - return 0; } -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) -{ - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - - if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { - DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); - return -EAGAIN; - } - - I915_WRITE(GEN6_PCODE_DATA, val); - I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - - if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, - 500)) { - DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); - return -ETIMEDOUT; - } - - I915_WRITE(GEN6_PCODE_DATA, 0); - - return 0; -} diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c index 987eb5fdaf39..ecbc5c5dbbbc 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, /* * TLB invalidate requires a post-sync write. */ - flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; + flags |= PIPE_CONTROL_QW_WRITE; } ret = intel_ring_begin(ring, 4); @@ -558,9 +558,12 @@ update_mboxes(struct intel_ring_buffer *ring, u32 seqno, u32 mmio_offset) { - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, mmio_offset); + intel_ring_emit(ring, MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_REGISTER | + MI_SEMAPHORE_UPDATE); intel_ring_emit(ring, seqno); + intel_ring_emit(ring, mmio_offset); } /** @@ -961,9 +964,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) } static int -i965_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 length, - unsigned flags) +i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) { int ret; @@ -974,7 +975,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT | - (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE_I965)); + MI_BATCH_NON_SECURE_I965); intel_ring_emit(ring, offset); intel_ring_advance(ring); @@ -983,8 +984,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, static int i830_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len, - unsigned flags) + u32 offset, u32 len) { int ret; @@ -993,7 +993,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, return ret; intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); intel_ring_emit(ring, offset + len - 8); intel_ring_emit(ring, 0); intel_ring_advance(ring); @@ -1003,8 +1003,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, static int i915_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len, - unsigned flags) + u32 offset, u32 len) { int ret; @@ -1013,7 +1012,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring, return ret; intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); - intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); intel_ring_advance(ring); return 0; @@ -1076,29 +1075,6 @@ static int init_status_page(struct intel_ring_buffer *ring) return ret; } -static int init_phys_hws_pga(struct intel_ring_buffer *ring) -{ - struct drm_i915_private *dev_priv = ring->dev->dev_private; - u32 addr; - - if (!dev_priv->status_page_dmah) { - dev_priv->status_page_dmah = - drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) - return -ENOMEM; - } - - addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(ring->dev)->gen >= 4) - addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; - I915_WRITE(HWS_PGA, addr); - - ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(ring->status_page.page_addr, 0, PAGE_SIZE); - - return 0; -} - static int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -1117,11 +1093,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, ret = init_status_page(ring); if (ret) return ret; - } else { - BUG_ON(ring->id != RCS); - ret = init_phys_hws_pga(ring); - if (ret) - return ret; } obj = i915_gem_alloc_object(dev, ring->size); @@ -1420,48 +1391,19 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, return ret; cmd = MI_FLUSH_DW; - /* - * Bspec vol 1c.5 - video engine command streamer: - * "If ENABLED, all TLBs will be invalidated once the flush - * operation is complete. This bit is only valid when the - * Post-Sync Operation field is a value of 1h or 3h." - */ if (invalidate & I915_GEM_GPU_DOMAINS) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | - MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; intel_ring_emit(ring, cmd); - intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } -static int -hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len, - unsigned flags) -{ - int ret; - - ret = intel_ring_begin(ring, 2); - if (ret) - return ret; - - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | - (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE_HSW)); - /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(ring, offset); - intel_ring_advance(ring); - - return 0; -} - static int gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - u32 offset, u32 len, - unsigned flags) + u32 offset, u32 len) { int ret; @@ -1469,9 +1411,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, if (ret) return ret; - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | - (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ intel_ring_emit(ring, offset); intel_ring_advance(ring); @@ -1492,17 +1432,10 @@ static int blt_ring_flush(struct intel_ring_buffer *ring, return ret; cmd = MI_FLUSH_DW; - /* - * Bspec vol 1c.3 - blitter engine command streamer: - * "If ENABLED, all TLBs will be invalidated once the flush - * operation is complete. This bit is only valid when the - * Post-Sync Operation field is a value of 1h or 3h." - */ if (invalidate & I915_GEM_DOMAIN_RENDER) - cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | - MI_FLUSH_DW_OP_STOREDW; + cmd |= MI_INVALIDATE_TLB; intel_ring_emit(ring, cmd); - intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); @@ -1557,9 +1490,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; - if (IS_HASWELL(dev)) - ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_INFO(dev)->gen >= 6) ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; else if (INTEL_INFO(dev)->gen >= 4) ring->dispatch_execbuffer = i965_dispatch_execbuffer; @@ -1570,6 +1501,12 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; + + if (!I915_NEED_GFX_HWS(dev)) { + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); + } + return intel_init_ring_buffer(dev, ring); } @@ -1577,7 +1514,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; - int ret; ring->name = "render ring"; ring->id = RCS; @@ -1615,13 +1551,16 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->init = init_render_ring; ring->cleanup = render_ring_cleanup; + if (!I915_NEED_GFX_HWS(dev)) + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); ring->size = size; ring->effective_size = ring->size; - if (IS_I830(ring->dev) || IS_845G(ring->dev)) + if (IS_I830(ring->dev)) ring->effective_size -= 128; ring->virtual_start = ioremap_wc(start, size); @@ -1631,12 +1570,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) return -ENOMEM; } - if (!I915_NEED_GFX_HWS(dev)) { - ret = init_phys_hws_pga(ring); - if (ret) - return ret; - } - return 0; } @@ -1685,6 +1618,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) } ring->init = init_ring_common; + return intel_init_ring_buffer(dev, ring); } diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h index 
5af65b89765f..2ea7a311a1f0 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -81,9 +81,7 @@ struct intel_ring_buffer { u32 (*get_seqno)(struct intel_ring_buffer *ring, bool lazy_coherency); int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, - u32 offset, u32 length, - unsigned flags); -#define I915_DISPATCH_SECURE 0x1 + u32 offset, u32 length); void (*cleanup)(struct intel_ring_buffer *ring); int (*sync_to)(struct intel_ring_buffer *ring, struct intel_ring_buffer *to, @@ -183,8 +181,6 @@ intel_read_status_page(struct intel_ring_buffer *ring, * The area from dword 0x20 to 0x3ff is available for driver usage. */ #define I915_GEM_HWS_INDEX 0x20 -#define I915_GEM_HWS_SCRATCH_INDEX 0x30 -#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 4b07401540ef..a6ac0b416964 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -1228,30 +1228,6 @@ static void intel_disable_sdvo(struct intel_encoder *encoder) temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) != 0) { - /* HW workaround for IBX, we need to move the port to - * transcoder A before disabling it. */ - if (HAS_PCH_IBX(encoder->base.dev)) { - struct drm_crtc *crtc = encoder->base.crtc; - int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; - - if (temp & SDVO_PIPE_B_SELECT) { - temp &= ~SDVO_PIPE_B_SELECT; - I915_WRITE(intel_sdvo->sdvo_reg, temp); - POSTING_READ(intel_sdvo->sdvo_reg); - - /* Again we need to write this twice. */ - I915_WRITE(intel_sdvo->sdvo_reg, temp); - POSTING_READ(intel_sdvo->sdvo_reg); - - /* Transcoder selection bits only update - * effectively on vblank. */ - if (crtc) - intel_wait_for_vblank(encoder->base.dev, pipe); - else - msleep(50); - } - } - intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); } } @@ -1268,20 +1244,8 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) u8 status; temp = I915_READ(intel_sdvo->sdvo_reg); - if ((temp & SDVO_ENABLE) == 0) { - /* HW workaround for IBX, we need to move the port - * to transcoder A before disabling it. */ - if (HAS_PCH_IBX(dev)) { - struct drm_crtc *crtc = encoder->base.crtc; - int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; - - /* Restore the transcoder select bit. 
*/ - if (pipe == PIPE_B) - temp |= SDVO_PIPE_B_SELECT; - } - + if ((temp & SDVO_ENABLE) == 0) intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); - } for (i = 0; i < 2; i++) intel_wait_for_vblank(dev, intel_crtc->pipe); @@ -1832,7 +1796,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector) intel_sdvo_destroy_enhance_property(connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); - kfree(intel_sdvo_connector); + kfree(connector); } static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) @@ -1864,7 +1828,7 @@ intel_sdvo_set_property(struct drm_connector *connector, uint8_t cmd; int ret; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; @@ -1919,7 +1883,7 @@ intel_sdvo_set_property(struct drm_connector *connector, } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { temp_value = val; if (intel_sdvo_connector->left == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, intel_sdvo_connector->right, val); if (intel_sdvo_connector->left_margin == temp_value) return 0; @@ -1931,7 +1895,7 @@ intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->right == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, intel_sdvo_connector->left, val); if (intel_sdvo_connector->right_margin == temp_value) return 0; @@ -1943,7 +1907,7 @@ intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->top == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, intel_sdvo_connector->bottom, val); if (intel_sdvo_connector->top_margin == temp_value) return 0; @@ -1955,7 +1919,7 @@ intel_sdvo_set_property(struct drm_connector *connector, cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } else if (intel_sdvo_connector->bottom == property) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, intel_sdvo_connector->top, val); if (intel_sdvo_connector->bottom_margin == temp_value) return 0; @@ -2108,24 +2072,17 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, else mapping = &dev_priv->sdvo_mappings[1]; - if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin)) + pin = GMBUS_PORT_DPB; + if (mapping->initialized) pin = mapping->i2c_pin; - else - pin = GMBUS_PORT_DPB; - - sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); - /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow - * our code totally fails once we start using gmbus. Hence fall back to - * bit banging for now. 
*/ - intel_gmbus_force_bit(sdvo->i2c, true); -} - -/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */ -static void -intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo) -{ - intel_gmbus_force_bit(sdvo->i2c, false); + if (intel_gmbus_is_port_valid(pin)) { + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); + intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); + intel_gmbus_force_bit(sdvo->i2c, true); + } else { + sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); + } } static bool @@ -2470,7 +2427,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; - drm_object_attach_property(&intel_sdvo_connector->base.base.base, + drm_connector_attach_property(&intel_sdvo_connector->base.base, intel_sdvo_connector->tv_format, 0); return true; @@ -2486,7 +2443,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, intel_sdvo_connector->name = \ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ if (!intel_sdvo_connector->name) return false; \ - drm_object_attach_property(&connector->base, \ + drm_connector_attach_property(connector, \ intel_sdvo_connector->name, \ intel_sdvo_connector->cur_##name); \ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ @@ -2523,7 +2480,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->left) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->left, intel_sdvo_connector->left_margin); @@ -2532,7 +2489,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->right) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->right, intel_sdvo_connector->right_margin); DRM_DEBUG_KMS("h_overscan: max %d, " @@ -2560,7 +2517,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->top) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->top, intel_sdvo_connector->top_margin); @@ -2570,7 +2527,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->bottom) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->bottom, intel_sdvo_connector->bottom_margin); DRM_DEBUG_KMS("v_overscan: max %d, " @@ -2602,7 +2559,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_connector->dot_crawl) return false; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, intel_sdvo_connector->dot_crawl, intel_sdvo_connector->cur_dot_crawl); DRM_DEBUG_KMS("dot crawl: current %d\n", response); @@ -2706,8 +2663,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) intel_sdvo->is_sdvob = is_sdvob; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); - if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) - goto err_i2c_bus; + if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { + kfree(intel_sdvo); + return false; + } /* encoder type will be decided later */ intel_encoder = &intel_sdvo->base; @@ -2806,8 +2765,6 @@ bool 
intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) err: drm_encoder_cleanup(&intel_encoder->base); i2c_del_adapter(&intel_sdvo->ddc); -err_i2c_bus: - intel_sdvo_unselect_i2c_bus(intel_sdvo); kfree(intel_sdvo); return false; diff --git a/trunk/drivers/gpu/drm/i915/intel_sprite.c b/trunk/drivers/gpu/drm/i915/intel_sprite.c index 827dcd4edf1c..82f5e5c7009d 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sprite.c +++ b/trunk/drivers/gpu/drm/i915/intel_sprite.c @@ -48,8 +48,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, struct intel_plane *intel_plane = to_intel_plane(plane); int pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; - unsigned long sprsurf_offset, linear_offset; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + int pixel_size; sprctl = I915_READ(SPRCTL(pipe)); @@ -62,24 +61,33 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; + pixel_size = 4; break; case DRM_FORMAT_XRGB8888: sprctl |= SPRITE_FORMAT_RGBX888; + pixel_size = 4; break; case DRM_FORMAT_YUYV: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; + pixel_size = 2; break; case DRM_FORMAT_YVYU: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; + pixel_size = 2; break; case DRM_FORMAT_UYVY: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; + pixel_size = 2; break; case DRM_FORMAT_VYUY: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; + pixel_size = 2; break; default: - BUG(); + DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); + sprctl |= SPRITE_FORMAT_RGBX888; + pixel_size = 4; + break; } if (obj->tiling_mode != I915_TILING_NONE) @@ -119,28 +127,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); - - linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); - sprsurf_offset = - intel_gen4_compute_offset_xtiled(&x, &y, - fb->bits_per_pixel / 8, - fb->pitches[0]); - linear_offset -= sprsurf_offset; - - /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET - * register */ - if (IS_HASWELL(dev)) - I915_WRITE(SPROFFSET(pipe), (y << 16) | x); - else if (obj->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) { I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); - else - I915_WRITE(SPRLINOFF(pipe), linear_offset); + } else { + unsigned long offset; + offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + I915_WRITE(SPRLINOFF(pipe), offset); + } I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); - if (intel_plane->can_scale) - I915_WRITE(SPRSCALE(pipe), sprscale); + I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); - I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); + I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); POSTING_READ(SPRSURF(pipe)); } @@ -154,8 +152,7 @@ ivb_disable_plane(struct drm_plane *plane) I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); /* Can't leave the scaler enabled... 
*/ - if (intel_plane->can_scale) - I915_WRITE(SPRSCALE(pipe), 0); + I915_WRITE(SPRSCALE(pipe), 0); /* Activate double buffered register update */ I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); POSTING_READ(SPRSURF(pipe)); @@ -228,10 +225,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); - int pipe = intel_plane->pipe; - unsigned long dvssurf_offset, linear_offset; + int pipe = intel_plane->pipe, pixel_size; u32 dvscntr, dvsscale; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); dvscntr = I915_READ(DVSCNTR(pipe)); @@ -244,24 +239,33 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; + pixel_size = 4; break; case DRM_FORMAT_XRGB8888: dvscntr |= DVS_FORMAT_RGBX888; + pixel_size = 4; break; case DRM_FORMAT_YUYV: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; + pixel_size = 2; break; case DRM_FORMAT_YVYU: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; + pixel_size = 2; break; case DRM_FORMAT_UYVY: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; + pixel_size = 2; break; case DRM_FORMAT_VYUY: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; + pixel_size = 2; break; default: - BUG(); + DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); + dvscntr |= DVS_FORMAT_RGBX888; + pixel_size = 4; + break; } if (obj->tiling_mode != I915_TILING_NONE) @@ -285,23 +289,18 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); - - linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); - dvssurf_offset = - intel_gen4_compute_offset_xtiled(&x, &y, - fb->bits_per_pixel / 8, - fb->pitches[0]); - linear_offset -= dvssurf_offset; - - if (obj->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) { I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); - else - I915_WRITE(DVSLINOFF(pipe), linear_offset); + } else { + unsigned long offset; + offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); + I915_WRITE(DVSLINOFF(pipe), offset); + } I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); - I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); + I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); POSTING_READ(DVSSURF(pipe)); } @@ -423,8 +422,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj, *old_obj; int pipe = intel_plane->pipe; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); int ret = 0; int x = src_x >> 16, y = src_y >> 16; int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; @@ -439,7 +436,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, src_h = src_h >> 16; /* Pipe must be running... 
*/ - if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) + if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) return -EINVAL; if (crtc_x >= primary_w || crtc_y >= primary_h) @@ -449,15 +446,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (intel_plane->pipe != intel_crtc->pipe) return -EINVAL; - /* Sprite planes can be linear or x-tiled surfaces */ - switch (obj->tiling_mode) { - case I915_TILING_NONE: - case I915_TILING_X: - break; - default: - return -EINVAL; - } - /* * Clamp the width & height into the visible area. Note we don't * try to scale the source if part of the visible region is offscreen. @@ -484,12 +472,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (!crtc_w || !crtc_h) /* Again, nothing to display */ goto out; - /* - * We may not have a scaler, eg. HSW does not have it any more - */ - if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h)) - return -EINVAL; - /* * We can take a larger source and scale it down, but * only so much... 16x is the max on SNB. @@ -683,7 +665,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) switch (INTEL_INFO(dev)->gen) { case 5: case 6: - intel_plane->can_scale = true; intel_plane->max_downscale = 16; intel_plane->update_plane = ilk_update_plane; intel_plane->disable_plane = ilk_disable_plane; @@ -700,10 +681,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) break; case 7: - if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev)) - intel_plane->can_scale = false; - else - intel_plane->can_scale = true; intel_plane->max_downscale = 2; intel_plane->update_plane = ivb_update_plane; intel_plane->disable_plane = ivb_disable_plane; diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c index ea93520c1278..62bb048c135e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_tv.c +++ b/trunk/drivers/gpu/drm/i915/intel_tv.c @@ -1088,11 +1088,13 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, int dspcntr_reg = DSPCNTR(intel_crtc->plane); int pipeconf = I915_READ(pipeconf_reg); int dspcntr = I915_READ(dspcntr_reg); + int dspbase_reg = DSPADDR(intel_crtc->plane); int xpos = 0x0, ypos = 0x0; unsigned int xsize, ysize; /* Pipe must be off here */ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev_priv, intel_crtc->plane); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); /* Wait for vblank for the disable to take effect */ if (IS_GEN2(dev)) @@ -1121,7 +1123,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, I915_WRITE(pipeconf_reg, pipeconf); I915_WRITE(dspcntr_reg, dspcntr); - intel_flush_display_plane(dev_priv, intel_crtc->plane); + /* Flush the plane changes */ + I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); } j = 0; @@ -1289,7 +1292,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector) } intel_tv->tv_format = tv_mode->name; - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, connector->dev->mode_config.tv_mode_property, i); } @@ -1443,7 +1446,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop int ret = 0; bool changed = false; - ret = drm_object_property_set_value(&connector->base, property, val); + ret = drm_connector_property_set_value(connector, property, val); if (ret < 0) goto out; @@ -1655,18 +1658,18 @@ intel_tv_init(struct drm_device *dev) ARRAY_SIZE(tv_modes), tv_format_names); - 
drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property, + drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, initial_mode); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.tv_left_margin_property, intel_tv->margin[TV_MARGIN_LEFT]); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.tv_top_margin_property, intel_tv->margin[TV_MARGIN_TOP]); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.tv_right_margin_property, intel_tv->margin[TV_MARGIN_RIGHT]); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.tv_bottom_margin_property, intel_tv->margin[TV_MARGIN_BOTTOM]); drm_sysfs_connector_add(connector); diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_main.c b/trunk/drivers/gpu/drm/mgag200/mgag200_main.c index 70dd3c5529d4..d6a1aae33701 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_main.c @@ -133,8 +133,6 @@ static int mga_vram_init(struct mga_device *mdev) { void __iomem *mem; struct apertures_struct *aper = alloc_apertures(1); - if (!aper) - return -ENOMEM; /* BAR 0 is VRAM */ mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0); @@ -142,9 +140,9 @@ static int mga_vram_init(struct mga_device *mdev) aper->ranges[0].base = mdev->mc.vram_base; aper->ranges[0].size = mdev->mc.vram_window; + aper->count = 1; remove_conflicting_framebuffers(aper, "mgafb", true); - kfree(aper); if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window, "mgadrmfb_vram")) { diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c b/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c index 8fc9d9201945..1504699666c4 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r static int mgag200_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, - bool no_wait_gpu, + bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); return r; } @@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size, ttm_bo_type_device, &mgabo->placement, - align >> PAGE_SHIFT, false, NULL, acc_size, + align >> PAGE_SHIFT, 0, false, NULL, acc_size, NULL, mgag200_bo_ttm_destroy); if (ret) return ret; @@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr) mgag200_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) return ret; @@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo) for (i = 0; i < bo->placement.num_placement ; i++) bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) return ret; @@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo) for (i = 0; i < bo->placement.num_placement ; i++) bo->placements[i] |= 
TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); if (ret) { DRM_ERROR("pushing to VRAM failed\n"); return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/Makefile b/trunk/drivers/gpu/drm/nouveau/Makefile index ab25752a0b1e..a990df4d6c04 100644 --- a/trunk/drivers/gpu/drm/nouveau/Makefile +++ b/trunk/drivers/gpu/drm/nouveau/Makefile @@ -11,7 +11,6 @@ nouveau-y := core/core/client.o nouveau-y += core/core/engctx.o nouveau-y += core/core/engine.o nouveau-y += core/core/enum.o -nouveau-y += core/core/falcon.o nouveau-y += core/core/gpuobj.o nouveau-y += core/core/handle.o nouveau-y += core/core/mm.o @@ -30,7 +29,6 @@ nouveau-y += core/subdev/bios/base.o nouveau-y += core/subdev/bios/bit.o nouveau-y += core/subdev/bios/conn.o nouveau-y += core/subdev/bios/dcb.o -nouveau-y += core/subdev/bios/disp.o nouveau-y += core/subdev/bios/dp.o nouveau-y += core/subdev/bios/extdev.o nouveau-y += core/subdev/bios/gpio.o @@ -66,19 +64,9 @@ nouveau-y += core/subdev/devinit/nv50.o nouveau-y += core/subdev/fb/base.o nouveau-y += core/subdev/fb/nv04.o nouveau-y += core/subdev/fb/nv10.o -nouveau-y += core/subdev/fb/nv1a.o nouveau-y += core/subdev/fb/nv20.o -nouveau-y += core/subdev/fb/nv25.o nouveau-y += core/subdev/fb/nv30.o -nouveau-y += core/subdev/fb/nv35.o -nouveau-y += core/subdev/fb/nv36.o nouveau-y += core/subdev/fb/nv40.o -nouveau-y += core/subdev/fb/nv41.o -nouveau-y += core/subdev/fb/nv44.o -nouveau-y += core/subdev/fb/nv46.o -nouveau-y += core/subdev/fb/nv47.o -nouveau-y += core/subdev/fb/nv49.o -nouveau-y += core/subdev/fb/nv4e.o nouveau-y += core/subdev/fb/nv50.o nouveau-y += core/subdev/fb/nvc0.o nouveau-y += core/subdev/gpio/base.o @@ -123,10 +111,7 @@ nouveau-y += core/engine/dmaobj/base.o nouveau-y += core/engine/dmaobj/nv04.o nouveau-y += core/engine/dmaobj/nv50.o nouveau-y += core/engine/dmaobj/nvc0.o -nouveau-y += core/engine/dmaobj/nvd0.o nouveau-y += core/engine/bsp/nv84.o -nouveau-y += core/engine/bsp/nvc0.o -nouveau-y += core/engine/bsp/nve0.o nouveau-y += core/engine/copy/nva3.o nouveau-y += core/engine/copy/nvc0.o nouveau-y += core/engine/copy/nve0.o @@ -134,21 +119,7 @@ nouveau-y += core/engine/crypt/nv84.o nouveau-y += core/engine/crypt/nv98.o nouveau-y += core/engine/disp/nv04.o nouveau-y += core/engine/disp/nv50.o -nouveau-y += core/engine/disp/nv84.o -nouveau-y += core/engine/disp/nv94.o -nouveau-y += core/engine/disp/nva0.o -nouveau-y += core/engine/disp/nva3.o nouveau-y += core/engine/disp/nvd0.o -nouveau-y += core/engine/disp/nve0.o -nouveau-y += core/engine/disp/dacnv50.o -nouveau-y += core/engine/disp/hdanva3.o -nouveau-y += core/engine/disp/hdanvd0.o -nouveau-y += core/engine/disp/hdminv84.o -nouveau-y += core/engine/disp/hdminva3.o -nouveau-y += core/engine/disp/hdminvd0.o -nouveau-y += core/engine/disp/sornv50.o -nouveau-y += core/engine/disp/sornv94.o -nouveau-y += core/engine/disp/sornvd0.o nouveau-y += core/engine/disp/vga.o nouveau-y += core/engine/fifo/base.o nouveau-y += core/engine/fifo/nv04.o @@ -180,14 +151,11 @@ nouveau-y += core/engine/mpeg/nv40.o nouveau-y += core/engine/mpeg/nv50.o nouveau-y += core/engine/mpeg/nv84.o nouveau-y += core/engine/ppp/nv98.o -nouveau-y += core/engine/ppp/nvc0.o nouveau-y += core/engine/software/nv04.o nouveau-y += core/engine/software/nv10.o nouveau-y += core/engine/software/nv50.o nouveau-y += core/engine/software/nvc0.o nouveau-y += core/engine/vp/nv84.o -nouveau-y += core/engine/vp/nvc0.o -nouveau-y += core/engine/vp/nve0.o 
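/*
 * The mgag200_ttm.c hunks above move the driver onto the older TTM entry
 * points that still take a separate no_wait_reserve flag (and a
 * buffer_start offset in ttm_bo_init()).  A minimal sketch of a pin-style
 * call against that five-argument ttm_bo_validate() is shown below; the
 * helper name, header choices and flag values are illustrative assumptions,
 * not part of this patch.
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static int example_bo_pin(struct ttm_buffer_object *bo,
			  struct ttm_placement *placement)
{
	/* interruptible = false, no_wait_reserve = false, no_wait_gpu = false */
	return ttm_bo_validate(bo, placement, false, false, false);
}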
# drm/core nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o @@ -198,7 +166,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o # drm/kms nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o -nouveau-y += nouveau_connector.o nouveau_dp.o +nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o # drm/kms/nv04:nv50 @@ -207,7 +175,9 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o # drm/kms/nv50- -nouveau-y += nv50_display.o +nouveau-y += nv50_display.o nvd0_display.o +nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o +nouveau-y += nv50_evo.o # drm/pm nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c b/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c index 84c71fad2b6c..e41b10d5eb59 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c +++ b/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c @@ -189,21 +189,6 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend) return nouveau_gpuobj_fini(&engctx->base, suspend); } -int -_nouveau_engctx_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nouveau_engctx *engctx; - int ret; - - ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256, - NVOBJ_FLAG_ZERO_ALLOC, &engctx); - *pobject = nv_object(engctx); - return ret; -} - void _nouveau_engctx_dtor(struct nouveau_object *object) { diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c b/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c deleted file mode 100644 index 6b0843c33877..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include - -#include - -u32 -_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) -{ - struct nouveau_falcon *falcon = (void *)object; - return nv_rd32(falcon, falcon->addr + addr); -} - -void -_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data) -{ - struct nouveau_falcon *falcon = (void *)object; - nv_wr32(falcon, falcon->addr + addr, data); -} - -int -_nouveau_falcon_init(struct nouveau_object *object) -{ - struct nouveau_device *device = nv_device(object); - struct nouveau_falcon *falcon = (void *)object; - const struct firmware *fw; - char name[32] = "internal"; - int ret, i; - u32 caps; - - /* enable engine, and determine its capabilities */ - ret = nouveau_engine_init(&falcon->base); - if (ret) - return ret; - - if (device->chipset < 0xa3 || - device->chipset == 0xaa || device->chipset == 0xac) { - falcon->version = 0; - falcon->secret = (falcon->addr == 0x087000) ? 1 : 0; - } else { - caps = nv_ro32(falcon, 0x12c); - falcon->version = (caps & 0x0000000f); - falcon->secret = (caps & 0x00000030) >> 4; - } - - caps = nv_ro32(falcon, 0x108); - falcon->code.limit = (caps & 0x000001ff) << 8; - falcon->data.limit = (caps & 0x0003fe00) >> 1; - - nv_debug(falcon, "falcon version: %d\n", falcon->version); - nv_debug(falcon, "secret level: %d\n", falcon->secret); - nv_debug(falcon, "code limit: %d\n", falcon->code.limit); - nv_debug(falcon, "data limit: %d\n", falcon->data.limit); - - /* wait for 'uc halted' to be signalled before continuing */ - if (falcon->secret) { - nv_wait(falcon, 0x008, 0x00000010, 0x00000010); - nv_wo32(falcon, 0x004, 0x00000010); - } - - /* disable all interrupts */ - nv_wo32(falcon, 0x014, 0xffffffff); - - /* no default ucode provided by the engine implementation, try and - * locate a "self-bootstrapping" firmware image for the engine - */ - if (!falcon->code.data) { - snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x", - device->chipset, falcon->addr >> 12); - - ret = request_firmware(&fw, name, &device->pdev->dev); - if (ret == 0) { - falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); - falcon->code.size = fw->size; - falcon->data.data = NULL; - falcon->data.size = 0; - release_firmware(fw); - } - - falcon->external = true; - } - - /* next step is to try and load "static code/data segment" firmware - * images for the engine - */ - if (!falcon->code.data) { - snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd", - device->chipset, falcon->addr >> 12); - - ret = request_firmware(&fw, name, &device->pdev->dev); - if (ret) { - nv_error(falcon, "unable to load firmware data\n"); - return ret; - } - - falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL); - falcon->data.size = fw->size; - release_firmware(fw); - if (!falcon->data.data) - return -ENOMEM; - - snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc", - device->chipset, falcon->addr >> 12); - - ret = request_firmware(&fw, name, &device->pdev->dev); - if (ret) { - nv_error(falcon, "unable to load firmware code\n"); - return ret; - } - - falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); - falcon->code.size = fw->size; - release_firmware(fw); - if (!falcon->code.data) - return -ENOMEM; - } - - nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ? 
- "static code/data segments" : "self-bootstrapping"); - - /* ensure any "self-bootstrapping" firmware image is in vram */ - if (!falcon->data.data && !falcon->core) { - ret = nouveau_gpuobj_new(object->parent, NULL, - falcon->code.size, 256, 0, - &falcon->core); - if (ret) { - nv_error(falcon, "core allocation failed, %d\n", ret); - return ret; - } - - for (i = 0; i < falcon->code.size; i += 4) - nv_wo32(falcon->core, i, falcon->code.data[i / 4]); - } - - /* upload firmware bootloader (or the full code segments) */ - if (falcon->core) { - if (device->card_type < NV_C0) - nv_wo32(falcon, 0x618, 0x04000000); - else - nv_wo32(falcon, 0x618, 0x00000114); - nv_wo32(falcon, 0x11c, 0); - nv_wo32(falcon, 0x110, falcon->core->addr >> 8); - nv_wo32(falcon, 0x114, 0); - nv_wo32(falcon, 0x118, 0x00006610); - } else { - if (falcon->code.size > falcon->code.limit || - falcon->data.size > falcon->data.limit) { - nv_error(falcon, "ucode exceeds falcon limit(s)\n"); - return -EINVAL; - } - - if (falcon->version < 3) { - nv_wo32(falcon, 0xff8, 0x00100000); - for (i = 0; i < falcon->code.size / 4; i++) - nv_wo32(falcon, 0xff4, falcon->code.data[i]); - } else { - nv_wo32(falcon, 0x180, 0x01000000); - for (i = 0; i < falcon->code.size / 4; i++) { - if ((i & 0x3f) == 0) - nv_wo32(falcon, 0x188, i >> 6); - nv_wo32(falcon, 0x184, falcon->code.data[i]); - } - } - } - - /* upload data segment (if necessary), zeroing the remainder */ - if (falcon->version < 3) { - nv_wo32(falcon, 0xff8, 0x00000000); - for (i = 0; !falcon->core && i < falcon->data.size / 4; i++) - nv_wo32(falcon, 0xff4, falcon->data.data[i]); - for (; i < falcon->data.limit; i += 4) - nv_wo32(falcon, 0xff4, 0x00000000); - } else { - nv_wo32(falcon, 0x1c0, 0x01000000); - for (i = 0; !falcon->core && i < falcon->data.size / 4; i++) - nv_wo32(falcon, 0x1c4, falcon->data.data[i]); - for (; i < falcon->data.limit / 4; i++) - nv_wo32(falcon, 0x1c4, 0x00000000); - } - - /* start it running */ - nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */ - nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */ - nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */ - nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */ - return 0; -} - -int -_nouveau_falcon_fini(struct nouveau_object *object, bool suspend) -{ - struct nouveau_falcon *falcon = (void *)object; - - if (!suspend) { - nouveau_gpuobj_ref(NULL, &falcon->core); - if (falcon->external) { - kfree(falcon->data.data); - kfree(falcon->code.data); - falcon->code.data = NULL; - } - } - - nv_mo32(falcon, 0x048, 0x00000003, 0x00000000); - nv_wo32(falcon, 0x014, 0xffffffff); - - return nouveau_engine_fini(&falcon->base, suspend); -} - -int -nouveau_falcon_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, u32 addr, bool enable, - const char *iname, const char *fname, - int length, void **pobject) -{ - struct nouveau_falcon *falcon; - int ret; - - ret = nouveau_engine_create_(parent, engine, oclass, enable, iname, - fname, length, pobject); - falcon = *pobject; - if (ret) - return ret; - - falcon->addr = addr; - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c index 560b2214cf1c..70586fde69cf 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c +++ b/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c @@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend) } u32 -_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr) +_nouveau_gpuobj_rd32(struct 
nouveau_object *object, u32 addr) { struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); @@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr) } void -_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/mm.c b/trunk/drivers/gpu/drm/nouveau/core/core/mm.c index 0261a11b2ae0..a6d3cd6490f7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/core/mm.c +++ b/trunk/drivers/gpu/drm/nouveau/core/core/mm.c @@ -234,18 +234,15 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block) int nouveau_mm_fini(struct nouveau_mm *mm) { - if (nouveau_mm_initialised(mm)) { - struct nouveau_mm_node *node, *heap = - list_first_entry(&mm->nodes, typeof(*heap), nl_entry); - int nodes = 0; - - list_for_each_entry(node, &mm->nodes, nl_entry) { - if (WARN_ON(nodes++ == mm->heap_nodes)) - return -EBUSY; - } + struct nouveau_mm_node *node, *heap = + list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry); + int nodes = 0; - kfree(heap); + list_for_each_entry(node, &mm->nodes, nl_entry) { + if (WARN_ON(nodes++ == mm->heap_nodes)) + return -EBUSY; } + kfree(heap); return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c index 1d9f614cb97d..66f7dfd907ee 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c @@ -22,13 +22,18 @@ * Authors: Ben Skeggs */ -#include +#include #include +#include #include struct nv84_bsp_priv { - struct nouveau_engine base; + struct nouveau_bsp base; +}; + +struct nv84_bsp_chan { + struct nouveau_bsp_chan base; }; /******************************************************************************* @@ -44,16 +49,61 @@ nv84_bsp_sclass[] = { * BSP context ******************************************************************************/ +static int +nv84_bsp_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_bsp_chan *priv; + int ret; + + ret = nouveau_bsp_context_create(parent, engine, oclass, NULL, + 0, 0, 0, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +static void +nv84_bsp_context_dtor(struct nouveau_object *object) +{ + struct nv84_bsp_chan *priv = (void *)object; + nouveau_bsp_context_destroy(&priv->base); +} + +static int +nv84_bsp_context_init(struct nouveau_object *object) +{ + struct nv84_bsp_chan *priv = (void *)object; + int ret; + + ret = nouveau_bsp_context_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv84_bsp_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv84_bsp_chan *priv = (void *)object; + return nouveau_bsp_context_fini(&priv->base, suspend); +} + static struct nouveau_oclass nv84_bsp_cclass = { .handle = NV_ENGCTX(BSP, 0x84), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nv84_bsp_context_ctor, + .dtor = nv84_bsp_context_dtor, + .init = nv84_bsp_context_init, + .fini = 
nv84_bsp_context_fini, + .rd32 = _nouveau_bsp_context_rd32, + .wr32 = _nouveau_bsp_context_wr32, }, }; @@ -61,6 +111,11 @@ nv84_bsp_cclass = { * BSP engine/subdev functions ******************************************************************************/ +static void +nv84_bsp_intr(struct nouveau_subdev *subdev) +{ +} + static int nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -69,25 +124,52 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv84_bsp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PBSP", "bsp", &priv); + ret = nouveau_bsp_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x04008000; + nv_subdev(priv)->intr = nv84_bsp_intr; nv_engine(priv)->cclass = &nv84_bsp_cclass; nv_engine(priv)->sclass = nv84_bsp_sclass; return 0; } +static void +nv84_bsp_dtor(struct nouveau_object *object) +{ + struct nv84_bsp_priv *priv = (void *)object; + nouveau_bsp_destroy(&priv->base); +} + +static int +nv84_bsp_init(struct nouveau_object *object) +{ + struct nv84_bsp_priv *priv = (void *)object; + int ret; + + ret = nouveau_bsp_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv84_bsp_fini(struct nouveau_object *object, bool suspend) +{ + struct nv84_bsp_priv *priv = (void *)object; + return nouveau_bsp_fini(&priv->base, suspend); +} + struct nouveau_oclass nv84_bsp_oclass = { .handle = NV_ENGINE(BSP, 0x84), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv84_bsp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = nv84_bsp_dtor, + .init = nv84_bsp_init, + .fini = nv84_bsp_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c deleted file mode 100644 index 0a5aa6bb0870..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Maarten Lankhorst - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Maarten Lankhorst - */ - -#include - -#include - -struct nvc0_bsp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * BSP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_bsp_sclass[] = { - { 0x90b1, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PBSP context - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_bsp_cclass = { - .handle = NV_ENGCTX(BSP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PBSP engine/subdev functions - ******************************************************************************/ - -static int -nvc0_bsp_init(struct nouveau_object *object) -{ - struct nvc0_bsp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x084010, 0x0000fff2); - nv_wr32(priv, 0x08401c, 0x0000fff2); - return 0; -} - -static int -nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvc0_bsp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true, - "PBSP", "bsp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00008000; - nv_engine(priv)->cclass = &nvc0_bsp_cclass; - nv_engine(priv)->sclass = nvc0_bsp_sclass; - return 0; -} - -struct nouveau_oclass -nvc0_bsp_oclass = { - .handle = NV_ENGINE(BSP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvc0_bsp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nvc0_bsp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c deleted file mode 100644 index d4f23bbd75b4..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include - -#include - -struct nve0_bsp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * BSP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nve0_bsp_sclass[] = { - { 0x95b1, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PBSP context - ******************************************************************************/ - -static struct nouveau_oclass -nve0_bsp_cclass = { - .handle = NV_ENGCTX(BSP, 0xe0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PBSP engine/subdev functions - ******************************************************************************/ - -static int -nve0_bsp_init(struct nouveau_object *object) -{ - struct nve0_bsp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x084010, 0x0000fff2); - nv_wr32(priv, 0x08401c, 0x0000fff2); - return 0; -} - -static int -nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nve0_bsp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true, - "PBSP", "bsp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00008000; - nv_engine(priv)->cclass = &nve0_bsp_cclass; - nv_engine(priv)->sclass = nve0_bsp_sclass; - return 0; -} - -struct nouveau_oclass -nve0_bsp_oclass = { - .handle = NV_ENGINE(BSP, 0xe0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nve0_bsp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nve0_bsp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c index 283248c7b050..4df6da0af740 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c @@ -22,9 +22,10 @@ * Authors: Ben Skeggs */ -#include -#include +#include #include +#include +#include #include #include @@ -35,7 +36,11 @@ #include "fuc/nva3.fuc.h" struct nva3_copy_priv { - struct nouveau_falcon base; + struct nouveau_copy base; +}; + +struct nva3_copy_chan { + struct nouveau_copy_chan base; }; /******************************************************************************* @@ -52,16 +57,34 @@ nva3_copy_sclass[] = { * PCOPY context ******************************************************************************/ +static int +nva3_copy_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct 
nouveau_object **pobject) +{ + struct nva3_copy_chan *priv; + int ret; + + ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0, + NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_oclass nva3_copy_cclass = { .handle = NV_ENGCTX(COPY0, 0xa3), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, + .ctor = nva3_copy_context_ctor, + .dtor = _nouveau_copy_context_dtor, + .init = _nouveau_copy_context_init, + .fini = _nouveau_copy_context_fini, + .rd32 = _nouveau_copy_context_rd32, + .wr32 = _nouveau_copy_context_wr32, }, }; @@ -77,40 +100,41 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = { {} }; -void +static void nva3_copy_intr(struct nouveau_subdev *subdev) { struct nouveau_fifo *pfifo = nouveau_fifo(subdev); struct nouveau_engine *engine = nv_engine(subdev); - struct nouveau_falcon *falcon = (void *)subdev; struct nouveau_object *engctx; - u32 dispatch = nv_ro32(falcon, 0x01c); - u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); - u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff; - u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff; - u32 addr = nv_ro32(falcon, 0x040) >> 16; + struct nva3_copy_priv *priv = (void *)subdev; + u32 dispatch = nv_rd32(priv, 0x10401c); + u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16); + u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff; + u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff; + u32 addr = nv_rd32(priv, 0x104040) >> 16; u32 mthd = (addr & 0x07ff) << 2; u32 subc = (addr & 0x3800) >> 11; - u32 data = nv_ro32(falcon, 0x044); + u32 data = nv_rd32(priv, 0x104044); int chid; engctx = nouveau_engctx_get(engine, inst); chid = pfifo->chid(pfifo, engctx); if (stat & 0x00000040) { - nv_error(falcon, "DISPATCH_ERROR ["); + nv_error(priv, "DISPATCH_ERROR ["); nouveau_enum_print(nva3_copy_isr_error_name, ssta); printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n", chid, inst << 12, subc, mthd, data); - nv_wo32(falcon, 0x004, 0x00000040); + nv_wr32(priv, 0x104004, 0x00000040); stat &= ~0x00000040; } if (stat) { - nv_error(falcon, "unhandled intr 0x%08x\n", stat); - nv_wo32(falcon, 0x004, stat); + nv_error(priv, "unhandled intr 0x%08x\n", stat); + nv_wr32(priv, 0x104004, stat); } + nv50_fb_trap(nouveau_fb(priv), 1); nouveau_engctx_put(engctx); } @@ -130,8 +154,7 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nva3_copy_priv *priv; int ret; - ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable, - "PCE0", "copy0", &priv); + ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -141,22 +164,59 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine, nv_engine(priv)->cclass = &nva3_copy_cclass; nv_engine(priv)->sclass = nva3_copy_sclass; nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush; - nv_falcon(priv)->code.data = nva3_pcopy_code; - nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code); - nv_falcon(priv)->data.data = nva3_pcopy_data; - nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data); return 0; } +static int +nva3_copy_init(struct nouveau_object *object) +{ + struct nva3_copy_priv *priv = (void *)object; + int ret, i; + + ret = 
nouveau_copy_init(&priv->base); + if (ret) + return ret; + + /* disable all interrupts */ + nv_wr32(priv, 0x104014, 0xffffffff); + + /* upload ucode */ + nv_wr32(priv, 0x1041c0, 0x01000000); + for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++) + nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]); + + nv_wr32(priv, 0x104180, 0x01000000); + for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, 0x104188, i >> 6); + nv_wr32(priv, 0x104184, nva3_pcopy_code[i]); + } + + /* start it running */ + nv_wr32(priv, 0x10410c, 0x00000000); + nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */ + nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */ + return 0; +} + +static int +nva3_copy_fini(struct nouveau_object *object, bool suspend) +{ + struct nva3_copy_priv *priv = (void *)object; + + nv_mask(priv, 0x104048, 0x00000003, 0x00000000); + nv_wr32(priv, 0x104014, 0xffffffff); + + return nouveau_copy_fini(&priv->base, suspend); +} + struct nouveau_oclass nva3_copy_oclass = { .handle = NV_ENGINE(COPY0, 0xa3), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nva3_copy_ctor, - .dtor = _nouveau_falcon_dtor, - .init = _nouveau_falcon_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, + .dtor = _nouveau_copy_dtor, + .init = nva3_copy_init, + .fini = nva3_copy_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c index b3ed2737e21f..06d4a8791055 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c @@ -22,9 +22,10 @@ * Authors: Ben Skeggs */ -#include -#include +#include #include +#include +#include #include #include @@ -32,7 +33,11 @@ #include "fuc/nvc0.fuc.h" struct nvc0_copy_priv { - struct nouveau_falcon base; + struct nouveau_copy base; +}; + +struct nvc0_copy_chan { + struct nouveau_copy_chan base; }; /******************************************************************************* @@ -55,14 +60,32 @@ nvc0_copy1_sclass[] = { * PCOPY context ******************************************************************************/ +static int +nvc0_copy_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_copy_chan *priv; + int ret; + + ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, + 256, NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_ofuncs nvc0_copy_context_ofuncs = { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, + .ctor = nvc0_copy_context_ctor, + .dtor = _nouveau_copy_context_dtor, + .init = _nouveau_copy_context_init, + .fini = _nouveau_copy_context_fini, + .rd32 = _nouveau_copy_context_rd32, + .wr32 = _nouveau_copy_context_wr32, }; static struct nouveau_oclass @@ -81,18 +104,50 @@ nvc0_copy1_cclass = { * PCOPY engine/subdev functions ******************************************************************************/ -static int -nvc0_copy_init(struct nouveau_object *object) +static const struct nouveau_enum nvc0_copy_isr_error_name[] = { + { 0x0001, "ILLEGAL_MTHD" }, + { 0x0002, "INVALID_ENUM" }, + { 0x0003, "INVALID_BITFIELD" }, + {} +}; + +static void +nvc0_copy_intr(struct 
nouveau_subdev *subdev) { - struct nvc0_copy_priv *priv = (void *)object; - int ret; + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0; + struct nvc0_copy_priv *priv = (void *)subdev; + u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000)); + u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000)); + u32 stat = intr & disp & ~(disp >> 16); + u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff; + u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff; + u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16; + u32 mthd = (addr & 0x07ff) << 2; + u32 subc = (addr & 0x3800) >> 11; + u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000)); + int chid; - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); - nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0); - return 0; + if (stat & 0x00000040) { + nv_error(priv, "DISPATCH_ERROR ["); + nouveau_enum_print(nvc0_copy_isr_error_name, ssta); + printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n", + chid, (u64)inst << 12, subc, mthd, data); + nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040); + stat &= ~0x00000040; + } + + if (stat) { + nv_error(priv, "unhandled intr 0x%08x\n", stat); + nv_wr32(priv, 0x104004 + (idx * 0x1000), stat); + } + + nouveau_engctx_put(engctx); } static int @@ -106,20 +161,15 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (nv_rd32(parent, 0x022500) & 0x00000100) return -ENODEV; - ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true, - "PCE0", "copy0", &priv); + ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00000040; - nv_subdev(priv)->intr = nva3_copy_intr; + nv_subdev(priv)->intr = nvc0_copy_intr; nv_engine(priv)->cclass = &nvc0_copy0_cclass; nv_engine(priv)->sclass = nvc0_copy0_sclass; - nv_falcon(priv)->code.data = nvc0_pcopy_code; - nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code); - nv_falcon(priv)->data.data = nvc0_pcopy_data; - nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data); return 0; } @@ -134,33 +184,72 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (nv_rd32(parent, 0x022500) & 0x00000200) return -ENODEV; - ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true, - "PCE1", "copy1", &priv); + ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00000080; - nv_subdev(priv)->intr = nva3_copy_intr; + nv_subdev(priv)->intr = nvc0_copy_intr; nv_engine(priv)->cclass = &nvc0_copy1_cclass; nv_engine(priv)->sclass = nvc0_copy1_sclass; - nv_falcon(priv)->code.data = nvc0_pcopy_code; - nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code); - nv_falcon(priv)->data.data = nvc0_pcopy_data; - nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data); return 0; } +static int +nvc0_copy_init(struct nouveau_object *object) +{ + int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0; + struct nvc0_copy_priv *priv = (void *)object; + int ret, i; + + ret = nouveau_copy_init(&priv->base); + if (ret) + return ret; + + /* disable all interrupts */ + nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff); + + /* upload ucode */ + nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000); + for 
(i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++) + nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]); + + nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000); + for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6); + nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]); + } + + /* start it running */ + nv_wr32(priv, 0x104084 + (idx * 0x1000), idx); + nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000); + nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */ + nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */ + return 0; +} + +static int +nvc0_copy_fini(struct nouveau_object *object, bool suspend) +{ + int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0; + struct nvc0_copy_priv *priv = (void *)object; + + nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000); + nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff); + + return nouveau_copy_fini(&priv->base, suspend); +} + struct nouveau_oclass nvc0_copy0_oclass = { .handle = NV_ENGINE(COPY0, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_copy0_ctor, - .dtor = _nouveau_falcon_dtor, + .dtor = _nouveau_copy_dtor, .init = nvc0_copy_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, + .fini = nvc0_copy_fini, }, }; @@ -169,10 +258,8 @@ nvc0_copy1_oclass = { .handle = NV_ENGINE(COPY1, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_copy1_ctor, - .dtor = _nouveau_falcon_dtor, + .dtor = _nouveau_copy_dtor, .init = nvc0_copy_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, + .fini = nvc0_copy_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c index dbbe9e8998fe..2017c1579ac5 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c @@ -30,7 +30,11 @@ #include struct nve0_copy_priv { - struct nouveau_engine base; + struct nouveau_copy base; +}; + +struct nve0_copy_chan { + struct nouveau_copy_chan base; }; /******************************************************************************* @@ -47,14 +51,32 @@ nve0_copy_sclass[] = { * PCOPY context ******************************************************************************/ +static int +nve0_copy_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_copy_chan *priv; + int ret; + + ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, + 256, NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_ofuncs nve0_copy_context_ofuncs = { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nve0_copy_context_ctor, + .dtor = _nouveau_copy_context_dtor, + .init = _nouveau_copy_context_init, + .fini = _nouveau_copy_context_fini, + .rd32 = _nouveau_copy_context_rd32, + .wr32 = _nouveau_copy_context_wr32, }; static struct nouveau_oclass @@ -78,8 +100,7 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (nv_rd32(parent, 0x022500) & 0x00000100) return -ENODEV; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PCE0", 
"copy0", &priv); + ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -101,8 +122,7 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (nv_rd32(parent, 0x022500) & 0x00000200) return -ENODEV; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PCE1", "copy1", &priv); + ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -118,9 +138,9 @@ nve0_copy0_oclass = { .handle = NV_ENGINE(COPY0, 0xe0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nve0_copy0_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = _nouveau_copy_dtor, + .init = _nouveau_copy_init, + .fini = _nouveau_copy_fini, }, }; @@ -129,8 +149,8 @@ nve0_copy1_oclass = { .handle = NV_ENGINE(COPY1, 0xe0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nve0_copy1_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = _nouveau_copy_dtor, + .init = _nouveau_copy_init, + .fini = _nouveau_copy_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c index b97490512723..1d85e5b66ca0 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c @@ -34,7 +34,11 @@ #include struct nv84_crypt_priv { - struct nouveau_engine base; + struct nouveau_crypt base; +}; + +struct nv84_crypt_chan { + struct nouveau_crypt_chan base; }; /******************************************************************************* @@ -83,16 +87,34 @@ nv84_crypt_sclass[] = { * PCRYPT context ******************************************************************************/ +static int +nv84_crypt_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_crypt_chan *priv; + int ret; + + ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256, + 0, NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_oclass nv84_crypt_cclass = { .handle = NV_ENGCTX(CRYPT, 0x84), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nv84_crypt_context_ctor, + .dtor = _nouveau_crypt_context_dtor, + .init = _nouveau_crypt_context_init, + .fini = _nouveau_crypt_context_fini, + .rd32 = _nouveau_crypt_context_rd32, + .wr32 = _nouveau_crypt_context_wr32, }, }; @@ -135,6 +157,7 @@ nv84_crypt_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x102130, stat); nv_wr32(priv, 0x10200c, 0x10); + nv50_fb_trap(nouveau_fb(priv), 1); nouveau_engctx_put(engctx); } @@ -153,8 +176,7 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv84_crypt_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PCRYPT", "crypt", &priv); + ret = nouveau_crypt_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -173,7 +195,7 @@ nv84_crypt_init(struct nouveau_object *object) struct nv84_crypt_priv *priv = (void *)object; int ret; - ret = nouveau_engine_init(&priv->base); + ret = 
nouveau_crypt_init(&priv->base); if (ret) return ret; @@ -188,8 +210,8 @@ nv84_crypt_oclass = { .handle = NV_ENGINE(CRYPT, 0x84), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv84_crypt_ctor, - .dtor = _nouveau_engine_dtor, + .dtor = _nouveau_crypt_dtor, .init = nv84_crypt_init, - .fini = _nouveau_engine_fini, + .fini = _nouveau_crypt_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c index 21986f3bf0c8..9e3876c89b96 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -37,7 +36,11 @@ #include "fuc/nv98.fuc.h" struct nv98_crypt_priv { - struct nouveau_falcon base; + struct nouveau_crypt base; +}; + +struct nv98_crypt_chan { + struct nouveau_crypt_chan base; }; /******************************************************************************* @@ -54,16 +57,34 @@ nv98_crypt_sclass[] = { * PCRYPT context ******************************************************************************/ +static int +nv98_crypt_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv98_crypt_chan *priv; + int ret; + + ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256, + 256, NVOBJ_FLAG_ZERO_ALLOC, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + static struct nouveau_oclass nv98_crypt_cclass = { .handle = NV_ENGCTX(CRYPT, 0x98), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, + .ctor = nv98_crypt_context_ctor, + .dtor = _nouveau_crypt_context_dtor, + .init = _nouveau_crypt_context_init, + .fini = _nouveau_crypt_context_fini, + .rd32 = _nouveau_crypt_context_rd32, + .wr32 = _nouveau_crypt_context_wr32, }, }; @@ -113,6 +134,7 @@ nv98_crypt_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x087004, stat); } + nv50_fb_trap(nouveau_fb(priv), 1); nouveau_engctx_put(engctx); } @@ -131,8 +153,7 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_crypt_priv *priv; int ret; - ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true, - "PCRYPT", "crypt", &priv); + ret = nouveau_crypt_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -142,10 +163,36 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine, nv_engine(priv)->cclass = &nv98_crypt_cclass; nv_engine(priv)->sclass = nv98_crypt_sclass; nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush; - nv_falcon(priv)->code.data = nv98_pcrypt_code; - nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code); - nv_falcon(priv)->data.data = nv98_pcrypt_data; - nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data); + return 0; +} + +static int +nv98_crypt_init(struct nouveau_object *object) +{ + struct nv98_crypt_priv *priv = (void *)object; + int ret, i; + + ret = nouveau_crypt_init(&priv->base); + if (ret) + return ret; + + /* wait for exit interrupt to signal */ + nv_wait(priv, 0x087008, 0x00000010, 0x00000010); + nv_wr32(priv, 0x087004, 0x00000010); + + /* upload microcode code and data segments */ + nv_wr32(priv, 0x087ff8, 0x00100000); + 
for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++) + nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]); + + nv_wr32(priv, 0x087ff8, 0x00000000); + for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++) + nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]); + + /* start it running */ + nv_wr32(priv, 0x08710c, 0x00000000); + nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */ + nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */ return 0; } @@ -154,10 +201,8 @@ nv98_crypt_oclass = { .handle = NV_ENGINE(CRYPT, 0x98), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv98_crypt_ctor, - .dtor = _nouveau_falcon_dtor, - .init = _nouveau_falcon_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, + .dtor = _nouveau_crypt_dtor, + .init = nv98_crypt_init, + .fini = _nouveau_crypt_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c deleted file mode 100644 index d0817d94454c..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include - -#include "nv50.h" - -int -nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data) -{ - const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) | - (data & NV50_DISP_DAC_PWR_VSYNC) | - (data & NV50_DISP_DAC_PWR_DATA) | - (data & NV50_DISP_DAC_PWR_STATE); - const u32 doff = (or * 0x800); - nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); - nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); - nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); - return 0; -} - -int -nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) -{ - const u32 doff = (or * 0x800); - int load = -EINVAL; - nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); - udelay(9500); - nv_wr32(priv, 0x61a00c + doff, 0x80000000); - load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27; - nv_wr32(priv, 0x61a00c + doff, 0x00000000); - return load; -} - -int -nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR); - u32 *data = args; - int ret; - - if (size < sizeof(u32)) - return -EINVAL; - - switch (mthd & ~0x3f) { - case NV50_DISP_DAC_PWR: - ret = priv->dac.power(priv, or, data[0]); - break; - case NV50_DISP_DAC_LOAD: - ret = priv->dac.sense(priv, or, data[0]); - if (ret >= 0) { - data[0] = ret; - ret = 0; - } - break; - default: - BUG_ON(1); - } - - return ret; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c deleted file mode 100644 index 373dbcc523b2..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include "nv50.h" - -int -nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) -{ - const u32 soff = (or * 0x800); - int i; - - if (data && data[0]) { - for (i = 0; i < size; i++) - nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); - nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); - } else - if (data) { - nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001); - } else { - nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000); - } - - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c deleted file mode 100644 index dc57e24fc1df..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include -#include - -#include "nv50.h" - -int -nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) -{ - const u32 soff = (or * 0x030); - int i; - - if (data && data[0]) { - for (i = 0; i < size; i++) - nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); - nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); - } else - if (data) { - nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001); - } else { - nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000); - } - - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c deleted file mode 100644 index 0d36bdc51417..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include "nv50.h" - -int -nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) -{ - const u32 hoff = (head * 0x800); - - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { - nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000); - nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); - nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); - return 0; - } - - /* AVI InfoFrame */ - nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x616528 + hoff, 0x000d0282); - nv_wr32(priv, 0x61652c + hoff, 0x0000006f); - nv_wr32(priv, 0x616530 + hoff, 0x00000000); - nv_wr32(priv, 0x616534 + hoff, 0x00000000); - nv_wr32(priv, 0x616538 + hoff, 0x00000000); - nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001); - - /* Audio InfoFrame */ - nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x616508 + hoff, 0x000a0184); - nv_wr32(priv, 0x61650c + hoff, 0x00000071); - nv_wr32(priv, 0x616510 + hoff, 0x00000000); - nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001); - - /* ??? */ - nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ - nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ - nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ - - /* HDMI_CTRL */ - nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c deleted file mode 100644 index f065fc248adf..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include "nv50.h" - -int -nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) -{ - const u32 soff = (or * 0x800); - - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { - nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000); - nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); - nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); - return 0; - } - - /* AVI InfoFrame */ - nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x61c528 + soff, 0x000d0282); - nv_wr32(priv, 0x61c52c + soff, 0x0000006f); - nv_wr32(priv, 0x61c530 + soff, 0x00000000); - nv_wr32(priv, 0x61c534 + soff, 0x00000000); - nv_wr32(priv, 0x61c538 + soff, 0x00000000); - nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001); - - /* Audio InfoFrame */ - nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x61c508 + soff, 0x000a0184); - nv_wr32(priv, 0x61c50c + soff, 0x00000071); - nv_wr32(priv, 0x61c510 + soff, 0x00000000); - nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001); - - /* ??? */ - nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ - nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ - nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ - - /* HDMI_CTRL */ - nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c deleted file mode 100644 index 5151bb261832..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include "nv50.h" - -int -nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) -{ - const u32 hoff = (head * 0x800); - - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { - nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); - nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); - nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); - return 0; - } - - /* AVI InfoFrame */ - nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x61671c + hoff, 0x000d0282); - nv_wr32(priv, 0x616720 + hoff, 0x0000006f); - nv_wr32(priv, 0x616724 + hoff, 0x00000000); - nv_wr32(priv, 0x616728 + hoff, 0x00000000); - nv_wr32(priv, 0x61672c + hoff, 0x00000000); - nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001); - - /* ??? InfoFrame? */ - nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); - nv_wr32(priv, 0x6167ac + hoff, 0x00000010); - nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001); - - /* HDMI_CTRL */ - nv_mask(priv, 0x616798 + hoff, 0x401f007f, data); - - /* NFI, audio doesn't work without it though.. */ - nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 0f09af135415..15b182c84ce8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -22,740 +22,20 @@ * Authors: Ben Skeggs */ -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "nv50.h" - -/******************************************************************************* - * EVO channel base class - ******************************************************************************/ - -int -nv50_disp_chan_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, - int length, void **pobject) -{ - struct nv50_disp_base *base = (void *)parent; - struct nv50_disp_chan *chan; - int ret; - - if (base->chan & (1 << chid)) - return -EBUSY; - base->chan |= (1 << chid); - - ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL, - (1ULL << NVDEV_ENGINE_DMAOBJ), - length, pobject); - chan = *pobject; - if (ret) - return ret; - - chan->chid = chid; - return 0; -} - -void -nv50_disp_chan_destroy(struct nv50_disp_chan *chan) -{ - struct nv50_disp_base *base = (void *)nv_object(chan)->parent; - base->chan &= ~(1 << chan->chid); - nouveau_namedb_destroy(&chan->base); -} - -u32 -nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_chan *chan = (void *)object; - return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr); -} - -void -nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_chan *chan = (void *)object; - nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data); -} - -/******************************************************************************* - * EVO DMA channel base class - ******************************************************************************/ - -static int -nv50_disp_dmac_object_attach(struct nouveau_object *parent, - struct nouveau_object *object, u32 name) -{ - struct nv50_disp_base *base = (void *)parent->parent; - struct nv50_disp_chan *chan = (void 
*)parent; - u32 addr = nv_gpuobj(object)->node->offset; - u32 chid = chan->chid; - u32 data = (chid << 28) | (addr << 10) | chid; - return nouveau_ramht_insert(base->ramht, chid, name, data); -} - -static void -nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie) -{ - struct nv50_disp_base *base = (void *)parent->parent; - nouveau_ramht_remove(base->ramht, cookie); -} - -int -nv50_disp_dmac_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, u32 pushbuf, int chid, - int length, void **pobject) -{ - struct nv50_disp_dmac *dmac; - int ret; - - ret = nv50_disp_chan_create_(parent, engine, oclass, chid, - length, pobject); - dmac = *pobject; - if (ret) - return ret; - - dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf); - if (!dmac->pushdma) - return -ENOENT; - - switch (nv_mclass(dmac->pushdma)) { - case 0x0002: - case 0x003d: - if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff) - return -EINVAL; - - switch (dmac->pushdma->target) { - case NV_MEM_TARGET_VRAM: - dmac->push = 0x00000000 | dmac->pushdma->start >> 8; - break; - case NV_MEM_TARGET_PCI_NOSNOOP: - dmac->push = 0x00000003 | dmac->pushdma->start >> 8; - break; - default: - return -EINVAL; - } - break; - default: - return -EINVAL; - } - - return 0; -} - -void -nv50_disp_dmac_dtor(struct nouveau_object *object) -{ - struct nv50_disp_dmac *dmac = (void *)object; - nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma); - nv50_disp_chan_destroy(&dmac->base); -} - -static int -nv50_disp_dmac_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *dmac = (void *)object; - int chid = dmac->base.chid; - int ret; - - ret = nv50_disp_chan_init(&dmac->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid); - - /* initialise channel for dma command submission */ - nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push); - nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000); - nv_wr32(priv, 0x61020c + (chid * 0x0010), chid); - nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010); - nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000); - nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013); - - /* wait for it to go inactive */ - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) { - nv_error(dmac, "init timeout, 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - return -EBUSY; - } - - return 0; -} - -static int -nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *dmac = (void *)object; - int chid = dmac->base.chid; - - /* deactivate channel */ - nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000); - nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000); - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) { - nv_error(dmac, "fini timeout, 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid); - - return nv50_disp_chan_fini(&dmac->base, suspend); -} - -/******************************************************************************* - * EVO master channel object - ******************************************************************************/ - -static int -nv50_disp_mast_ctor(struct nouveau_object 
*parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_mast_class *args = data; - struct nv50_disp_dmac *mast; - int ret; - - if (size < sizeof(*args)) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 0, sizeof(*mast), (void **)&mast); - *pobject = nv_object(mast); - if (ret) - return ret; - - nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach; - return 0; -} - -static int -nv50_disp_mast_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *mast = (void *)object; - int ret; - - ret = nv50_disp_chan_init(&mast->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610028, 0x00010001, 0x00010001); - - /* attempt to unstick channel from some unknown state */ - if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000) - nv_mask(priv, 0x610200, 0x00800000, 0x00800000); - if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000) - nv_mask(priv, 0x610200, 0x00600000, 0x00600000); - - /* initialise channel for dma command submission */ - nv_wr32(priv, 0x610204, mast->push); - nv_wr32(priv, 0x610208, 0x00010000); - nv_wr32(priv, 0x61020c, 0x00000000); - nv_mask(priv, 0x610200, 0x00000010, 0x00000010); - nv_wr32(priv, 0x640000, 0x00000000); - nv_wr32(priv, 0x610200, 0x01000013); - - /* wait for it to go inactive */ - if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) { - nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200)); - return -EBUSY; - } - - return 0; -} - -static int -nv50_disp_mast_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *mast = (void *)object; - - /* deactivate channel */ - nv_mask(priv, 0x610200, 0x00000010, 0x00000000); - nv_mask(priv, 0x610200, 0x00000003, 0x00000000); - if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) { - nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200)); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610028, 0x00010001, 0x00000000); - - return nv50_disp_chan_fini(&mast->base, suspend); -} - -struct nouveau_ofuncs -nv50_disp_mast_ofuncs = { - .ctor = nv50_disp_mast_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nv50_disp_mast_init, - .fini = nv50_disp_mast_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO sync channel objects - ******************************************************************************/ - -static int -nv50_disp_sync_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_sync_class *args = data; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*data) || args->head > 1) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 1 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs -nv50_disp_sync_ofuncs = { - .ctor = nv50_disp_sync_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = 
nv50_disp_dmac_init, - .fini = nv50_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO overlay channel objects - ******************************************************************************/ - -static int -nv50_disp_ovly_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_ovly_class *args = data; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*data) || args->head > 1) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 3 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs -nv50_disp_ovly_ofuncs = { - .ctor = nv50_disp_ovly_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nv50_disp_dmac_init, - .fini = nv50_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO PIO channel base class - ******************************************************************************/ - -static int -nv50_disp_pioc_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, - int length, void **pobject) -{ - return nv50_disp_chan_create_(parent, engine, oclass, chid, - length, pobject); -} - -static void -nv50_disp_pioc_dtor(struct nouveau_object *object) -{ - struct nv50_disp_pioc *pioc = (void *)object; - nv50_disp_chan_destroy(&pioc->base); -} - -static int -nv50_disp_pioc_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_pioc *pioc = (void *)object; - int chid = pioc->base.chid; - int ret; - - ret = nv50_disp_chan_init(&pioc->base); - if (ret) - return ret; - - nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000); - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) { - nv_error(pioc, "timeout0: 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - return -EBUSY; - } - - nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001); - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) { - nv_error(pioc, "timeout1: 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - return -EBUSY; - } - - return 0; -} - -static int -nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_pioc *pioc = (void *)object; - int chid = pioc->base.chid; - - nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000); - if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) { - nv_error(pioc, "timeout: 0x%08x\n", - nv_rd32(priv, 0x610200 + (chid * 0x10))); - if (suspend) - return -EBUSY; - } - - return nv50_disp_chan_fini(&pioc->base, suspend); -} - -/******************************************************************************* - * EVO immediate overlay channel objects - ******************************************************************************/ - -static int -nv50_disp_oimm_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) 
-{ - struct nv50_display_oimm_class *args = data; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head > 1) - return -EINVAL; - - ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs -nv50_disp_oimm_ofuncs = { - .ctor = nv50_disp_oimm_ctor, - .dtor = nv50_disp_pioc_dtor, - .init = nv50_disp_pioc_init, - .fini = nv50_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO cursor channel objects - ******************************************************************************/ - -static int -nv50_disp_curs_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_curs_class *args = data; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head > 1) - return -EINVAL; - - ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs -nv50_disp_curs_ofuncs = { - .ctor = nv50_disp_curs_ctor, - .dtor = nv50_disp_pioc_dtor, - .init = nv50_disp_pioc_init, - .fini = nv50_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * Base display object - ******************************************************************************/ - -static int -nv50_disp_base_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_base *base; - int ret; - - ret = nouveau_parent_create(parent, engine, oclass, 0, - priv->sclass, 0, &base); - *pobject = nv_object(base); - if (ret) - return ret; - - return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); -} - -static void -nv50_disp_base_dtor(struct nouveau_object *object) -{ - struct nv50_disp_base *base = (void *)object; - nouveau_ramht_ref(NULL, &base->ramht); - nouveau_parent_destroy(&base->base); -} - -static int -nv50_disp_base_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_base *base = (void *)object; - int ret, i; - u32 tmp; - - ret = nouveau_parent_init(&base->base); - if (ret) - return ret; - - /* The below segments of code copying values from one register to - * another appear to inform EVO of the display capabilities or - * something similar. NFI what the 0x614004 caps are for.. - */ - tmp = nv_rd32(priv, 0x614004); - nv_wr32(priv, 0x610184, tmp); - - /* ... CRTC caps */ - for (i = 0; i < priv->head.nr; i++) { - tmp = nv_rd32(priv, 0x616100 + (i * 0x800)); - nv_wr32(priv, 0x610190 + (i * 0x10), tmp); - tmp = nv_rd32(priv, 0x616104 + (i * 0x800)); - nv_wr32(priv, 0x610194 + (i * 0x10), tmp); - tmp = nv_rd32(priv, 0x616108 + (i * 0x800)); - nv_wr32(priv, 0x610198 + (i * 0x10), tmp); - tmp = nv_rd32(priv, 0x61610c + (i * 0x800)); - nv_wr32(priv, 0x61019c + (i * 0x10), tmp); - } - - /* ... 
DAC caps */ - for (i = 0; i < priv->dac.nr; i++) { - tmp = nv_rd32(priv, 0x61a000 + (i * 0x800)); - nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp); - } - - /* ... SOR caps */ - for (i = 0; i < priv->sor.nr; i++) { - tmp = nv_rd32(priv, 0x61c000 + (i * 0x800)); - nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp); - } - - /* ... EXT caps */ - for (i = 0; i < 3; i++) { - tmp = nv_rd32(priv, 0x61e000 + (i * 0x800)); - nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp); - } - - /* steal display away from vbios, or something like that */ - if (nv_rd32(priv, 0x610024) & 0x00000100) { - nv_wr32(priv, 0x610024, 0x00000100); - nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000); - if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) { - nv_error(priv, "timeout acquiring display\n"); - return -EBUSY; - } - } - - /* point at display engine memory area (hash table, objects) */ - nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9); - - /* enable supervisor interrupts, disable everything else */ - nv_wr32(priv, 0x61002c, 0x00000370); - nv_wr32(priv, 0x610028, 0x00000000); - return 0; -} - -static int -nv50_disp_base_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_base *base = (void *)object; - - /* disable all interrupts */ - nv_wr32(priv, 0x610024, 0x00000000); - nv_wr32(priv, 0x610020, 0x00000000); - - return nouveau_parent_fini(&base->base, suspend); -} - -struct nouveau_ofuncs -nv50_disp_base_ofuncs = { - .ctor = nv50_disp_base_ctor, - .dtor = nv50_disp_base_dtor, - .init = nv50_disp_base_init, - .fini = nv50_disp_base_fini, -}; - -static struct nouveau_omthds -nv50_disp_base_omthds[] = { - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - {}, -}; - -static struct nouveau_oclass -nv50_disp_base_oclass[] = { - { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds }, - {} +struct nv50_disp_priv { + struct nouveau_disp base; }; static struct nouveau_oclass nv50_disp_sclass[] = { - { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -/******************************************************************************* - * Display context, tracks instmem allocation and prevents more than one - * client using the display hardware at any time. - ******************************************************************************/ - -static int -nv50_disp_data_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv = (void *)engine; - struct nouveau_engctx *ectx; - int ret = -EBUSY; - - /* no context needed for channel objects... 
*/ - if (nv_mclass(parent) != NV_DEVICE_CLASS) { - atomic_inc(&parent->refcount); - *pobject = parent; - return 0; - } - - /* allocate display hardware to client */ - mutex_lock(&nv_subdev(priv)->mutex); - if (list_empty(&nv_engine(priv)->contexts)) { - ret = nouveau_engctx_create(parent, engine, oclass, NULL, - 0x10000, 0x10000, - NVOBJ_FLAG_HEAP, &ectx); - *pobject = nv_object(ectx); - } - mutex_unlock(&nv_subdev(priv)->mutex); - return ret; -} - -struct nouveau_oclass -nv50_disp_cclass = { - .handle = NV_ENGCTX(DISP, 0x50), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv50_disp_data_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, - }, + {}, }; -/******************************************************************************* - * Display engine implementation - ******************************************************************************/ - -static void -nv50_disp_intr_error(struct nv50_disp_priv *priv) -{ - u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16; - u32 addr, data; - int chid; - - for (chid = 0; chid < 5; chid++) { - if (!(channels & (1 << chid))) - continue; - - nv_wr32(priv, 0x610020, 0x00010000 << chid); - addr = nv_rd32(priv, 0x610080 + (chid * 0x08)); - data = nv_rd32(priv, 0x610084 + (chid * 0x08)); - nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000); - - nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n", - chid, addr & 0xffc, data, addr); - } -} - static void nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) { @@ -800,422 +80,30 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) disp->vblank.notify(disp->vblank.data, crtc); } -static u16 -exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, - struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *info) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - u16 mask, type, data; - - if (outp < 4) { - type = DCB_OUTPUT_ANALOG; - mask = 0; - } else { - outp -= 4; - switch (ctrl & 0x00000f00) { - case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break; - case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break; - case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break; - case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break; - case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break; - case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; - default: - nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); - return 0x0000; - } - } - - mask = 0x00c0 & (mask << 6); - mask |= 0x0001 << outp; - mask |= 0x0100 << head; - - data = dcb_outp_match(bios, type, mask, ver, hdr, dcb); - if (!data) - return 0x0000; - - return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info); -} - -static bool -exec_script(struct nv50_disp_priv *priv, int head, int id) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_outp info; - struct dcb_output dcb; - u8 ver, hdr, cnt, len; - u16 data; - u32 ctrl = 0x00000000; - int i; - - for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) - ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); - - if (nv_device(priv)->chipset < 0x90 || - nv_device(priv)->chipset == 0x92 || - nv_device(priv)->chipset == 0xa0) { - for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) - ctrl = nv_rd32(priv, 0x610b74 + (i * 8)); - i += 3; - } else { - for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) - ctrl = nv_rd32(priv, 0x610798 + (i * 8)); - i += 3; - } - - if (!(ctrl & (1 << head))) - return false; - - data = 
exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); - if (data) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = info.script[id], - .outp = &dcb, - .crtc = head, - .execute = 1, - }; - - return nvbios_exec(&init) == 0; - } - - return false; -} - -static u32 -exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, - struct dcb_output *outp) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_outp info1; - struct nvbios_ocfg info2; - u8 ver, hdr, cnt, len; - u16 data, conf; - u32 ctrl = 0x00000000; - int i; - - for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) - ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); - - if (nv_device(priv)->chipset < 0x90 || - nv_device(priv)->chipset == 0x92 || - nv_device(priv)->chipset == 0xa0) { - for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) - ctrl = nv_rd32(priv, 0x610b70 + (i * 8)); - i += 3; - } else { - for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) - ctrl = nv_rd32(priv, 0x610794 + (i * 8)); - i += 3; - } - - if (!(ctrl & (1 << head))) - return 0x0000; - - data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1); - if (!data) - return 0x0000; - - switch (outp->type) { - case DCB_OUTPUT_TMDS: - conf = (ctrl & 0x00000f00) >> 8; - if (pclk >= 165000) - conf |= 0x0100; - break; - case DCB_OUTPUT_LVDS: - conf = priv->sor.lvdsconf; - break; - case DCB_OUTPUT_DP: - conf = (ctrl & 0x00000f00) >> 8; - break; - case DCB_OUTPUT_ANALOG: - default: - conf = 0x00ff; - break; - } - - data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2); - if (data) { - data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); - if (data) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = data, - .outp = outp, - .crtc = head, - .execute = 1, - }; - - if (nvbios_exec(&init)) - return 0x0000; - return conf; - } - } - - return 0x0000; -} - -static void -nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super) -{ - int head = ffs((super & 0x00000060) >> 5) - 1; - if (head >= 0) { - head = ffs((super & 0x00000180) >> 7) - 1; - if (head >= 0) - exec_script(priv, head, 1); - } - - nv_wr32(priv, 0x610024, 0x00000010); - nv_wr32(priv, 0x610030, 0x80000000); -} - -static void -nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv, - struct dcb_output *outp, u32 pclk) -{ - const int link = !(outp->sorconf.link & 1); - const int or = ffs(outp->or) - 1; - const u32 soff = ( or * 0x800); - const u32 loff = (link * 0x080) + soff; - const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8)); - const u32 symbol = 100000; - u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000; - u32 clksor = nv_rd32(priv, 0x614300 + soff); - int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; - int TU, VTUi, VTUf, VTUa; - u64 link_data_rate, link_ratio, unk; - u32 best_diff = 64 * symbol; - u32 link_nr, link_bw, bits, r; - - /* calculate packed data rate for each lane */ - if (dpctrl > 0x00030000) link_nr = 4; - else if (dpctrl > 0x00010000) link_nr = 2; - else link_nr = 1; - - if (clksor & 0x000c0000) - link_bw = 270000; - else - link_bw = 162000; - - if ((ctrl & 0xf0000) == 0x60000) bits = 30; - else if ((ctrl & 0xf0000) == 0x50000) bits = 24; - else bits = 18; - - link_data_rate = (pclk * bits / 8) / link_nr; - - /* calculate ratio of packed data rate to link symbol rate */ - link_ratio = link_data_rate * symbol; - r = do_div(link_ratio, link_bw); - - for (TU = 64; TU >= 32; TU--) { - /* calculate average number of valid symbols in each TU */ - u32 
tu_valid = link_ratio * TU; - u32 calc, diff; - - /* find a hw representation for the fraction.. */ - VTUi = tu_valid / symbol; - calc = VTUi * symbol; - diff = tu_valid - calc; - if (diff) { - if (diff >= (symbol / 2)) { - VTUf = symbol / (symbol - diff); - if (symbol - (VTUf * diff)) - VTUf++; - - if (VTUf <= 15) { - VTUa = 1; - calc += symbol - (symbol / VTUf); - } else { - VTUa = 0; - VTUf = 1; - calc += symbol; - } - } else { - VTUa = 0; - VTUf = min((int)(symbol / diff), 15); - calc += symbol / VTUf; - } - - diff = calc - tu_valid; - } else { - /* no remainder, but the hw doesn't like the fractional - * part to be zero. decrement the integer part and - * have the fraction add a whole symbol back - */ - VTUa = 0; - VTUf = 1; - VTUi--; - } - - if (diff < best_diff) { - best_diff = diff; - bestTU = TU; - bestVTUa = VTUa; - bestVTUf = VTUf; - bestVTUi = VTUi; - if (diff == 0) - break; - } - } - - if (!bestTU) { - nv_error(priv, "unable to find suitable dp config\n"); - return; - } - - /* XXX close to vbios numbers, but not right */ - unk = (symbol - link_ratio) * bestTU; - unk *= link_ratio; - r = do_div(unk, symbol); - r = do_div(unk, symbol); - unk += 6; - - nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2); - nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 | - bestVTUf << 16 | - bestVTUi << 8 | unk); -} - -static void -nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super) -{ - struct dcb_output outp; - u32 addr, mask, data; - int head; - - /* finish detaching encoder? */ - head = ffs((super & 0x00000180) >> 7) - 1; - if (head >= 0) - exec_script(priv, head, 2); - - /* check whether a vpll change is required */ - head = ffs((super & 0x00000600) >> 9) - 1; - if (head >= 0) { - u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; - if (pclk) { - struct nouveau_clock *clk = nouveau_clock(priv); - clk->pll_set(clk, PLL_VPLL0 + head, pclk); - } - - nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000); - } - - /* (re)attach the relevant OR to the head */ - head = ffs((super & 0x00000180) >> 7) - 1; - if (head >= 0) { - u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; - u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp); - if (conf) { - if (outp.type == DCB_OUTPUT_ANALOG) { - addr = 0x614280 + (ffs(outp.or) - 1) * 0x800; - mask = 0xffffffff; - data = 0x00000000; - } else { - if (outp.type == DCB_OUTPUT_DP) - nv50_disp_intr_unk20_dp(priv, &outp, pclk); - addr = 0x614300 + (ffs(outp.or) - 1) * 0x800; - mask = 0x00000707; - data = (conf & 0x0100) ? 0x0101 : 0x0000; - } - - nv_mask(priv, addr, mask, data); - } - } - - nv_wr32(priv, 0x610024, 0x00000020); - nv_wr32(priv, 0x610030, 0x80000000); -} - -/* If programming a TMDS output on a SOR that can also be configured for - * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off. - * - * It looks like the VBIOS TMDS scripts make an attempt at this, however, - * the VBIOS scripts on at least one board I have only switch it off on - * link 0, causing a blank display if the output has previously been - * programmed for DisplayPort. 
- */ -static void -nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const int link = !(outp->sorconf.link & 1); - const int or = ffs(outp->or) - 1; - const u32 loff = (or * 0x800) + (link * 0x80); - const u16 mask = (outp->sorconf.link << 6) | outp->or; - u8 ver, hdr; - - if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp)) - nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); -} - static void -nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super) -{ - int head = ffs((super & 0x00000180) >> 7) - 1; - if (head >= 0) { - struct dcb_output outp; - u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; - if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) { - if (outp.type == DCB_OUTPUT_TMDS) - nv50_disp_intr_unk40_tmds(priv, &outp); - } - } - - nv_wr32(priv, 0x610024, 0x00000040); - nv_wr32(priv, 0x610030, 0x80000000); -} - -static void -nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1) -{ - u32 super = nv_rd32(priv, 0x610030); - - nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super); - - if (intr1 & 0x00000010) - nv50_disp_intr_unk10(priv, super); - if (intr1 & 0x00000020) - nv50_disp_intr_unk20(priv, super); - if (intr1 & 0x00000040) - nv50_disp_intr_unk40(priv, super); -} - -void nv50_disp_intr(struct nouveau_subdev *subdev) { struct nv50_disp_priv *priv = (void *)subdev; - u32 intr0 = nv_rd32(priv, 0x610020); - u32 intr1 = nv_rd32(priv, 0x610024); + u32 stat1 = nv_rd32(priv, 0x610024); - if (intr0 & 0x001f0000) { - nv50_disp_intr_error(priv); - intr0 &= ~0x001f0000; - } - - if (intr1 & 0x00000004) { + if (stat1 & 0x00000004) { nv50_disp_intr_vblank(priv, 0); nv_wr32(priv, 0x610024, 0x00000004); - intr1 &= ~0x00000004; + stat1 &= ~0x00000004; } - if (intr1 & 0x00000008) { + if (stat1 & 0x00000008) { nv50_disp_intr_vblank(priv, 1); nv_wr32(priv, 0x610024, 0x00000008); - intr1 &= ~0x00000008; + stat1 &= ~0x00000008; } - if (intr1 & 0x00000070) { - nv50_disp_intr_super(priv, intr1); - intr1 &= ~0x00000070; - } } static int nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) { struct nv50_disp_priv *priv; int ret; @@ -1226,16 +114,8 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nv50_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_engine(priv)->sclass = nv50_disp_sclass; nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nv50_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 2; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; INIT_LIST_HEAD(&priv->base.vblank.list); spin_lock_init(&priv->base.vblank.lock); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h deleted file mode 100644 index a6bb931450f1..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h +++ /dev/null @@ -1,142 +0,0 @@ -#ifndef __NV50_DISP_H__ -#define __NV50_DISP_H__ - -#include -#include -#include - -#include -#include - -struct dcb_output; - -struct nv50_disp_priv { - struct nouveau_disp base; - struct nouveau_oclass *sclass; - struct { - int nr; - } head; - struct { - int nr; - int (*power)(struct nv50_disp_priv 
*, int dac, u32 data); - int (*sense)(struct nv50_disp_priv *, int dac, u32 load); - } dac; - struct { - int nr; - int (*power)(struct nv50_disp_priv *, int sor, u32 data); - int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32); - int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32); - int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link, - int head, u16 type, u16 mask, u32 data, - struct dcb_output *); - int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link, - int head, u16 type, u16 mask, u32 data, - struct dcb_output *); - int (*dp_train)(struct nv50_disp_priv *, int sor, int link, - u16 type, u16 mask, u32 data, - struct dcb_output *); - int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link, - int head, u16 type, u16 mask, u32 data, - struct dcb_output *); - int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link, - int lane, u16 type, u16 mask, u32 data, - struct dcb_output *); - u32 lvdsconf; - } sor; -}; - -#define DAC_MTHD(n) (n), (n) + 0x03 - -int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32); -int nv50_dac_power(struct nv50_disp_priv *, int, u32); -int nv50_dac_sense(struct nv50_disp_priv *, int, u32); - -#define SOR_MTHD(n) (n), (n) + 0x3f - -int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); -int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); - -int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); -int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); -int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); - -int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32); -int nv50_sor_power(struct nv50_disp_priv *, int, u32); - -int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16, - u32, struct dcb_output *); -int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16, - u32, struct dcb_output *); -int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32, - struct dcb_output *); -int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, - struct dcb_output *); -int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, - struct dcb_output *); - -int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32, - struct dcb_output *); -int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, - struct dcb_output *); -int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, - struct dcb_output *); - -struct nv50_disp_base { - struct nouveau_parent base; - struct nouveau_ramht *ramht; - u32 chan; -}; - -struct nv50_disp_chan { - struct nouveau_namedb base; - int chid; -}; - -int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, int, int, void **); -void nv50_disp_chan_destroy(struct nv50_disp_chan *); -u32 nv50_disp_chan_rd32(struct nouveau_object *, u64); -void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); - -#define nv50_disp_chan_init(a) \ - nouveau_namedb_init(&(a)->base) -#define nv50_disp_chan_fini(a,b) \ - nouveau_namedb_fini(&(a)->base, (b)) - -int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, u32, int, int, void **); -void nv50_disp_dmac_dtor(struct nouveau_object *); - -struct nv50_disp_dmac { - struct nv50_disp_chan base; - struct nouveau_dmaobj *pushdma; - u32 push; -}; - -struct nv50_disp_pioc { - struct nv50_disp_chan base; -}; - -extern struct nouveau_ofuncs nv50_disp_mast_ofuncs; -extern struct nouveau_ofuncs 
nv50_disp_sync_ofuncs; -extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs; -extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs; -extern struct nouveau_ofuncs nv50_disp_curs_ofuncs; -extern struct nouveau_ofuncs nv50_disp_base_ofuncs; -extern struct nouveau_oclass nv50_disp_cclass; -void nv50_disp_intr(struct nouveau_subdev *); - -extern struct nouveau_omthds nv84_disp_base_omthds[]; - -extern struct nouveau_omthds nva3_disp_base_omthds[]; - -extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; -extern struct nouveau_oclass nvd0_disp_cclass; -void nvd0_disp_intr(struct nouveau_subdev *); - -#endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c deleted file mode 100644 index fc84eacdfbec..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nv84_disp_sclass[] = { - { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -struct nouveau_omthds -nv84_disp_base_omthds[] = { - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - {}, -}; - -static struct nouveau_oclass -nv84_disp_base_oclass[] = { - { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, - {} -}; - -static int -nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nv84_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nv84_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 2; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hdmi = nv84_hdmi_ctrl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nv84_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x82), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv84_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c deleted file mode 100644 index ba9dfd4669a2..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nv94_disp_sclass[] = { - { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -static struct nouveau_omthds -nv94_disp_base_omthds[] = { - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - {}, -}; - -static struct nouveau_oclass -nv94_disp_base_oclass[] = { - { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds }, - {} -}; - -static int -nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nv94_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nv94_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 4; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hdmi = nv84_hdmi_ctrl; - priv->sor.dp_train = nv94_sor_dp_train; - priv->sor.dp_train_init = nv94_sor_dp_train_init; - priv->sor.dp_train_fini = nv94_sor_dp_train_fini; - priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl; - priv->sor.dp_drvctl = nv94_sor_dp_drvctl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nv94_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x88), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv94_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c deleted file mode 100644 index 5d63902cdeda..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nva0_disp_sclass[] = { - { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -static struct nouveau_oclass -nva0_disp_base_oclass[] = { - { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, - {} -}; - -static int -nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nva0_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nva0_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 2; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hdmi = nv84_hdmi_ctrl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nva0_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x83), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nva0_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c deleted file mode 100644 index e9192ca389fa..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nva3_disp_sclass[] = { - { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, - {} -}; - -struct nouveau_omthds -nva3_disp_base_omthds[] = { - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - {}, -}; - -static struct nouveau_oclass -nva3_disp_base_oclass[] = { - { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds }, - {} -}; - -static int -nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nva3_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nv50_disp_intr; - priv->sclass = nva3_disp_sclass; - priv->head.nr = 2; - priv->dac.nr = 3; - priv->sor.nr = 4; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hda_eld = nva3_hda_eld; - priv->sor.hdmi = nva3_hdmi_ctrl; - priv->sor.dp_train = nv94_sor_dp_train; - priv->sor.dp_train_init = nv94_sor_dp_train_init; - priv->sor.dp_train_fini = nv94_sor_dp_train_fini; - priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl; - priv->sor.dp_drvctl = nv94_sor_dp_drvctl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nva3_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x85), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nva3_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 9e38ebff5fb3..d93efbcf75b8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -22,808 +22,22 @@ * Authors: Ben Skeggs */ -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "nv50.h" - -/******************************************************************************* - * EVO DMA channel base class - ******************************************************************************/ - -static int -nvd0_disp_dmac_object_attach(struct nouveau_object *parent, - struct nouveau_object *object, u32 name) -{ - struct nv50_disp_base *base = (void *)parent->parent; - struct 
nv50_disp_chan *chan = (void *)parent; - u32 addr = nv_gpuobj(object)->node->offset; - u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001; - return nouveau_ramht_insert(base->ramht, chan->chid, name, data); -} - -static void -nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie) -{ - struct nv50_disp_base *base = (void *)parent->parent; - nouveau_ramht_remove(base->ramht, cookie); -} - -static int -nvd0_disp_dmac_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *dmac = (void *)object; - int chid = dmac->base.chid; - int ret; - - ret = nv50_disp_chan_init(&dmac->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid); - nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); - - /* initialise channel for dma command submission */ - nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push); - nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000); - nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001); - nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); - nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000); - nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013); - - /* wait for it to go inactive */ - if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) { - nv_error(dmac, "init: 0x%08x\n", - nv_rd32(priv, 0x610490 + (chid * 0x10))); - return -EBUSY; - } - - return 0; -} - -static int -nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *dmac = (void *)object; - int chid = dmac->base.chid; - - /* deactivate channel */ - nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000); - nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000); - if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) { - nv_error(dmac, "fini: 0x%08x\n", - nv_rd32(priv, 0x610490 + (chid * 0x10))); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000); - nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000); - - return nv50_disp_chan_fini(&dmac->base, suspend); -} - -/******************************************************************************* - * EVO master channel object - ******************************************************************************/ - -static int -nvd0_disp_mast_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_mast_class *args = data; - struct nv50_disp_dmac *mast; - int ret; - - if (size < sizeof(*args)) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 0, sizeof(*mast), (void **)&mast); - *pobject = nv_object(mast); - if (ret) - return ret; - - nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -static int -nvd0_disp_mast_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *mast = (void *)object; - int ret; - - ret = nv50_disp_chan_init(&mast->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610090, 0x00000001, 0x00000001); - nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001); - - /* initialise channel for dma command 
submission */ - nv_wr32(priv, 0x610494, mast->push); - nv_wr32(priv, 0x610498, 0x00010000); - nv_wr32(priv, 0x61049c, 0x00000001); - nv_mask(priv, 0x610490, 0x00000010, 0x00000010); - nv_wr32(priv, 0x640000, 0x00000000); - nv_wr32(priv, 0x610490, 0x01000013); - - /* wait for it to go inactive */ - if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) { - nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490)); - return -EBUSY; - } - - return 0; -} - -static int -nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_dmac *mast = (void *)object; - - /* deactivate channel */ - nv_mask(priv, 0x610490, 0x00000010, 0x00000000); - nv_mask(priv, 0x610490, 0x00000003, 0x00000000); - if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) { - nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490)); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610090, 0x00000001, 0x00000000); - nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000); - - return nv50_disp_chan_fini(&mast->base, suspend); -} - -struct nouveau_ofuncs -nvd0_disp_mast_ofuncs = { - .ctor = nvd0_disp_mast_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nvd0_disp_mast_init, - .fini = nvd0_disp_mast_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO sync channel objects - ******************************************************************************/ - -static int -nvd0_disp_sync_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_sync_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*data) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 1 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs -nvd0_disp_sync_ofuncs = { - .ctor = nvd0_disp_sync_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nvd0_disp_dmac_init, - .fini = nvd0_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO overlay channel objects - ******************************************************************************/ - -static int -nvd0_disp_ovly_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_ovly_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*data) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 5 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs -nvd0_disp_ovly_ofuncs = { - .ctor = nvd0_disp_ovly_ctor, - 
.dtor = nv50_disp_dmac_dtor, - .init = nvd0_disp_dmac_init, - .fini = nvd0_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO PIO channel base class - ******************************************************************************/ - -static int -nvd0_disp_pioc_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, - int length, void **pobject) -{ - return nv50_disp_chan_create_(parent, engine, oclass, chid, - length, pobject); -} - -static void -nvd0_disp_pioc_dtor(struct nouveau_object *object) -{ - struct nv50_disp_pioc *pioc = (void *)object; - nv50_disp_chan_destroy(&pioc->base); -} - -static int -nvd0_disp_pioc_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_pioc *pioc = (void *)object; - int chid = pioc->base.chid; - int ret; - - ret = nv50_disp_chan_init(&pioc->base); - if (ret) - return ret; - - /* enable error reporting */ - nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid); - nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); - - /* activate channel */ - nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001); - if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) { - nv_error(pioc, "init: 0x%08x\n", - nv_rd32(priv, 0x610490 + (chid * 0x10))); - return -EBUSY; - } - - return 0; -} - -static int -nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_pioc *pioc = (void *)object; - int chid = pioc->base.chid; - - nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000); - if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) { - nv_error(pioc, "timeout: 0x%08x\n", - nv_rd32(priv, 0x610490 + (chid * 0x10))); - if (suspend) - return -EBUSY; - } - - /* disable error reporting */ - nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000); - nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000); - - return nv50_disp_chan_fini(&pioc->base, suspend); -} - -/******************************************************************************* - * EVO immediate overlay channel objects - ******************************************************************************/ - -static int -nvd0_disp_oimm_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_oimm_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs -nvd0_disp_oimm_ofuncs = { - .ctor = nvd0_disp_oimm_ctor, - .dtor = nvd0_disp_pioc_dtor, - .init = nvd0_disp_pioc_init, - .fini = nvd0_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * EVO cursor channel objects - ******************************************************************************/ - -static int -nvd0_disp_curs_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, 
void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_curs_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs -nvd0_disp_curs_ofuncs = { - .ctor = nvd0_disp_curs_ctor, - .dtor = nvd0_disp_pioc_dtor, - .init = nvd0_disp_pioc_init, - .fini = nvd0_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, -}; - -/******************************************************************************* - * Base display object - ******************************************************************************/ - -static int -nvd0_disp_base_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_base *base; - int ret; - - ret = nouveau_parent_create(parent, engine, oclass, 0, - priv->sclass, 0, &base); - *pobject = nv_object(base); - if (ret) - return ret; - - return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); -} - -static void -nvd0_disp_base_dtor(struct nouveau_object *object) -{ - struct nv50_disp_base *base = (void *)object; - nouveau_ramht_ref(NULL, &base->ramht); - nouveau_parent_destroy(&base->base); -} - -static int -nvd0_disp_base_init(struct nouveau_object *object) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_base *base = (void *)object; - int ret, i; - u32 tmp; - - ret = nouveau_parent_init(&base->base); - if (ret) - return ret; - - /* The below segments of code copying values from one register to - * another appear to inform EVO of the display capabilities or - * something similar. - */ - - /* ... CRTC caps */ - for (i = 0; i < priv->head.nr; i++) { - tmp = nv_rd32(priv, 0x616104 + (i * 0x800)); - nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp); - tmp = nv_rd32(priv, 0x616108 + (i * 0x800)); - nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp); - tmp = nv_rd32(priv, 0x61610c + (i * 0x800)); - nv_wr32(priv, 0x6101bc + (i * 0x800), tmp); - } - - /* ... DAC caps */ - for (i = 0; i < priv->dac.nr; i++) { - tmp = nv_rd32(priv, 0x61a000 + (i * 0x800)); - nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp); - } - - /* ... 
SOR caps */ - for (i = 0; i < priv->sor.nr; i++) { - tmp = nv_rd32(priv, 0x61c000 + (i * 0x800)); - nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp); - } - - /* steal display away from vbios, or something like that */ - if (nv_rd32(priv, 0x6100ac) & 0x00000100) { - nv_wr32(priv, 0x6100ac, 0x00000100); - nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000); - if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) { - nv_error(priv, "timeout acquiring display\n"); - return -EBUSY; - } - } - - /* point at display engine memory area (hash table, objects) */ - nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9); - - /* enable supervisor interrupts, disable everything else */ - nv_wr32(priv, 0x610090, 0x00000000); - nv_wr32(priv, 0x6100a0, 0x00000000); - nv_wr32(priv, 0x6100b0, 0x00000307); - - return 0; -} - -static int -nvd0_disp_base_fini(struct nouveau_object *object, bool suspend) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv50_disp_base *base = (void *)object; - - /* disable all interrupts */ - nv_wr32(priv, 0x6100b0, 0x00000000); - - return nouveau_parent_fini(&base->base, suspend); -} - -struct nouveau_ofuncs -nvd0_disp_base_ofuncs = { - .ctor = nvd0_disp_base_ctor, - .dtor = nvd0_disp_base_dtor, - .init = nvd0_disp_base_init, - .fini = nvd0_disp_base_fini, -}; - -static struct nouveau_oclass -nvd0_disp_base_oclass[] = { - { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, - {} +struct nvd0_disp_priv { + struct nouveau_disp base; }; static struct nouveau_oclass nvd0_disp_sclass[] = { - { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, - { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, - { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, - { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, - { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, - {} + {}, }; -/******************************************************************************* - * Display engine implementation - ******************************************************************************/ - -static u16 -exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, - struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *info) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - u16 mask, type, data; - - if (outp < 4) { - type = DCB_OUTPUT_ANALOG; - mask = 0; - } else { - outp -= 4; - switch (ctrl & 0x00000f00) { - case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break; - case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break; - case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break; - case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break; - case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break; - case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; - default: - nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); - return 0x0000; - } - dcb->sorconf.link = mask; - } - - mask = 0x00c0 & (mask << 6); - mask |= 0x0001 << outp; - mask |= 0x0100 << head; - - data = dcb_outp_match(bios, type, mask, ver, hdr, dcb); - if (!data) - return 0x0000; - - return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info); -} - -static bool -exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_outp info; - struct dcb_output dcb; - u8 ver, hdr, cnt, len; - u16 data; - - data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); - if (data) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = 
info.script[id], - .outp = &dcb, - .crtc = head, - .execute = 1, - }; - - return nvbios_exec(&init) == 0; - } - - return false; -} - -static u32 -exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp, - u32 ctrl, int id, u32 pclk) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_outp info1; - struct nvbios_ocfg info2; - struct dcb_output dcb; - u8 ver, hdr, cnt, len; - u16 data, conf; - - data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1); - if (data == 0x0000) - return false; - - switch (dcb.type) { - case DCB_OUTPUT_TMDS: - conf = (ctrl & 0x00000f00) >> 8; - if (pclk >= 165000) - conf |= 0x0100; - break; - case DCB_OUTPUT_LVDS: - conf = priv->sor.lvdsconf; - break; - case DCB_OUTPUT_DP: - conf = (ctrl & 0x00000f00) >> 8; - break; - case DCB_OUTPUT_ANALOG: - default: - conf = 0x00ff; - break; - } - - data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2); - if (data) { - data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); - if (data) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = data, - .outp = &dcb, - .crtc = head, - .execute = 1, - }; - - if (nvbios_exec(&init)) - return 0x0000; - return conf; - } - } - - return 0x0000; -} - -static void -nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) -{ - int i; - - for (i = 0; mask && i < 8; i++) { - u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20)); - if (mcc & (1 << head)) - exec_script(priv, head, i, mcc, 1); - } - - nv_wr32(priv, 0x6101d4, 0x00000000); - nv_wr32(priv, 0x6109d4, 0x00000000); - nv_wr32(priv, 0x6101d0, 0x80000000); -} - static void -nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or) -{ - const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020)); - const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300)); - const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; - const u32 link = ((ctrl & 0xf00) == 0x800) ? 
0 : 1; - const u32 hoff = (head * 0x800); - const u32 soff = ( or * 0x800); - const u32 loff = (link * 0x080) + soff; - const u32 symbol = 100000; - const u32 TU = 64; - u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000; - u32 clksor = nv_rd32(priv, 0x612300 + soff); - u32 datarate, link_nr, link_bw, bits; - u64 ratio, value; - - if ((conf & 0x3c0) == 0x180) bits = 30; - else if ((conf & 0x3c0) == 0x140) bits = 24; - else bits = 18; - datarate = (pclk * bits) / 8; - - if (dpctrl > 0x00030000) link_nr = 4; - else if (dpctrl > 0x00010000) link_nr = 2; - else link_nr = 1; - - link_bw = (clksor & 0x007c0000) >> 18; - link_bw *= 27000; - - ratio = datarate; - ratio *= symbol; - do_div(ratio, link_nr * link_bw); - - value = (symbol - ratio) * TU; - value *= ratio; - do_div(value, symbol); - do_div(value, symbol); - - value += 5; - value |= 0x08000000; - - nv_wr32(priv, 0x616610 + hoff, value); -} - -static void -nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) -{ - u32 pclk; - int i; - - for (i = 0; mask && i < 8; i++) { - u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20)); - if (mcc & (1 << head)) - exec_script(priv, head, i, mcc, 2); - } - - pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; - nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask); - if (pclk && (mask & 0x00010000)) { - struct nouveau_clock *clk = nouveau_clock(priv); - clk->pll_set(clk, PLL_VPLL0 + head, pclk); - } - - nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000); - - for (i = 0; mask && i < 8; i++) { - u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg; - if (mcp & (1 << head)) { - if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) { - u32 addr, mask, data = 0x00000000; - if (i < 4) { - addr = 0x612280 + ((i - 0) * 0x800); - mask = 0xffffffff; - } else { - switch (mcp & 0x00000f00) { - case 0x00000800: - case 0x00000900: - nvd0_display_unk2_calc_tu(priv, head, i - 4); - break; - default: - break; - } - - addr = 0x612300 + ((i - 4) * 0x800); - mask = 0x00000707; - if (cfg & 0x00000100) - data = 0x00000101; - } - nv_mask(priv, addr, mask, data); - } - break; - } - } - - nv_wr32(priv, 0x6101d4, 0x00000000); - nv_wr32(priv, 0x6109d4, 0x00000000); - nv_wr32(priv, 0x6101d0, 0x80000000); -} - -static void -nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) -{ - int pclk, i; - - pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; - - for (i = 0; mask && i < 8; i++) { - u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)); - if (mcp & (1 << head)) - exec_clkcmp(priv, head, i, mcp, 1, pclk); - } - - nv_wr32(priv, 0x6101d4, 0x00000000); - nv_wr32(priv, 0x6109d4, 0x00000000); - nv_wr32(priv, 0x6101d0, 0x80000000); -} - -static void -nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) +nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc) { struct nouveau_bar *bar = nouveau_bar(priv); struct nouveau_disp *disp = &priv->base; @@ -851,71 +65,14 @@ nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) disp->vblank.notify(disp->vblank.data, crtc); } -void +static void nvd0_disp_intr(struct nouveau_subdev *subdev) { - struct nv50_disp_priv *priv = (void *)subdev; + struct nvd0_disp_priv *priv = (void *)subdev; u32 intr = nv_rd32(priv, 0x610088); int i; - if (intr & 0x00000001) { - u32 stat = nv_rd32(priv, 0x61008c); - nv_wr32(priv, 0x61008c, stat); - intr &= ~0x00000001; - } - - if (intr & 0x00000002) { - u32 stat = nv_rd32(priv, 0x61009c); - int chid = ffs(stat) - 1; - if (chid >= 0) { - u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12)); - 
u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12)); - u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12)); - - nv_error(priv, "chid %d mthd 0x%04x data 0x%08x " - "0x%08x 0x%08x\n", - chid, (mthd & 0x0000ffc), data, mthd, unkn); - nv_wr32(priv, 0x61009c, (1 << chid)); - nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000); - } - - intr &= ~0x00000002; - } - - if (intr & 0x00100000) { - u32 stat = nv_rd32(priv, 0x6100ac); - u32 mask = 0, crtc = ~0; - - while (!mask && ++crtc < priv->head.nr) - mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800)); - - if (stat & 0x00000001) { - nv_wr32(priv, 0x6100ac, 0x00000001); - nvd0_display_unk1_handler(priv, crtc, mask); - stat &= ~0x00000001; - } - - if (stat & 0x00000002) { - nv_wr32(priv, 0x6100ac, 0x00000002); - nvd0_display_unk2_handler(priv, crtc, mask); - stat &= ~0x00000002; - } - - if (stat & 0x00000004) { - nv_wr32(priv, 0x6100ac, 0x00000004); - nvd0_display_unk4_handler(priv, crtc, mask); - stat &= ~0x00000004; - } - - if (stat) { - nv_info(priv, "unknown intr24 0x%08x\n", stat); - nv_wr32(priv, 0x6100ac, stat); - } - - intr &= ~0x00100000; - } - - for (i = 0; i < priv->head.nr; i++) { + for (i = 0; i < 4; i++) { u32 mask = 0x01000000 << i; if (mask & intr) { u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); @@ -929,10 +86,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev) static int nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) { - struct nv50_disp_priv *priv; + struct nvd0_disp_priv *priv; int ret; ret = nouveau_disp_create(parent, engine, oclass, "PDISP", @@ -941,23 +98,8 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nvd0_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_engine(priv)->sclass = nvd0_disp_sclass; nv_subdev(priv)->intr = nvd0_disp_intr; - priv->sclass = nvd0_disp_sclass; - priv->head.nr = nv_rd32(priv, 0x022448); - priv->dac.nr = 3; - priv->sor.nr = 4; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hda_eld = nvd0_hda_eld; - priv->sor.hdmi = nvd0_hdmi_ctrl; - priv->sor.dp_train = nvd0_sor_dp_train; - priv->sor.dp_train_init = nv94_sor_dp_train_init; - priv->sor.dp_train_fini = nv94_sor_dp_train_fini; - priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl; - priv->sor.dp_drvctl = nvd0_sor_dp_drvctl; INIT_LIST_HEAD(&priv->base.vblank.list); spin_lock_init(&priv->base.vblank.lock); @@ -966,7 +108,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass nvd0_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x90), + .handle = NV_ENGINE(DISP, 0xd0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvd0_disp_ctor, .dtor = _nouveau_disp_dtor, diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c deleted file mode 100644 index 259537c4587e..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include - -#include "nv50.h" - -static struct nouveau_oclass -nve0_disp_sclass[] = { - { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, - { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, - { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, - { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, - { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, - {} -}; - -static struct nouveau_oclass -nve0_disp_base_oclass[] = { - { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, - {} -}; - -static int -nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv; - int ret; - - ret = nouveau_disp_create(parent, engine, oclass, "PDISP", - "display", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nve0_disp_base_oclass; - nv_engine(priv)->cclass = &nv50_disp_cclass; - nv_subdev(priv)->intr = nvd0_disp_intr; - priv->sclass = nve0_disp_sclass; - priv->head.nr = nv_rd32(priv, 0x022448); - priv->dac.nr = 3; - priv->sor.nr = 4; - priv->dac.power = nv50_dac_power; - priv->dac.sense = nv50_dac_sense; - priv->sor.power = nv50_sor_power; - priv->sor.hda_eld = nvd0_hda_eld; - priv->sor.hdmi = nvd0_hdmi_ctrl; - priv->sor.dp_train = nvd0_sor_dp_train; - priv->sor.dp_train_init = nv94_sor_dp_train_init; - priv->sor.dp_train_fini = nv94_sor_dp_train_fini; - priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl; - priv->sor.dp_drvctl = nvd0_sor_dp_drvctl; - - INIT_LIST_HEAD(&priv->base.vblank.list); - spin_lock_init(&priv->base.vblank.lock); - return 0; -} - -struct nouveau_oclass -nve0_disp_oclass = { - .handle = NV_ENGINE(DISP, 0x91), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nve0_disp_ctor, - .dtor = _nouveau_disp_dtor, - .init = _nouveau_disp_init, - .fini = _nouveau_disp_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c deleted file mode 100644 index 39b6b67732d0..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include - -#include "nv50.h" - -int -nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data) -{ - const u32 stat = data & NV50_DISP_SOR_PWR_STATE; - const u32 soff = (or * 0x800); - nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); - nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat); - nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); - nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000); - return 0; -} - -int -nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nouveau_bios *bios = nouveau_bios(priv); - const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12; - const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; - const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2; - const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); - const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or); - struct dcb_output outp; - u8 ver, hdr; - u32 data; - int ret = -EINVAL; - - if (size < sizeof(u32)) - return -EINVAL; - data = *(u32 *)args; - - if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp)) - return -ENODEV; - - switch (mthd & ~0x3f) { - case NV50_DISP_SOR_PWR: - ret = priv->sor.power(priv, or, data); - break; - case NVA3_DISP_SOR_HDA_ELD: - ret = priv->sor.hda_eld(priv, or, args, size); - break; - case NV84_DISP_SOR_HDMI_PWR: - ret = priv->sor.hdmi(priv, head, or, data); - break; - case NV50_DISP_SOR_LVDS_SCRIPT: - priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID; - ret = 0; - break; - case NV94_DISP_SOR_DP_TRAIN: - switch (data & NV94_DISP_SOR_DP_TRAIN_OP) { - case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN: - ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp); - break; - case NV94_DISP_SOR_DP_TRAIN_OP_INIT: - ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp); - break; - case NV94_DISP_SOR_DP_TRAIN_OP_FINI: - ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp); - break; - default: - break; - } - break; - case NV94_DISP_SOR_DP_LNKCTL: - ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp); - break; - case NV94_DISP_SOR_DP_DRVCTL(0): - case NV94_DISP_SOR_DP_DRVCTL(1): - case NV94_DISP_SOR_DP_DRVCTL(2): - case NV94_DISP_SOR_DP_DRVCTL(3): - ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6, - type, mask, data, 
&outp); - break; - default: - BUG_ON(1); - } - - return ret; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c deleted file mode 100644 index f6edd009762e..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include -#include - -#include "nv50.h" - -static inline u32 -nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) -{ - static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ - static const u8 nv94[] = { 16, 8, 0, 24 }; - if (nv_device(priv)->chipset == 0xaf) - return nvaf[lane]; - return nv94[lane]; -} - -int -nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_dpout info; - u8 ver, hdr, cnt, len; - u16 outp; - - outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info); - if (outp) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .outp = dcbo, - .crtc = head, - .execute = 1, - }; - - if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON) - init.offset = info.script[2]; - else - init.offset = info.script[3]; - nvbios_exec(&init); - - init.offset = info.script[0]; - nvbios_exec(&init); - } - - return 0; -} - -int -nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - struct nvbios_dpout info; - u8 ver, hdr, cnt, len; - u16 outp; - - outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info); - if (outp) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = info.script[1], - .outp = dcbo, - .crtc = head, - .execute = 1, - }; - - nvbios_exec(&init); - } - - return 0; -} - -int -nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link, - u16 type, u16 mask, u32 data, struct dcb_output *info) -{ - const u32 loff = (or * 0x800) + (link * 0x80); - const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN); - nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24); - return 0; -} - -int -nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head, - u16 
type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const u32 loff = (or * 0x800) + (link * 0x80); - const u32 soff = (or * 0x800); - u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8; - u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT); - u32 dpctrl = 0x00000000; - u32 clksor = 0x00000000; - u32 outp, lane = 0; - u8 ver, hdr, cnt, len; - struct nvbios_dpout info; - int i; - - /* -> 10Khz units */ - link_bw *= 2700; - - outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info); - if (outp && info.lnkcmp) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = 0x0000, - .outp = dcbo, - .crtc = head, - .execute = 1, - }; - - while (link_bw < nv_ro16(bios, info.lnkcmp)) - info.lnkcmp += 4; - init.offset = nv_ro16(bios, info.lnkcmp + 2); - - nvbios_exec(&init); - } - - dpctrl |= ((1 << link_nr) - 1) << 16; - if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH) - dpctrl |= 0x00004000; - if (link_bw > 16200) - clksor |= 0x00040000; - - for (i = 0; i < link_nr; i++) - lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3); - - nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor); - nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); - nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane); - return 0; -} - -int -nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const u32 loff = (or * 0x800) + (link * 0x80); - const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8; - const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE); - u32 addr, shift = nv94_sor_dp_lane_map(priv, lane); - u8 ver, hdr, cnt, len; - struct nvbios_dpout outp; - struct nvbios_dpcfg ocfg; - - addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp); - if (!addr) - return -ENODEV; - - addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg); - if (!addr) - return -EINVAL; - - nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); - nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); - nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c deleted file mode 100644 index c37ce7e29f5d..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include - -#include -#include -#include -#include - -#include "nv50.h" - -static inline u32 -nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) -{ - static const u8 nvd0[] = { 16, 8, 0, 24 }; - return nvd0[lane]; -} - -int -nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link, - u16 type, u16 mask, u32 data, struct dcb_output *info) -{ - const u32 loff = (or * 0x800) + (link * 0x80); - const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN); - nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt); - return 0; -} - -int -nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const u32 loff = (or * 0x800) + (link * 0x80); - const u32 soff = (or * 0x800); - const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8; - const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT); - u32 dpctrl = 0x00000000; - u32 clksor = 0x00000000; - u32 outp, lane = 0; - u8 ver, hdr, cnt, len; - struct nvbios_dpout info; - int i; - - outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info); - if (outp && info.lnkcmp) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = 0x0000, - .outp = dcbo, - .crtc = head, - .execute = 1, - }; - - while (nv_ro08(bios, info.lnkcmp) < link_bw) - info.lnkcmp += 3; - init.offset = nv_ro16(bios, info.lnkcmp + 1); - - nvbios_exec(&init); - } - - clksor |= link_bw << 18; - dpctrl |= ((1 << link_nr) - 1) << 16; - if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH) - dpctrl |= 0x00004000; - - for (i = 0; i < link_nr; i++) - lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3); - - nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor); - nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); - nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane); - return 0; -} - -int -nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane, - u16 type, u16 mask, u32 data, struct dcb_output *dcbo) -{ - struct nouveau_bios *bios = nouveau_bios(priv); - const u32 loff = (or * 0x800) + (link * 0x80); - const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8; - const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE); - u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane); - u8 ver, hdr, cnt, len; - struct nvbios_dpout outp; - struct nvbios_dpcfg ocfg; - - addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp); - if (!addr) - return -ENODEV; - - addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg); - if (!addr) - return -EINVAL; - - nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); - nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); - nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); - nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000); - return 0; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c index 5103e88d1877..e1f013d39768 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c @@ -28,39 +28,37 @@ #include 
#include -static int -nouveau_dmaobj_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) +int +nouveau_dmaobj_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + void *data, u32 size, int len, void **pobject) { - struct nouveau_dmaeng *dmaeng = (void *)engine; - struct nouveau_dmaobj *dmaobj; - struct nouveau_gpuobj *gpuobj; struct nv_dma_class *args = data; + struct nouveau_dmaobj *object; int ret; if (size < sizeof(*args)) return -EINVAL; - ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj); - *pobject = nv_object(dmaobj); + ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject); + object = *pobject; if (ret) return ret; switch (args->flags & NV_DMA_TARGET_MASK) { case NV_DMA_TARGET_VM: - dmaobj->target = NV_MEM_TARGET_VM; + object->target = NV_MEM_TARGET_VM; break; case NV_DMA_TARGET_VRAM: - dmaobj->target = NV_MEM_TARGET_VRAM; + object->target = NV_MEM_TARGET_VRAM; break; case NV_DMA_TARGET_PCI: - dmaobj->target = NV_MEM_TARGET_PCI; + object->target = NV_MEM_TARGET_PCI; break; case NV_DMA_TARGET_PCI_US: case NV_DMA_TARGET_AGP: - dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; + object->target = NV_MEM_TARGET_PCI_NOSNOOP; break; default: return -EINVAL; @@ -68,53 +66,22 @@ nouveau_dmaobj_ctor(struct nouveau_object *parent, switch (args->flags & NV_DMA_ACCESS_MASK) { case NV_DMA_ACCESS_VM: - dmaobj->access = NV_MEM_ACCESS_VM; + object->access = NV_MEM_ACCESS_VM; break; case NV_DMA_ACCESS_RD: - dmaobj->access = NV_MEM_ACCESS_RO; + object->access = NV_MEM_ACCESS_RO; break; case NV_DMA_ACCESS_WR: - dmaobj->access = NV_MEM_ACCESS_WO; + object->access = NV_MEM_ACCESS_WO; break; case NV_DMA_ACCESS_RDWR: - dmaobj->access = NV_MEM_ACCESS_RW; + object->access = NV_MEM_ACCESS_RW; break; default: return -EINVAL; } - dmaobj->start = args->start; - dmaobj->limit = args->limit; - dmaobj->conf0 = args->conf0; - - switch (nv_mclass(parent)) { - case NV_DEVICE_CLASS: - /* delayed, or no, binding */ - break; - default: - ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj); - if (ret == 0) { - nouveau_object_ref(NULL, pobject); - *pobject = nv_object(gpuobj); - } - break; - } - - return ret; + object->start = args->start; + object->limit = args->limit; + return 0; } - -static struct nouveau_ofuncs -nouveau_dmaobj_ofuncs = { - .ctor = nouveau_dmaobj_ctor, - .dtor = nouveau_object_destroy, - .init = nouveau_object_init, - .fini = nouveau_object_fini, -}; - -struct nouveau_oclass -nouveau_dmaobj_sclass[] = { - { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - {} -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c index 027d8217c0fa..9f4cc2f31994 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c @@ -34,6 +34,10 @@ struct nv04_dmaeng_priv { struct nouveau_dmaeng base; }; +struct nv04_dmaobj_priv { + struct nouveau_dmaobj base; +}; + static int nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent, @@ -49,18 +53,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, u32 length = dmaobj->limit - dmaobj->start; int ret; - if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { - switch (nv_mclass(parent->parent)) { - case NV03_CHANNEL_DMA_CLASS: - case 
NV10_CHANNEL_DMA_CLASS: - case NV17_CHANNEL_DMA_CLASS: - case NV40_CHANNEL_DMA_CLASS: - break; - default: - return -EINVAL; - } - } - if (dmaobj->target == NV_MEM_TARGET_VM) { if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) { struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; @@ -113,6 +105,56 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, return ret; } +static int +nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_dmaeng *dmaeng = (void *)engine; + struct nv04_dmaobj_priv *dmaobj; + struct nouveau_gpuobj *gpuobj; + int ret; + + ret = nouveau_dmaobj_create(parent, engine, oclass, + data, size, &dmaobj); + *pobject = nv_object(dmaobj); + if (ret) + return ret; + + switch (nv_mclass(parent)) { + case NV_DEVICE_CLASS: + break; + case NV03_CHANNEL_DMA_CLASS: + case NV10_CHANNEL_DMA_CLASS: + case NV17_CHANNEL_DMA_CLASS: + case NV40_CHANNEL_DMA_CLASS: + ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj); + nouveau_object_ref(NULL, pobject); + *pobject = nv_object(gpuobj); + break; + default: + return -EINVAL; + } + + return ret; +} + +static struct nouveau_ofuncs +nv04_dmaobj_ofuncs = { + .ctor = nv04_dmaobj_ctor, + .dtor = _nouveau_dmaobj_dtor, + .init = _nouveau_dmaobj_init, + .fini = _nouveau_dmaobj_fini, +}; + +static struct nouveau_oclass +nv04_dmaobj_sclass[] = { + { 0x0002, &nv04_dmaobj_ofuncs }, + { 0x0003, &nv04_dmaobj_ofuncs }, + { 0x003d, &nv04_dmaobj_ofuncs }, + {} +}; + static int nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -126,7 +168,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; + priv->base.base.sclass = nv04_dmaobj_sclass; priv->base.bind = nv04_dmaobj_bind; return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c index 750183f7c057..045d2565e289 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c @@ -32,74 +32,36 @@ struct nv50_dmaeng_priv { struct nouveau_dmaeng base; }; +struct nv50_dmaobj_priv { + struct nouveau_dmaobj base; +}; + static int nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent, struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { - u32 flags0 = nv_mclass(dmaobj); - u32 flags5 = 0x00000000; + u32 flags = nv_mclass(dmaobj); int ret; - if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { - switch (nv_mclass(parent->parent)) { - case NV50_CHANNEL_DMA_CLASS: - case NV84_CHANNEL_DMA_CLASS: - case NV50_CHANNEL_IND_CLASS: - case NV84_CHANNEL_IND_CLASS: - case NV50_DISP_MAST_CLASS: - case NV84_DISP_MAST_CLASS: - case NV94_DISP_MAST_CLASS: - case NVA0_DISP_MAST_CLASS: - case NVA3_DISP_MAST_CLASS: - case NV50_DISP_SYNC_CLASS: - case NV84_DISP_SYNC_CLASS: - case NV94_DISP_SYNC_CLASS: - case NVA0_DISP_SYNC_CLASS: - case NVA3_DISP_SYNC_CLASS: - case NV50_DISP_OVLY_CLASS: - case NV84_DISP_OVLY_CLASS: - case NV94_DISP_OVLY_CLASS: - case NVA0_DISP_OVLY_CLASS: - case NVA3_DISP_OVLY_CLASS: - break; - default: - return -EINVAL; - } - } - - if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM; - dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM; - dmaobj->conf0 |= 
NV50_DMA_CONF0_COMP_VM; - dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM; - } else { - dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US; - dmaobj->conf0 |= NV50_DMA_CONF0_PART_256; - dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE; - dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR; - } - } - - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22; - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22; - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV); - flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART); - switch (dmaobj->target) { case NV_MEM_TARGET_VM: - flags0 |= 0x00000000; + flags |= 0x00000000; + flags |= 0x60000000; /* COMPRESSION_USEVM */ + flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */ break; case NV_MEM_TARGET_VRAM: - flags0 |= 0x00010000; + flags |= 0x00010000; + flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */ break; case NV_MEM_TARGET_PCI: - flags0 |= 0x00020000; + flags |= 0x00020000; + flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */ break; case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00030000; + flags |= 0x00030000; + flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */ break; default: return -EINVAL; @@ -109,28 +71,78 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, case NV_MEM_ACCESS_VM: break; case NV_MEM_ACCESS_RO: - flags0 |= 0x00040000; + flags |= 0x00040000; break; case NV_MEM_ACCESS_WO: case NV_MEM_ACCESS_RW: - flags0 |= 0x00080000; + flags |= 0x00080000; break; } ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); + nv_wo32(*pgpuobj, 0x00, flags); nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | upper_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x10, 0x00000000); - nv_wo32(*pgpuobj, 0x14, flags5); + nv_wo32(*pgpuobj, 0x14, 0x00000000); } return ret; } +static int +nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_dmaeng *dmaeng = (void *)engine; + struct nv50_dmaobj_priv *dmaobj; + struct nouveau_gpuobj *gpuobj; + int ret; + + ret = nouveau_dmaobj_create(parent, engine, oclass, + data, size, &dmaobj); + *pobject = nv_object(dmaobj); + if (ret) + return ret; + + switch (nv_mclass(parent)) { + case NV_DEVICE_CLASS: + break; + case NV50_CHANNEL_DMA_CLASS: + case NV84_CHANNEL_DMA_CLASS: + case NV50_CHANNEL_IND_CLASS: + case NV84_CHANNEL_IND_CLASS: + ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj); + nouveau_object_ref(NULL, pobject); + *pobject = nv_object(gpuobj); + break; + default: + return -EINVAL; + } + + return ret; +} + +static struct nouveau_ofuncs +nv50_dmaobj_ofuncs = { + .ctor = nv50_dmaobj_ctor, + .dtor = _nouveau_dmaobj_dtor, + .init = _nouveau_dmaobj_init, + .fini = _nouveau_dmaobj_fini, +}; + +static struct nouveau_oclass +nv50_dmaobj_sclass[] = { + { 0x0002, &nv50_dmaobj_ofuncs }, + { 0x0003, &nv50_dmaobj_ofuncs }, + { 0x003d, &nv50_dmaobj_ofuncs }, + {} +}; + static int nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -144,7 +156,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; + priv->base.base.sclass = nv50_dmaobj_sclass; priv->base.bind = nv50_dmaobj_bind; return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c 
b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c index cd3970d03b80..5baa08695535 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c @@ -22,9 +22,7 @@ * Authors: Ben Skeggs */ -#include #include -#include #include #include @@ -33,85 +31,44 @@ struct nvc0_dmaeng_priv { struct nouveau_dmaeng base; }; +struct nvc0_dmaobj_priv { + struct nouveau_dmaobj base; +}; + static int -nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, - struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, - struct nouveau_gpuobj **pgpuobj) +nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) { - u32 flags0 = nv_mclass(dmaobj); - u32 flags5 = 0x00000000; + struct nvc0_dmaobj_priv *dmaobj; int ret; - if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { - switch (nv_mclass(parent->parent)) { - case NVA3_DISP_MAST_CLASS: - case NVA3_DISP_SYNC_CLASS: - case NVA3_DISP_OVLY_CLASS: - break; - default: - return -EINVAL; - } - } else - return 0; - - if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM; - dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM; - } else { - dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US; - dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR; - dmaobj->conf0 |= 0x00020000; - } - } - - flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22; - flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV); - flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN); + ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj); + *pobject = nv_object(dmaobj); + if (ret) + return ret; - switch (dmaobj->target) { - case NV_MEM_TARGET_VM: - flags0 |= 0x00000000; - break; - case NV_MEM_TARGET_VRAM: - flags0 |= 0x00010000; - break; - case NV_MEM_TARGET_PCI: - flags0 |= 0x00020000; - break; - case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00030000; - break; - default: + if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start) return -EINVAL; - } - switch (dmaobj->access) { - case NV_MEM_ACCESS_VM: - break; - case NV_MEM_ACCESS_RO: - flags0 |= 0x00040000; - break; - case NV_MEM_ACCESS_WO: - case NV_MEM_ACCESS_RW: - flags0 |= 0x00080000; - break; - } + return 0; +} - ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); - if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); - nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); - nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); - nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | - upper_32_bits(dmaobj->start)); - nv_wo32(*pgpuobj, 0x10, 0x00000000); - nv_wo32(*pgpuobj, 0x14, flags5); - } +static struct nouveau_ofuncs +nvc0_dmaobj_ofuncs = { + .ctor = nvc0_dmaobj_ctor, + .dtor = _nouveau_dmaobj_dtor, + .init = _nouveau_dmaobj_init, + .fini = _nouveau_dmaobj_fini, +}; - return ret; -} +static struct nouveau_oclass +nvc0_dmaobj_sclass[] = { + { 0x0002, &nvc0_dmaobj_ofuncs }, + { 0x0003, &nvc0_dmaobj_ofuncs }, + { 0x003d, &nvc0_dmaobj_ofuncs }, + {} +}; static int nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, @@ -126,8 +83,7 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; - priv->base.bind = nvc0_dmaobj_bind; + priv->base.base.sclass = nvc0_dmaobj_sclass; return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c 
b/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c deleted file mode 100644 index d1528752980c..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include -#include - -#include -#include - -struct nvd0_dmaeng_priv { - struct nouveau_dmaeng base; -}; - -static int -nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, - struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, - struct nouveau_gpuobj **pgpuobj) -{ - u32 flags0 = 0x00000000; - int ret; - - if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { - switch (nv_mclass(parent->parent)) { - case NVD0_DISP_MAST_CLASS: - case NVD0_DISP_SYNC_CLASS: - case NVD0_DISP_OVLY_CLASS: - case NVE0_DISP_MAST_CLASS: - case NVE0_DISP_SYNC_CLASS: - case NVE0_DISP_OVLY_CLASS: - break; - default: - return -EINVAL; - } - } else - return 0; - - if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM; - dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP; - } else { - dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR; - dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP; - } - } - - flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20; - flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4; - - switch (dmaobj->target) { - case NV_MEM_TARGET_VRAM: - flags0 |= 0x00000009; - break; - default: - return -EINVAL; - break; - } - - ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); - if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); - nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8); - nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8); - nv_wo32(*pgpuobj, 0x0c, 0x00000000); - nv_wo32(*pgpuobj, 0x10, 0x00000000); - nv_wo32(*pgpuobj, 0x14, 0x00000000); - } - - return ret; -} - -static int -nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvd0_dmaeng_priv *priv; - int ret; - - ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; - priv->base.bind = nvd0_dmaobj_bind; - return 0; -} - -struct nouveau_oclass -nvd0_dmaeng_oclass = { - .handle = NV_ENGINE(DMAOBJ, 0xd0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvd0_dmaeng_ctor, - .dtor = _nouveau_dmaeng_dtor, - 
.init = _nouveau_dmaeng_init, - .fini = _nouveau_dmaeng_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/base.c index c2b9db335816..bbb43c67c2ae 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/base.c @@ -24,7 +24,6 @@ #include #include -#include #include #include @@ -34,7 +33,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, int bar, u32 addr, u32 size, u32 pushbuf, - u64 engmask, int len, void **ptr) + u32 engmask, int len, void **ptr) { struct nouveau_device *device = nv_device(engine); struct nouveau_fifo *priv = (void *)engine; @@ -57,16 +56,18 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent, dmaeng = (void *)chan->pushdma->base.engine; switch (chan->pushdma->base.oclass->handle) { - case NV_DMA_FROM_MEMORY_CLASS: - case NV_DMA_IN_MEMORY_CLASS: + case 0x0002: + case 0x003d: break; default: return -EINVAL; } - ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); - if (ret) - return ret; + if (dmaeng->bind) { + ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); + if (ret) + return ret; + } /* find a free fifo channel */ spin_lock_irqsave(&priv->lock, flags); @@ -118,14 +119,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object) } u32 -_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr) +_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr) { struct nouveau_fifo_chan *chan = (void *)object; return ioread32_native(chan->user + addr); } void -_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data) +_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nouveau_fifo_chan *chan = (void *)object; iowrite32_native(data, chan->user + addr); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c index a47a8548f9e0..ea76e3e8c9c2 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c @@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 0x10000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR), &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) } if (!nv04_fifo_swmthd(priv, chid, mthd, data)) { - nv_error(priv, "CACHE_ERROR - Ch %d/%d " + nv_info(priv, "CACHE_ERROR - Ch %d/%d " "Mthd 0x%04x Data 0x%08x\n", chid, (mthd >> 13) & 7, mthd & 0x1ffc, data); @@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) u32 ib_get = nv_rd32(priv, 0x003334); u32 ib_put = nv_rd32(priv, 0x003330); - nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x " + nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x " "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " "State 0x%08x (err: %s) Push 0x%08x\n", chid, ho_get, dma_get, ho_put, @@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x003334, ib_put); } } else { - nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x " + nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x " "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n", chid, dma_get, dma_put, state, 
nv_dma_state_err(state), push); @@ -525,13 +525,14 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) if (device->card_type == NV_50) { if (status & 0x00000010) { + nv50_fb_trap(nouveau_fb(priv), 1); status &= ~0x00000010; nv_wr32(priv, 0x002100, 0x00000010); } } if (status) { - nv_warn(priv, "unknown intr 0x%08x, ch %d\n", + nv_info(priv, "unknown intr 0x%08x, ch %d\n", status, chid); nv_wr32(priv, NV03_PFIFO_INTR_0, status); status = 0; @@ -541,7 +542,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) } if (status) { - nv_error(priv, "still angry after %d spins, halt\n", cnt); + nv_info(priv, "still angry after %d spins, halt\n", cnt); nv_wr32(priv, 0x002140, 0); nv_wr32(priv, 0x000140, 0); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c index 2c927c1d173b..4ba75422b89d 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c @@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 0x10000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR), &chan); *pobject = nv_object(chan); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c index a9cb51d38c57..b96e6b0ae2b1 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c @@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 0x10000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */ + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG), /* NV31- */ &chan); *pobject = nv_object(chan); if (ret) diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c index 2b1f91721225..559c3b4e1b86 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c @@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x1000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG), &chan); *pobject = nv_object(chan); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index bd096364f680..536e7634a00d 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -112,6 +112,14 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend, return -EINVAL; } + nv_wo32(base->eng, addr + 0x00, 0x00000000); + nv_wo32(base->eng, addr + 0x04, 0x00000000); + nv_wo32(base->eng, addr + 0x08, 0x00000000); + nv_wo32(base->eng, addr + 0x0c, 0x00000000); + nv_wo32(base->eng, addr + 0x10, 0x00000000); + nv_wo32(base->eng, addr + 
0x14, 0x00000000); + bar->flush(bar); + /* HW bug workaround: * * PFIFO will hang forever if the connected engines don't report @@ -133,18 +141,8 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend, if (suspend) ret = -EBUSY; } - nv_wr32(priv, 0x00b860, me); - - if (ret == 0) { - nv_wo32(base->eng, addr + 0x00, 0x00000000); - nv_wo32(base->eng, addr + 0x04, 0x00000000); - nv_wo32(base->eng, addr + 0x08, 0x00000000); - nv_wo32(base->eng, addr + 0x0c, 0x00000000); - nv_wo32(base->eng, addr + 0x10, 0x00000000); - nv_wo32(base->eng, addr + 0x14, 0x00000000); - bar->flush(bar); - } + nv_wr32(priv, 0x00b860, me); return ret; } @@ -196,10 +194,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x2000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG), &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -249,10 +247,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x2000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG), &chan); *pobject = nv_object(chan); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c index 1eb1c512f503..b4fd26d8f166 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c @@ -95,6 +95,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend, return -EINVAL; } + nv_wo32(base->eng, addr + 0x00, 0x00000000); + nv_wo32(base->eng, addr + 0x04, 0x00000000); + nv_wo32(base->eng, addr + 0x08, 0x00000000); + nv_wo32(base->eng, addr + 0x0c, 0x00000000); + nv_wo32(base->eng, addr + 0x10, 0x00000000); + nv_wo32(base->eng, addr + 0x14, 0x00000000); + bar->flush(bar); + save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn); nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff); @@ -104,14 +112,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend, if (suspend) return -EBUSY; } - - nv_wo32(base->eng, addr + 0x00, 0x00000000); - nv_wo32(base->eng, addr + 0x04, 0x00000000); - nv_wo32(base->eng, addr + 0x08, 0x00000000); - nv_wo32(base->eng, addr + 0x0c, 0x00000000); - nv_wo32(base->eng, addr + 0x10, 0x00000000); - nv_wo32(base->eng, addr + 0x14, 0x00000000); - bar->flush(bar); return 0; } @@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x2000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG) | - (1ULL << NVDEV_ENGINE_ME) | - (1ULL << NVDEV_ENGINE_VP) | - (1ULL << NVDEV_ENGINE_CRYPT) | - (1ULL << NVDEV_ENGINE_BSP) | - (1ULL << NVDEV_ENGINE_PPP) | - (1ULL << NVDEV_ENGINE_COPY0) | - (1ULL << NVDEV_ENGINE_UNK1C1), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG) | + (1 << 
NVDEV_ENGINE_ME) | + (1 << NVDEV_ENGINE_VP) | + (1 << NVDEV_ENGINE_CRYPT) | + (1 << NVDEV_ENGINE_BSP) | + (1 << NVDEV_ENGINE_PPP) | + (1 << NVDEV_ENGINE_COPY0) | + (1 << NVDEV_ENGINE_UNK1C1), &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 0x2000, args->pushbuf, - (1ULL << NVDEV_ENGINE_DMAOBJ) | - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_MPEG) | - (1ULL << NVDEV_ENGINE_ME) | - (1ULL << NVDEV_ENGINE_VP) | - (1ULL << NVDEV_ENGINE_CRYPT) | - (1ULL << NVDEV_ENGINE_BSP) | - (1ULL << NVDEV_ENGINE_PPP) | - (1ULL << NVDEV_ENGINE_COPY0) | - (1ULL << NVDEV_ENGINE_UNK1C1), &chan); + (1 << NVDEV_ENGINE_DMAOBJ) | + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_MPEG) | + (1 << NVDEV_ENGINE_ME) | + (1 << NVDEV_ENGINE_VP) | + (1 << NVDEV_ENGINE_CRYPT) | + (1 << NVDEV_ENGINE_BSP) | + (1 << NVDEV_ENGINE_PPP) | + (1 << NVDEV_ENGINE_COPY0) | + (1 << NVDEV_ENGINE_UNK1C1), &chan); *pobject = nv_object(chan); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index b4365dde1859..6f21be600557 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -103,9 +103,6 @@ nvc0_fifo_context_attach(struct nouveau_object *parent, case NVDEV_ENGINE_GR : addr = 0x0210; break; case NVDEV_ENGINE_COPY0: addr = 0x0230; break; case NVDEV_ENGINE_COPY1: addr = 0x0240; break; - case NVDEV_ENGINE_BSP : addr = 0x0270; break; - case NVDEV_ENGINE_VP : addr = 0x0250; break; - case NVDEV_ENGINE_PPP : addr = 0x0260; break; default: return -EINVAL; } @@ -140,13 +137,14 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend, case NVDEV_ENGINE_GR : addr = 0x0210; break; case NVDEV_ENGINE_COPY0: addr = 0x0230; break; case NVDEV_ENGINE_COPY1: addr = 0x0240; break; - case NVDEV_ENGINE_BSP : addr = 0x0270; break; - case NVDEV_ENGINE_VP : addr = 0x0250; break; - case NVDEV_ENGINE_PPP : addr = 0x0260; break; default: return -EINVAL; } + nv_wo32(base, addr + 0x00, 0x00000000); + nv_wo32(base, addr + 0x04, 0x00000000); + bar->flush(bar); + nv_wr32(priv, 0x002634, chan->base.chid); if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { nv_error(priv, "channel %d kick timeout\n", chan->base.chid); @@ -154,9 +152,6 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend, return -EBUSY; } - nv_wo32(base, addr + 0x00, 0x00000000); - nv_wo32(base, addr + 0x04, 0x00000000); - bar->flush(bar); return 0; } @@ -180,13 +175,10 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent, ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, priv->user.bar.offset, 0x1000, args->pushbuf, - (1ULL << NVDEV_ENGINE_SW) | - (1ULL << NVDEV_ENGINE_GR) | - (1ULL << NVDEV_ENGINE_COPY0) | - (1ULL << NVDEV_ENGINE_COPY1) | - (1ULL << NVDEV_ENGINE_BSP) | - (1ULL << NVDEV_ENGINE_VP) | - (1ULL << NVDEV_ENGINE_PPP), &chan); + (1 << NVDEV_ENGINE_SW) | + (1 << NVDEV_ENGINE_GR) | + (1 << NVDEV_ENGINE_COPY0) | + (1 << NVDEV_ENGINE_COPY1), &chan); *pobject = nv_object(chan); if (ret) return ret; @@ -502,7 +494,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev) u32 stat = nv_rd32(priv, 0x002100) & mask; if (stat & 0x00000100) { - nv_warn(priv, "unknown status 0x00000100\n"); + nv_info(priv, "unknown status 0x00000100\n"); nv_wr32(priv, 0x002100, 0x00000100); stat 
&= ~0x00000100; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index c930da99c2c1..36e81b6fafbc 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -38,12 +38,12 @@ #include #include -#define _(a,b) { (a), ((1ULL << (a)) | (b)) } +#define _(a,b) { (a), ((1 << (a)) | (b)) } static const struct { - u64 subdev; - u64 mask; + int subdev; + u32 mask; } fifo_engine[] = { - _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)), + _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)), _(NVDEV_ENGINE_VP , 0), _(NVDEV_ENGINE_PPP , 0), _(NVDEV_ENGINE_BSP , 0), @@ -138,9 +138,6 @@ nve0_fifo_context_attach(struct nouveau_object *parent, case NVDEV_ENGINE_GR : case NVDEV_ENGINE_COPY0: case NVDEV_ENGINE_COPY1: addr = 0x0210; break; - case NVDEV_ENGINE_BSP : addr = 0x0270; break; - case NVDEV_ENGINE_VP : addr = 0x0250; break; - case NVDEV_ENGINE_PPP : addr = 0x0260; break; default: return -EINVAL; } @@ -175,13 +172,14 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend, case NVDEV_ENGINE_GR : case NVDEV_ENGINE_COPY0: case NVDEV_ENGINE_COPY1: addr = 0x0210; break; - case NVDEV_ENGINE_BSP : addr = 0x0270; break; - case NVDEV_ENGINE_VP : addr = 0x0250; break; - case NVDEV_ENGINE_PPP : addr = 0x0260; break; default: return -EINVAL; } + nv_wo32(base, addr + 0x00, 0x00000000); + nv_wo32(base, addr + 0x04, 0x00000000); + bar->flush(bar); + nv_wr32(priv, 0x002634, chan->base.chid); if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { nv_error(priv, "channel %d kick timeout\n", chan->base.chid); @@ -189,9 +187,6 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend, return -EBUSY; } - nv_wo32(base, addr + 0x00, 0x00000000); - nv_wo32(base, addr + 0x04, 0x00000000); - bar->flush(bar); return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c index e30a9c5ff1fc..618528248457 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c @@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv03_graph_gdi_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_patt }, - { 0x0188, 0x0188, nv04_graph_mthd_bind_rop }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_patt }, + { 0x0188, nv04_graph_mthd_bind_rop }, + { 0x018c, nv04_graph_mthd_bind_beta1 }, + { 0x0190, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_gdi_omthds[] = { - { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv01_graph_blit_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, 
nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv01_graph_mthd_bind_patt }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst }, - { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv01_graph_mthd_bind_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_surf_dst }, + { 0x019c, nv04_graph_mthd_bind_surf_src }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_blit_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_patt }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 }, - { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_beta4 }, + { 0x019c, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_iifc_omthds[] = { - { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma }, - { 0x018c, 0x018c, nv01_graph_mthd_bind_clip }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_patt }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_rop }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 }, - { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 }, - { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, - { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation }, + { 0x0188, nv01_graph_mthd_bind_chroma }, + { 0x018c, nv01_graph_mthd_bind_clip }, + { 0x0190, nv04_graph_mthd_bind_patt }, + { 0x0194, nv04_graph_mthd_bind_rop }, + { 0x0198, nv04_graph_mthd_bind_beta1 }, + { 0x019c, nv04_graph_mthd_bind_beta4 }, + { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, + { 0x03e4, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv01_graph_ifc_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv01_graph_mthd_bind_patt }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv01_graph_mthd_bind_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_ifc_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_patt }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 }, - { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_clip 
}, + { 0x018c, nv04_graph_mthd_bind_patt }, + { 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, nv04_graph_mthd_bind_beta4 }, + { 0x019c, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv03_graph_sifc_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_sifc_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, - { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv03_graph_sifm_omthds[] = { - { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x0304, 0x0304, nv04_graph_mthd_set_operation }, + { 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x0304, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_sifm_omthds[] = { - { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x0304, 0x0304, nv04_graph_mthd_set_operation }, + { 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x0304, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_surf3d_omthds[] = { - { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h }, - { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v }, + { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, + { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, {} }; static struct nouveau_omthds nv03_graph_ttri_omthds[] = { - { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta }, + { 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, nv04_graph_mthd_bind_surf_color }, + { 0x0190, nv04_graph_mthd_bind_surf_zeta }, {} }; static struct nouveau_omthds nv01_graph_prim_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_clip }, - { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, - { 0x018c, 0x018c, 
nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_clip }, + { 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; static struct nouveau_omthds nv04_graph_prim_omthds[] = { - { 0x0184, 0x0184, nv01_graph_mthd_bind_clip }, - { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, - { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + { 0x0184, nv01_graph_mthd_bind_clip }, + { 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, nv04_graph_mthd_set_operation }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c index 5c0f843ea249..92521c89e77f 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c @@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv17_celcius_omthds[] = { - { 0x1638, 0x1638, nv17_graph_mthd_lma_window }, - { 0x163c, 0x163c, nv17_graph_mthd_lma_window }, - { 0x1640, 0x1640, nv17_graph_mthd_lma_window }, - { 0x1644, 0x1644, nv17_graph_mthd_lma_window }, - { 0x1658, 0x1658, nv17_graph_mthd_lma_enable }, + { 0x1638, nv17_graph_mthd_lma_window }, + { 0x163c, nv17_graph_mthd_lma_window }, + { 0x1640, nv17_graph_mthd_lma_window }, + { 0x1644, nv17_graph_mthd_lma_window }, + { 0x1658, nv17_graph_mthd_lma_enable }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c index 5b20401bf911..8f3f619c4a78 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c @@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i) nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr); - if (nv_device(engine)->chipset != 0x34) { + if (nv_device(engine)->card_type == NV_20) { nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp); @@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev) nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); if (show) { - nv_error(priv, ""); + nv_info(priv, ""); nouveau_bitfield_print(nv10_graph_intr_name, show); printk(" nsource:"); nouveau_bitfield_print(nv04_graph_nsource, nsource); printk(" nstatus:"); nouveau_bitfield_print(nv10_graph_nstatus, nstatus); printk("\n"); - nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n", + nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n", chid, subc, class, mthd, data); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c index 0b36dd3deebd..cc6574eeb80e 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c +++ 
b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c @@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i) switch (nv_device(priv)->chipset) { case 0x40: - case 0x41: + case 0x41: /* guess */ case 0x42: case 0x43: - case 0x45: + case 0x45: /* guess */ case 0x4e: nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); @@ -227,21 +227,6 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i) nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); - switch (nv_device(priv)->chipset) { - case 0x40: - case 0x45: - nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); - nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp); - break; - case 0x41: - case 0x42: - case 0x43: - nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp); - nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp); - break; - default: - break; - } break; case 0x44: case 0x4a: @@ -250,31 +235,18 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i) nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); break; case 0x46: - case 0x4c: case 0x47: case 0x49: case 0x4b: - case 0x63: + case 0x4c: case 0x67: - case 0x68: + default: nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch); nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit); nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr); nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); - switch (nv_device(priv)->chipset) { - case 0x47: - case 0x49: - case 0x4b: - nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp); - nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp); - break; - default: - break; - } - break; - default: break; } @@ -321,7 +293,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev) nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); if (show) { - nv_error(priv, ""); + nv_info(priv, ""); nouveau_bitfield_print(nv10_graph_intr_name, show); printk(" nsource:"); nouveau_bitfield_print(nv04_graph_nsource, nsource); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index b1c3d835b4c2..ab3b9dcaf478 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c @@ -184,65 +184,6 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine) return 0; } -static const struct nouveau_bitfield nv50_pgraph_status[] = { - { 0x00000001, "BUSY" }, /* set when any bit is set */ - { 0x00000002, "DISPATCH" }, - { 0x00000004, "UNK2" }, - { 0x00000008, "UNK3" }, - { 0x00000010, "UNK4" }, - { 0x00000020, "UNK5" }, - { 0x00000040, "M2MF" }, - { 0x00000080, "UNK7" }, - { 0x00000100, "CTXPROG" }, - { 0x00000200, "VFETCH" }, - { 0x00000400, "CCACHE_UNK4" }, - { 0x00000800, "STRMOUT_GSCHED_UNK5" }, - { 0x00001000, "UNK14XX" }, - { 0x00002000, "UNK24XX_CSCHED" }, - { 0x00004000, "UNK1CXX" }, - { 0x00008000, "CLIPID" }, - { 0x00010000, "ZCULL" }, - { 0x00020000, "ENG2D" }, - { 0x00040000, "UNK34XX" }, - { 0x00080000, "TPRAST" }, - { 0x00100000, "TPROP" }, - { 0x00200000, "TEX" }, - { 0x00400000, "TPVP" }, - { 0x00800000, "MP" }, - { 0x01000000, "ROP" }, - {} -}; - -static const char *const nv50_pgraph_vstatus_0[] = { - "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL -}; - -static const char *const nv50_pgraph_vstatus_1[] = { - "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL -}; - -static const char 
*const nv50_pgraph_vstatus_2[] = { - "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX", - "ROP", NULL -}; - -static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r, - const char *const units[], u32 status) -{ - int i; - - nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status); - - for (i = 0; units[i] && status; i++) { - if ((status & 7) == 1) - pr_cont(" %s", units[i]); - status >>= 3; - } - if (status) - pr_cont(" (invalid: 0x%x)", status); - pr_cont("\n"); -} - static int nv84_graph_tlb_flush(struct nouveau_engine *engine) { @@ -278,19 +219,10 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine) !(timeout = ptimer->read(ptimer) - start > 2000000000)); if (timeout) { - nv_error(priv, "PGRAPH TLB flush idle timeout fail\n"); - - tmp = nv_rd32(priv, 0x400700); - nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp); - nouveau_bitfield_print(nv50_pgraph_status, tmp); - pr_cont("\n"); - - nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0, - nv_rd32(priv, 0x400380)); - nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1, - nv_rd32(priv, 0x400384)); - nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2, - nv_rd32(priv, 0x400388)); + nv_error(priv, "PGRAPH TLB flush idle timeout fail: " + "0x%08x 0x%08x 0x%08x 0x%08x\n", + nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380), + nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388)); } nv50_vm_flush_engine(&engine->base, 0x00); @@ -521,13 +453,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, } if (ustatus) { if (display) - nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); + nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); } nv_wr32(priv, ustatus_addr, 0xc0000000); } if (!tps && display) - nv_warn(priv, "%s - No TPs claiming errors?\n", name); + nv_info(priv, "%s - No TPs claiming errors?\n", name); } static int @@ -786,12 +718,13 @@ nv50_graph_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x400500, 0x00010001); if (show) { - nv_error(priv, ""); + nv_info(priv, ""); nouveau_bitfield_print(nv50_graph_intr_name, show); printk("\n"); nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x " "mthd 0x%04x data 0x%08x\n", chid, (u64)inst << 12, subc, class, mthd, data); + nv50_fb_trap(nouveau_fb(priv), 1); } if (nv_rd32(priv, 0x400824) & (1 << 31)) diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c index 47a02081d708..c62f2d0f5f0a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c @@ -814,7 +814,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) nv_wr32(priv, 0x41a100, 0x00000002); nv_wr32(priv, 0x409100, 0x00000002); if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001)) - nv_warn(priv, "0x409800 wait failed\n"); + nv_info(priv, "0x409800 wait failed\n"); nv_wr32(priv, 0x409840, 0xffffffff); nv_wr32(priv, 0x409500, 0x7fffffff); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/regs.h index fde8e24415e4..9c715a25cecb 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/graph/regs.h +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/graph/regs.h @@ -205,7 +205,6 @@ #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) #define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) -#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i)) #define NV10_PGRAPH_TILE(i) 
(0x00400B00 + (i*16)) #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) @@ -217,7 +216,6 @@ #define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16)) #define NV04_PGRAPH_V_RAM 0x00400D40 #define NV04_PGRAPH_W_RAM 0x00400D80 -#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i)) #define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 #define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 #define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 @@ -263,12 +261,9 @@ #define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 #define NV04_PGRAPH_DMA_B_SIZE 0x0040109C #define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 -#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i)) #define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) #define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) #define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) #define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) -#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i)) -#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i)) #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c index 9fd86375f4c4..1f394a2629e7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c @@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = { static struct nouveau_omthds nv31_mpeg_omthds[] = { - { 0x0190, 0x0190, nv31_mpeg_mthd_dma }, - { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma }, - { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma }, + { 0x0190, nv31_mpeg_mthd_dma }, + { 0x01a0, nv31_mpeg_mthd_dma }, + { 0x01b0, nv31_mpeg_mthd_dma }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c index bc7d12b30fc1..8678a9996d57 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c @@ -157,6 +157,7 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev) nv_wr32(priv, 0x00b100, stat); nv_wr32(priv, 0x00b230, 0x00000001); + nv50_fb_trap(nouveau_fb(priv), 1); } static void diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c index 5a5b2a773ed7..50e7e0da1981 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c @@ -22,18 +22,18 @@ * Authors: Ben Skeggs */ -#include -#include +#include #include +#include #include struct nv98_ppp_priv { - struct nouveau_engine base; + struct nouveau_ppp base; }; struct nv98_ppp_chan { - struct nouveau_engctx base; + struct nouveau_ppp_chan base; }; /******************************************************************************* @@ -49,16 +49,61 @@ nv98_ppp_sclass[] = { * PPPP context ******************************************************************************/ +static int +nv98_ppp_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv98_ppp_chan *priv; + int ret; + + ret = nouveau_ppp_context_create(parent, engine, oclass, NULL, + 0, 0, 0, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +static void +nv98_ppp_context_dtor(struct nouveau_object *object) +{ + struct nv98_ppp_chan *priv = (void *)object; + nouveau_ppp_context_destroy(&priv->base); +} + +static int +nv98_ppp_context_init(struct nouveau_object *object) +{ + struct nv98_ppp_chan *priv = (void *)object; + int ret; + + ret = 
nouveau_ppp_context_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv98_ppp_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv98_ppp_chan *priv = (void *)object; + return nouveau_ppp_context_fini(&priv->base, suspend); +} + static struct nouveau_oclass nv98_ppp_cclass = { .handle = NV_ENGCTX(PPP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nv98_ppp_context_ctor, + .dtor = nv98_ppp_context_dtor, + .init = nv98_ppp_context_init, + .fini = nv98_ppp_context_fini, + .rd32 = _nouveau_ppp_context_rd32, + .wr32 = _nouveau_ppp_context_wr32, }, }; @@ -66,6 +111,11 @@ nv98_ppp_cclass = { * PPPP engine/subdev functions ******************************************************************************/ +static void +nv98_ppp_intr(struct nouveau_subdev *subdev) +{ +} + static int nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -74,25 +124,52 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_ppp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PPPP", "ppp", &priv); + ret = nouveau_ppp_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00400002; + nv_subdev(priv)->intr = nv98_ppp_intr; nv_engine(priv)->cclass = &nv98_ppp_cclass; nv_engine(priv)->sclass = nv98_ppp_sclass; return 0; } +static void +nv98_ppp_dtor(struct nouveau_object *object) +{ + struct nv98_ppp_priv *priv = (void *)object; + nouveau_ppp_destroy(&priv->base); +} + +static int +nv98_ppp_init(struct nouveau_object *object) +{ + struct nv98_ppp_priv *priv = (void *)object; + int ret; + + ret = nouveau_ppp_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv98_ppp_fini(struct nouveau_object *object, bool suspend) +{ + struct nv98_ppp_priv *priv = (void *)object; + return nouveau_ppp_fini(&priv->base, suspend); +} + struct nouveau_oclass nv98_ppp_oclass = { .handle = NV_ENGINE(PPP, 0x98), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv98_ppp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = nv98_ppp_dtor, + .init = nv98_ppp_init, + .fini = nv98_ppp_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c deleted file mode 100644 index ebf0d860e2dd..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Maarten Lankhorst - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Maarten Lankhorst - */ - -#include - -#include - -struct nvc0_ppp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * PPP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_ppp_sclass[] = { - { 0x90b3, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PPPP context - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_ppp_cclass = { - .handle = NV_ENGCTX(PPP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PPPP engine/subdev functions - ******************************************************************************/ - -static int -nvc0_ppp_init(struct nouveau_object *object) -{ - struct nvc0_ppp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x086010, 0x0000fff2); - nv_wr32(priv, 0x08601c, 0x0000fff2); - return 0; -} - -static int -nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvc0_ppp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true, - "PPPP", "ppp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00000002; - nv_engine(priv)->cclass = &nvc0_ppp_cclass; - nv_engine(priv)->sclass = nvc0_ppp_sclass; - return 0; -} - -struct nouveau_oclass -nvc0_ppp_oclass = { - .handle = NV_ENGINE(PPP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvc0_ppp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nvc0_ppp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv04.c index 2a859a31c30d..3ca4c3aa90b7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv04.c @@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv04_software_omthds[] = { - { 0x0150, 0x0150, nv04_software_set_ref }, - { 0x0500, 0x0500, nv04_software_flip }, + { 0x0150, nv04_software_set_ref }, + { 0x0500, nv04_software_flip }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv10.c index a019364b1e13..6e699afbfdb7 100644 
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv10.c @@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv10_software_omthds[] = { - { 0x0500, 0x0500, nv10_software_flip }, + { 0x0500, nv10_software_flip }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv50.c index b0e7e1c01ce6..a2edcd38544a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nv50.c @@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nv50_software_omthds[] = { - { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem }, - { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset }, - { 0x0404, 0x0404, nv50_software_mthd_vblsem_value }, - { 0x0408, 0x0408, nv50_software_mthd_vblsem_release }, - { 0x0500, 0x0500, nv50_software_mthd_flip }, + { 0x018c, nv50_software_mthd_dma_vblsem }, + { 0x0400, nv50_software_mthd_vblsem_offset }, + { 0x0404, nv50_software_mthd_vblsem_value }, + { 0x0408, nv50_software_mthd_vblsem_release }, + { 0x0500, nv50_software_mthd_flip }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c index 282a1cd1bc2f..b7b0d7e330d6 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c @@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd, static struct nouveau_omthds nvc0_software_omthds[] = { - { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset }, - { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset }, - { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value }, - { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release }, - { 0x0500, 0x0500, nvc0_software_mthd_flip }, + { 0x0400, nvc0_software_mthd_vblsem_offset }, + { 0x0404, nvc0_software_mthd_vblsem_offset }, + { 0x0408, nvc0_software_mthd_vblsem_value }, + { 0x040c, nvc0_software_mthd_vblsem_release }, + { 0x0500, nvc0_software_mthd_flip }, {} }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c index 261cd96e6951..dd23c80e5405 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c @@ -22,13 +22,18 @@ * Authors: Ben Skeggs */ -#include +#include #include +#include #include struct nv84_vp_priv { - struct nouveau_engine base; + struct nouveau_vp base; +}; + +struct nv84_vp_chan { + struct nouveau_vp_chan base; }; /******************************************************************************* @@ -44,16 +49,61 @@ nv84_vp_sclass[] = { * PVP context ******************************************************************************/ +static int +nv84_vp_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_vp_chan *priv; + int ret; + + ret = nouveau_vp_context_create(parent, engine, oclass, NULL, + 0, 0, 0, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +static void +nv84_vp_context_dtor(struct nouveau_object *object) +{ + struct nv84_vp_chan *priv = (void *)object; + nouveau_vp_context_destroy(&priv->base); +} + +static int +nv84_vp_context_init(struct 
nouveau_object *object) +{ + struct nv84_vp_chan *priv = (void *)object; + int ret; + + ret = nouveau_vp_context_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv84_vp_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv84_vp_chan *priv = (void *)object; + return nouveau_vp_context_fini(&priv->base, suspend); +} + static struct nouveau_oclass nv84_vp_cclass = { .handle = NV_ENGCTX(VP, 0x84), .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_engctx_ctor, - .dtor = _nouveau_engctx_dtor, - .init = _nouveau_engctx_init, - .fini = _nouveau_engctx_fini, - .rd32 = _nouveau_engctx_rd32, - .wr32 = _nouveau_engctx_wr32, + .ctor = nv84_vp_context_ctor, + .dtor = nv84_vp_context_dtor, + .init = nv84_vp_context_init, + .fini = nv84_vp_context_fini, + .rd32 = _nouveau_vp_context_rd32, + .wr32 = _nouveau_vp_context_wr32, }, }; @@ -61,6 +111,11 @@ nv84_vp_cclass = { * PVP engine/subdev functions ******************************************************************************/ +static void +nv84_vp_intr(struct nouveau_subdev *subdev) +{ +} + static int nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -69,25 +124,52 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv84_vp_priv *priv; int ret; - ret = nouveau_engine_create(parent, engine, oclass, true, - "PVP", "vp", &priv); + ret = nouveau_vp_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x01020000; + nv_subdev(priv)->intr = nv84_vp_intr; nv_engine(priv)->cclass = &nv84_vp_cclass; nv_engine(priv)->sclass = nv84_vp_sclass; return 0; } +static void +nv84_vp_dtor(struct nouveau_object *object) +{ + struct nv84_vp_priv *priv = (void *)object; + nouveau_vp_destroy(&priv->base); +} + +static int +nv84_vp_init(struct nouveau_object *object) +{ + struct nv84_vp_priv *priv = (void *)object; + int ret; + + ret = nouveau_vp_init(&priv->base); + if (ret) + return ret; + + return 0; +} + +static int +nv84_vp_fini(struct nouveau_object *object, bool suspend) +{ + struct nv84_vp_priv *priv = (void *)object; + return nouveau_vp_fini(&priv->base, suspend); +} + struct nouveau_oclass nv84_vp_oclass = { .handle = NV_ENGINE(VP, 0x84), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv84_vp_ctor, - .dtor = _nouveau_engine_dtor, - .init = _nouveau_engine_init, - .fini = _nouveau_engine_fini, + .dtor = nv84_vp_dtor, + .init = nv84_vp_init, + .fini = nv84_vp_fini, }, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c deleted file mode 100644 index f761949d7039..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Maarten Lankhorst - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Maarten Lankhorst - */ - -#include - -#include - -struct nvc0_vp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * VP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_vp_sclass[] = { - { 0x90b2, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PVP context - ******************************************************************************/ - -static struct nouveau_oclass -nvc0_vp_cclass = { - .handle = NV_ENGCTX(VP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PVP engine/subdev functions - ******************************************************************************/ - -static int -nvc0_vp_init(struct nouveau_object *object) -{ - struct nvc0_vp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x085010, 0x0000fff2); - nv_wr32(priv, 0x08501c, 0x0000fff2); - return 0; -} - -static int -nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvc0_vp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true, - "PVP", "vp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00020000; - nv_engine(priv)->cclass = &nvc0_vp_cclass; - nv_engine(priv)->sclass = nvc0_vp_sclass; - return 0; -} - -struct nouveau_oclass -nvc0_vp_oclass = { - .handle = NV_ENGINE(VP, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvc0_vp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nvc0_vp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c deleted file mode 100644 index 2384ce5dbe16..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include - -#include - -struct nve0_vp_priv { - struct nouveau_falcon base; -}; - -/******************************************************************************* - * VP object classes - ******************************************************************************/ - -static struct nouveau_oclass -nve0_vp_sclass[] = { - { 0x95b2, &nouveau_object_ofuncs }, - {}, -}; - -/******************************************************************************* - * PVP context - ******************************************************************************/ - -static struct nouveau_oclass -nve0_vp_cclass = { - .handle = NV_ENGCTX(VP, 0xe0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = _nouveau_falcon_context_ctor, - .dtor = _nouveau_falcon_context_dtor, - .init = _nouveau_falcon_context_init, - .fini = _nouveau_falcon_context_fini, - .rd32 = _nouveau_falcon_context_rd32, - .wr32 = _nouveau_falcon_context_wr32, - }, -}; - -/******************************************************************************* - * PVP engine/subdev functions - ******************************************************************************/ - -static int -nve0_vp_init(struct nouveau_object *object) -{ - struct nve0_vp_priv *priv = (void *)object; - int ret; - - ret = nouveau_falcon_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x085010, 0x0000fff2); - nv_wr32(priv, 0x08501c, 0x0000fff2); - return 0; -} - -static int -nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nve0_vp_priv *priv; - int ret; - - ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true, - "PVP", "vp", &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - nv_subdev(priv)->unit = 0x00020000; - nv_engine(priv)->cclass = &nve0_vp_cclass; - nv_engine(priv)->sclass = nve0_vp_sclass; - return 0; -} - -struct nouveau_oclass -nve0_vp_oclass = { - .handle = NV_ENGINE(VP, 0xe0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nve0_vp_ctor, - .dtor = _nouveau_falcon_dtor, - .init = nve0_vp_init, - .fini = _nouveau_falcon_fini, - .rd32 = _nouveau_falcon_rd32, - .wr32 = _nouveau_falcon_wr32, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h index 47c4b3a5bd3a..6180ae9800fc 100644 --- 
a/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/class.h @@ -23,7 +23,6 @@ #define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL #define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL #define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL -#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL struct nv_device_class { u64 device; /* device identifier, ~0 for client default */ @@ -53,49 +52,11 @@ struct nv_device_class { #define NV_DMA_ACCESS_WR 0x00000200 #define NV_DMA_ACCESS_RDWR 0x00000300 -/* NV50:NVC0 */ -#define NV50_DMA_CONF0_ENABLE 0x80000000 -#define NV50_DMA_CONF0_PRIV 0x00300000 -#define NV50_DMA_CONF0_PRIV_VM 0x00000000 -#define NV50_DMA_CONF0_PRIV_US 0x00100000 -#define NV50_DMA_CONF0_PRIV__S 0x00200000 -#define NV50_DMA_CONF0_PART 0x00030000 -#define NV50_DMA_CONF0_PART_VM 0x00000000 -#define NV50_DMA_CONF0_PART_256 0x00010000 -#define NV50_DMA_CONF0_PART_1KB 0x00020000 -#define NV50_DMA_CONF0_COMP 0x00000180 -#define NV50_DMA_CONF0_COMP_NONE 0x00000000 -#define NV50_DMA_CONF0_COMP_VM 0x00000180 -#define NV50_DMA_CONF0_TYPE 0x0000007f -#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NV50_DMA_CONF0_TYPE_VM 0x0000007f - -/* NVC0:NVD9 */ -#define NVC0_DMA_CONF0_ENABLE 0x80000000 -#define NVC0_DMA_CONF0_PRIV 0x00300000 -#define NVC0_DMA_CONF0_PRIV_VM 0x00000000 -#define NVC0_DMA_CONF0_PRIV_US 0x00100000 -#define NVC0_DMA_CONF0_PRIV__S 0x00200000 -#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000 -#define NVC0_DMA_CONF0_TYPE 0x000000ff -#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff - -/* NVD9- */ -#define NVD0_DMA_CONF0_ENABLE 0x80000000 -#define NVD0_DMA_CONF0_PAGE 0x00000400 -#define NVD0_DMA_CONF0_PAGE_LP 0x00000000 -#define NVD0_DMA_CONF0_PAGE_SP 0x00000400 -#define NVD0_DMA_CONF0_TYPE 0x000000ff -#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff - struct nv_dma_class { u32 flags; u32 pad0; u64 start; u64 limit; - u32 conf0; }; /* DMA FIFO channel classes @@ -154,190 +115,4 @@ struct nve0_channel_ind_class { u32 engine; }; -/* 5070: NV50_DISP - * 8270: NV84_DISP - * 8370: NVA0_DISP - * 8870: NV94_DISP - * 8570: NVA3_DISP - * 9070: NVD0_DISP - * 9170: NVE0_DISP - */ - -#define NV50_DISP_CLASS 0x00005070 -#define NV84_DISP_CLASS 0x00008270 -#define NVA0_DISP_CLASS 0x00008370 -#define NV94_DISP_CLASS 0x00008870 -#define NVA3_DISP_CLASS 0x00008570 -#define NVD0_DISP_CLASS 0x00009070 -#define NVE0_DISP_CLASS 0x00009170 - -#define NV50_DISP_SOR_MTHD 0x00010000 -#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 -#define NV50_DISP_SOR_MTHD_HEAD 0x00000018 -#define NV50_DISP_SOR_MTHD_LINK 0x00000004 -#define NV50_DISP_SOR_MTHD_OR 0x00000003 - -#define NV50_DISP_SOR_PWR 0x00010000 -#define NV50_DISP_SOR_PWR_STATE 0x00000001 -#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001 -#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000 -#define NVA3_DISP_SOR_HDA_ELD 0x00010100 -#define NV84_DISP_SOR_HDMI_PWR 0x00012000 -#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000 -#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000 -#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000 -#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000 -#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f -#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000 -#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff -#define NV94_DISP_SOR_DP_TRAIN 0x00016000 -#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000 -#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000 -#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000 
-#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000 -#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001 -#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000 -#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001 -#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003 -#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000 -#define NV94_DISP_SOR_DP_LNKCTL 0x00016040 -#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000 -#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000 -#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000 -#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00 -#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007 -#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100) -#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300 -#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003 - -#define NV50_DISP_DAC_MTHD 0x00020000 -#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000 -#define NV50_DISP_DAC_MTHD_OR 0x00000003 - -#define NV50_DISP_DAC_PWR 0x00020000 -#define NV50_DISP_DAC_PWR_HSYNC 0x00000001 -#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000 -#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001 -#define NV50_DISP_DAC_PWR_VSYNC 0x00000004 -#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000 -#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004 -#define NV50_DISP_DAC_PWR_DATA 0x00000010 -#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000 -#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010 -#define NV50_DISP_DAC_PWR_STATE 0x00000040 -#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 -#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 -#define NV50_DISP_DAC_LOAD 0x0002000c -#define NV50_DISP_DAC_LOAD_VALUE 0x00000007 - -struct nv50_display_class { -}; - -/* 507a: NV50_DISP_CURS - * 827a: NV84_DISP_CURS - * 837a: NVA0_DISP_CURS - * 887a: NV94_DISP_CURS - * 857a: NVA3_DISP_CURS - * 907a: NVD0_DISP_CURS - * 917a: NVE0_DISP_CURS - */ - -#define NV50_DISP_CURS_CLASS 0x0000507a -#define NV84_DISP_CURS_CLASS 0x0000827a -#define NVA0_DISP_CURS_CLASS 0x0000837a -#define NV94_DISP_CURS_CLASS 0x0000887a -#define NVA3_DISP_CURS_CLASS 0x0000857a -#define NVD0_DISP_CURS_CLASS 0x0000907a -#define NVE0_DISP_CURS_CLASS 0x0000917a - -struct nv50_display_curs_class { - u32 head; -}; - -/* 507b: NV50_DISP_OIMM - * 827b: NV84_DISP_OIMM - * 837b: NVA0_DISP_OIMM - * 887b: NV94_DISP_OIMM - * 857b: NVA3_DISP_OIMM - * 907b: NVD0_DISP_OIMM - * 917b: NVE0_DISP_OIMM - */ - -#define NV50_DISP_OIMM_CLASS 0x0000507b -#define NV84_DISP_OIMM_CLASS 0x0000827b -#define NVA0_DISP_OIMM_CLASS 0x0000837b -#define NV94_DISP_OIMM_CLASS 0x0000887b -#define NVA3_DISP_OIMM_CLASS 0x0000857b -#define NVD0_DISP_OIMM_CLASS 0x0000907b -#define NVE0_DISP_OIMM_CLASS 0x0000917b - -struct nv50_display_oimm_class { - u32 head; -}; - -/* 507c: NV50_DISP_SYNC - * 827c: NV84_DISP_SYNC - * 837c: NVA0_DISP_SYNC - * 887c: NV94_DISP_SYNC - * 857c: NVA3_DISP_SYNC - * 907c: NVD0_DISP_SYNC - * 917c: NVE0_DISP_SYNC - */ - -#define NV50_DISP_SYNC_CLASS 0x0000507c -#define NV84_DISP_SYNC_CLASS 0x0000827c -#define NVA0_DISP_SYNC_CLASS 0x0000837c -#define NV94_DISP_SYNC_CLASS 0x0000887c -#define NVA3_DISP_SYNC_CLASS 0x0000857c -#define NVD0_DISP_SYNC_CLASS 0x0000907c -#define NVE0_DISP_SYNC_CLASS 0x0000917c - -struct nv50_display_sync_class { - u32 pushbuf; - u32 head; -}; - -/* 507d: NV50_DISP_MAST - * 827d: NV84_DISP_MAST - * 837d: NVA0_DISP_MAST - * 887d: NV94_DISP_MAST - * 857d: NVA3_DISP_MAST - * 907d: NVD0_DISP_MAST - * 917d: NVE0_DISP_MAST - */ - -#define NV50_DISP_MAST_CLASS 0x0000507d -#define NV84_DISP_MAST_CLASS 0x0000827d -#define NVA0_DISP_MAST_CLASS 0x0000837d -#define NV94_DISP_MAST_CLASS 
0x0000887d -#define NVA3_DISP_MAST_CLASS 0x0000857d -#define NVD0_DISP_MAST_CLASS 0x0000907d -#define NVE0_DISP_MAST_CLASS 0x0000917d - -struct nv50_display_mast_class { - u32 pushbuf; -}; - -/* 507e: NV50_DISP_OVLY - * 827e: NV84_DISP_OVLY - * 837e: NVA0_DISP_OVLY - * 887e: NV94_DISP_OVLY - * 857e: NVA3_DISP_OVLY - * 907e: NVD0_DISP_OVLY - * 917e: NVE0_DISP_OVLY - */ - -#define NV50_DISP_OVLY_CLASS 0x0000507e -#define NV84_DISP_OVLY_CLASS 0x0000827e -#define NVA0_DISP_OVLY_CLASS 0x0000837e -#define NV94_DISP_OVLY_CLASS 0x0000887e -#define NVA3_DISP_OVLY_CLASS 0x0000857e -#define NVD0_DISP_OVLY_CLASS 0x0000907e -#define NVE0_DISP_OVLY_CLASS 0x0000917e - -struct nv50_display_ovly_class { - u32 pushbuf; - u32 head; -}; - #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/engctx.h index 2fd48b564c7d..8a947b6872eb 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/engctx.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/engctx.h @@ -39,9 +39,6 @@ void nouveau_engctx_destroy(struct nouveau_engctx *); int nouveau_engctx_init(struct nouveau_engctx *); int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend); -int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, void *, u32, - struct nouveau_object **); void _nouveau_engctx_dtor(struct nouveau_object *); int _nouveau_engctx_init(struct nouveau_object *); int _nouveau_engctx_fini(struct nouveau_object *, bool suspend); diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/falcon.h deleted file mode 100644 index 1edec386ab36..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/falcon.h +++ /dev/null @@ -1,81 +0,0 @@ -#ifndef __NOUVEAU_FALCON_H__ -#define __NOUVEAU_FALCON_H__ - -#include -#include -#include - -struct nouveau_falcon_chan { - struct nouveau_engctx base; -}; - -#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \ - nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) -#define nouveau_falcon_context_destroy(d) \ - nouveau_engctx_destroy(&(d)->base) -#define nouveau_falcon_context_init(d) \ - nouveau_engctx_init(&(d)->base) -#define nouveau_falcon_context_fini(d,s) \ - nouveau_engctx_fini(&(d)->base, (s)) - -#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor -#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor -#define _nouveau_falcon_context_init _nouveau_engctx_init -#define _nouveau_falcon_context_fini _nouveau_engctx_fini -#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32 -#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32 - -struct nouveau_falcon_data { - bool external; -}; - -struct nouveau_falcon { - struct nouveau_engine base; - - u32 addr; - u8 version; - u8 secret; - - struct nouveau_gpuobj *core; - bool external; - - struct { - u32 limit; - u32 *data; - u32 size; - } code; - - struct { - u32 limit; - u32 *data; - u32 size; - } data; -}; - -#define nv_falcon(priv) (&(priv)->base) - -#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \ - nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \ - sizeof(**r),(void **)r) -#define nouveau_falcon_destroy(p) \ - nouveau_engine_destroy(&(p)->base) -#define nouveau_falcon_init(p) ({ \ - struct nouveau_falcon *falcon = (p); \ - _nouveau_falcon_init(nv_object(falcon)); \ -}) -#define nouveau_falcon_fini(p,s) ({ \ - struct nouveau_falcon *falcon = (p); \ - _nouveau_falcon_fini(nv_object(falcon), (s)); \ -}) - -int 
nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, u32, bool, const char *, - const char *, int, void **); - -#define _nouveau_falcon_dtor _nouveau_engine_dtor -int _nouveau_falcon_init(struct nouveau_object *); -int _nouveau_falcon_fini(struct nouveau_object *, bool); -u32 _nouveau_falcon_rd32(struct nouveau_object *, u64); -void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32); - -#endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h index b3b9ce4e9d38..6eaff79377ae 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h @@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref) void _nouveau_gpuobj_dtor(struct nouveau_object *); int _nouveau_gpuobj_init(struct nouveau_object *); int _nouveau_gpuobj_fini(struct nouveau_object *, bool); -u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64); -void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32); +u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32); +void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/mm.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/mm.h index 2514e81ade02..975137ba34a6 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/mm.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/mm.h @@ -21,12 +21,6 @@ struct nouveau_mm { int heap_nodes; }; -static inline bool -nouveau_mm_initialised(struct nouveau_mm *mm) -{ - return mm->block_size != 0; -} - int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block); int nouveau_mm_fini(struct nouveau_mm *); int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min, diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/object.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/object.h index 5982935ee23a..486f1a9217fd 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/object.h @@ -70,8 +70,7 @@ nv_pclass(struct nouveau_object *parent, u32 oclass) } struct nouveau_omthds { - u32 start; - u32 limit; + u32 method; int (*call)(struct nouveau_object *, u32, void *, u32); }; @@ -82,12 +81,12 @@ struct nouveau_ofuncs { void (*dtor)(struct nouveau_object *); int (*init)(struct nouveau_object *); int (*fini)(struct nouveau_object *, bool suspend); - u8 (*rd08)(struct nouveau_object *, u64 offset); - u16 (*rd16)(struct nouveau_object *, u64 offset); - u32 (*rd32)(struct nouveau_object *, u64 offset); - void (*wr08)(struct nouveau_object *, u64 offset, u8 data); - void (*wr16)(struct nouveau_object *, u64 offset, u16 data); - void (*wr32)(struct nouveau_object *, u64 offset, u32 data); + u8 (*rd08)(struct nouveau_object *, u32 offset); + u16 (*rd16)(struct nouveau_object *, u32 offset); + u32 (*rd32)(struct nouveau_object *, u32 offset); + void (*wr08)(struct nouveau_object *, u32 offset, u8 data); + void (*wr16)(struct nouveau_object *, u32 offset, u16 data); + void (*wr32)(struct nouveau_object *, u32 offset, u32 data); }; static inline struct nouveau_ofuncs * @@ -110,27 +109,21 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle); void nouveau_object_debug(void); static inline int -nv_exec(void *obj, u32 mthd, void *data, u32 size) +nv_call(void *obj, u32 mthd, u32 data) { struct nouveau_omthds *method = 
nv_oclass(obj)->omthds; while (method && method->call) { - if (mthd >= method->start && mthd <= method->limit) - return method->call(obj, mthd, data, size); + if (method->method == mthd) + return method->call(obj, mthd, &data, sizeof(data)); method++; } return -EINVAL; } -static inline int -nv_call(void *obj, u32 mthd, u32 data) -{ - return nv_exec(obj, mthd, &data, sizeof(data)); -} - static inline u8 -nv_ro08(void *obj, u64 addr) +nv_ro08(void *obj, u32 addr) { u8 data = nv_ofuncs(obj)->rd08(obj, addr); nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data); @@ -138,7 +131,7 @@ nv_ro08(void *obj, u64 addr) } static inline u16 -nv_ro16(void *obj, u64 addr) +nv_ro16(void *obj, u32 addr) { u16 data = nv_ofuncs(obj)->rd16(obj, addr); nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data); @@ -146,7 +139,7 @@ nv_ro16(void *obj, u64 addr) } static inline u32 -nv_ro32(void *obj, u64 addr) +nv_ro32(void *obj, u32 addr) { u32 data = nv_ofuncs(obj)->rd32(obj, addr); nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data); @@ -154,28 +147,28 @@ nv_ro32(void *obj, u64 addr) } static inline void -nv_wo08(void *obj, u64 addr, u8 data) +nv_wo08(void *obj, u32 addr, u8 data) { nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data); nv_ofuncs(obj)->wr08(obj, addr, data); } static inline void -nv_wo16(void *obj, u64 addr, u16 data) +nv_wo16(void *obj, u32 addr, u16 data) { nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data); nv_ofuncs(obj)->wr16(obj, addr, data); } static inline void -nv_wo32(void *obj, u64 addr, u32 data) +nv_wo32(void *obj, u32 addr, u32 data) { nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data); nv_ofuncs(obj)->wr32(obj, addr, data); } static inline u32 -nv_mo32(void *obj, u64 addr, u32 mask, u32 data) +nv_mo32(void *obj, u32 addr, u32 mask, u32 data) { u32 temp = nv_ro32(obj, addr); nv_wo32(obj, addr, (temp & ~mask) | data); diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/core/parent.h b/trunk/drivers/gpu/drm/nouveau/core/include/core/parent.h index 31cd852c96df..3c2e940eb0f8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/core/parent.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/core/parent.h @@ -14,7 +14,7 @@ struct nouveau_parent { struct nouveau_object base; struct nouveau_sclass *sclass; - u64 engine; + u32 engine; int (*context_attach)(struct nouveau_object *, struct nouveau_object *); diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/bsp.h index 13ccdf54dfad..75d1ed5f85fd 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/bsp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/bsp.h @@ -1,8 +1,45 @@ #ifndef __NOUVEAU_BSP_H__ #define __NOUVEAU_BSP_H__ +#include +#include + +struct nouveau_bsp_chan { + struct nouveau_engctx base; +}; + +#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_bsp_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_bsp_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_bsp_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor +#define _nouveau_bsp_context_init _nouveau_engctx_init +#define _nouveau_bsp_context_fini _nouveau_engctx_fini +#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_bsp { + struct nouveau_engine base; +}; + +#define nouveau_bsp_create(p,e,c,d) \ + nouveau_engine_create((p), 
(e), (c), true, "PBSP", "bsp", (d)) +#define nouveau_bsp_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_bsp_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_bsp_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_bsp_dtor _nouveau_engine_dtor +#define _nouveau_bsp_init _nouveau_engine_init +#define _nouveau_bsp_fini _nouveau_engine_fini + extern struct nouveau_oclass nv84_bsp_oclass; -extern struct nouveau_oclass nvc0_bsp_oclass; -extern struct nouveau_oclass nve0_bsp_oclass; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/copy.h index 8cad2cf28cef..70b9d8c5fcf5 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/copy.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/copy.h @@ -1,7 +1,44 @@ #ifndef __NOUVEAU_COPY_H__ #define __NOUVEAU_COPY_H__ -void nva3_copy_intr(struct nouveau_subdev *); +#include +#include + +struct nouveau_copy_chan { + struct nouveau_engctx base; +}; + +#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_copy_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_copy_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_copy_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_copy_context_dtor _nouveau_engctx_dtor +#define _nouveau_copy_context_init _nouveau_engctx_init +#define _nouveau_copy_context_fini _nouveau_engctx_fini +#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_copy { + struct nouveau_engine base; +}; + +#define nouveau_copy_create(p,e,c,y,i,d) \ + nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d)) +#define nouveau_copy_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_copy_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_copy_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_copy_dtor _nouveau_engine_dtor +#define _nouveau_copy_init _nouveau_engine_init +#define _nouveau_copy_fini _nouveau_engine_fini extern struct nouveau_oclass nva3_copy_oclass; extern struct nouveau_oclass nvc0_copy0_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/crypt.h index db975618e937..e3674743baaa 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/crypt.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/crypt.h @@ -1,6 +1,45 @@ #ifndef __NOUVEAU_CRYPT_H__ #define __NOUVEAU_CRYPT_H__ +#include +#include + +struct nouveau_crypt_chan { + struct nouveau_engctx base; +}; + +#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_crypt_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_crypt_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_crypt_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor +#define _nouveau_crypt_context_init _nouveau_engctx_init +#define _nouveau_crypt_context_fini _nouveau_engctx_fini +#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_crypt { + struct nouveau_engine base; +}; + +#define nouveau_crypt_create(p,e,c,d) \ + nouveau_engine_create((p), (e), 
(c), true, "PCRYPT", "crypt", (d)) +#define nouveau_crypt_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_crypt_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_crypt_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_crypt_dtor _nouveau_engine_dtor +#define _nouveau_crypt_init _nouveau_engine_init +#define _nouveau_crypt_fini _nouveau_engine_fini + extern struct nouveau_oclass nv84_crypt_oclass; extern struct nouveau_oclass nv98_crypt_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/disp.h index 46948285f3e7..38ec1252cbaa 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/disp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/disp.h @@ -39,11 +39,6 @@ nouveau_disp(void *obj) extern struct nouveau_oclass nv04_disp_oclass; extern struct nouveau_oclass nv50_disp_oclass; -extern struct nouveau_oclass nv84_disp_oclass; -extern struct nouveau_oclass nva0_disp_oclass; -extern struct nouveau_oclass nv94_disp_oclass; -extern struct nouveau_oclass nva3_disp_oclass; extern struct nouveau_oclass nvd0_disp_oclass; -extern struct nouveau_oclass nve0_disp_oclass; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h index b28914ed1752..700ccbb1941f 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h @@ -12,17 +12,29 @@ struct nouveau_dmaobj { u32 access; u64 start; u64 limit; - u32 conf0; }; +#define nouveau_dmaobj_create(p,e,c,a,s,d) \ + nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d) +#define nouveau_dmaobj_destroy(p) \ + nouveau_object_destroy(&(p)->base) +#define nouveau_dmaobj_init(p) \ + nouveau_object_init(&(p)->base) +#define nouveau_dmaobj_fini(p,s) \ + nouveau_object_fini(&(p)->base, (s)) + +int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *data, u32 size, + int length, void **); + +#define _nouveau_dmaobj_dtor nouveau_object_destroy +#define _nouveau_dmaobj_init nouveau_object_init +#define _nouveau_dmaobj_fini nouveau_object_fini + struct nouveau_dmaeng { struct nouveau_engine base; - - /* creates a "physical" dma object from a struct nouveau_dmaobj */ - int (*bind)(struct nouveau_dmaeng *dmaeng, - struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, - struct nouveau_gpuobj **); + int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent, + struct nouveau_dmaobj *, struct nouveau_gpuobj **); }; #define nouveau_dmaeng_create(p,e,c,d) \ @@ -41,8 +53,5 @@ struct nouveau_dmaeng { extern struct nouveau_oclass nv04_dmaeng_oclass; extern struct nouveau_oclass nv50_dmaeng_oclass; extern struct nouveau_oclass nvc0_dmaeng_oclass; -extern struct nouveau_oclass nvd0_dmaeng_oclass; - -extern struct nouveau_oclass nouveau_dmaobj_sclass[]; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/fifo.h index f18846c8c6fe..d67fed1e3970 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/fifo.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/fifo.h @@ -33,15 +33,15 @@ int nouveau_fifo_channel_create_(struct nouveau_object *, struct nouveau_object *, struct nouveau_oclass *, int bar, u32 addr, u32 size, u32 push, - u64 engmask, int len, void **); + u32 engmask, int len, void **); void 
nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *); #define _nouveau_fifo_channel_init _nouveau_namedb_init #define _nouveau_fifo_channel_fini _nouveau_namedb_fini void _nouveau_fifo_channel_dtor(struct nouveau_object *); -u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64); -void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32); +u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32); +void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32); struct nouveau_fifo_base { struct nouveau_gpuobj base; diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/ppp.h index 0a66781e8cf1..74d554fb3281 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/ppp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/ppp.h @@ -1,7 +1,45 @@ #ifndef __NOUVEAU_PPP_H__ #define __NOUVEAU_PPP_H__ +#include +#include + +struct nouveau_ppp_chan { + struct nouveau_engctx base; +}; + +#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_ppp_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_ppp_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_ppp_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor +#define _nouveau_ppp_context_init _nouveau_engctx_init +#define _nouveau_ppp_context_fini _nouveau_engctx_fini +#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_ppp { + struct nouveau_engine base; +}; + +#define nouveau_ppp_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d)) +#define nouveau_ppp_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_ppp_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_ppp_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_ppp_dtor _nouveau_engine_dtor +#define _nouveau_ppp_init _nouveau_engine_init +#define _nouveau_ppp_fini _nouveau_engine_fini + extern struct nouveau_oclass nv98_ppp_oclass; -extern struct nouveau_oclass nvc0_ppp_oclass; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/trunk/drivers/gpu/drm/nouveau/core/include/engine/vp.h index d7b287b115bf..05cd08fba377 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/engine/vp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/engine/vp.h @@ -1,8 +1,45 @@ #ifndef __NOUVEAU_VP_H__ #define __NOUVEAU_VP_H__ +#include +#include + +struct nouveau_vp_chan { + struct nouveau_engctx base; +}; + +#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_vp_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_vp_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_vp_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_vp_context_dtor _nouveau_engctx_dtor +#define _nouveau_vp_context_init _nouveau_engctx_init +#define _nouveau_vp_context_fini _nouveau_engctx_fini +#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_vp { + struct nouveau_engine base; +}; + +#define nouveau_vp_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d)) +#define nouveau_vp_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define 
nouveau_vp_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_vp_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_vp_dtor _nouveau_engine_dtor +#define _nouveau_vp_init _nouveau_engine_init +#define _nouveau_vp_fini _nouveau_engine_fini + extern struct nouveau_oclass nv84_vp_oclass; -extern struct nouveau_oclass nvc0_vp_oclass; -extern struct nouveau_oclass nve0_vp_oclass; #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h index b79025da581e..d682fb625833 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h @@ -23,7 +23,6 @@ struct dcb_output { uint8_t bus; uint8_t location; uint8_t or; - uint8_t link; bool duallink_possible; union { struct sor_conf { @@ -56,11 +55,36 @@ struct dcb_output { u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len); u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len); -u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *, - struct dcb_output *); -u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *, - struct dcb_output *); int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec) (struct nouveau_bios *, void *, int index, u16 entry)); + +/* BIT 'U'/'d' table encoder subtables have hashes matching them to + * a particular set of encoders. + * + * This function returns true if a particular DCB entry matches. + */ +static inline bool +dcb_hash_match(struct dcb_output *dcb, u32 hash) +{ + if ((hash & 0x000000f0) != (dcb->location << 4)) + return false; + if ((hash & 0x0000000f) != dcb->type) + return false; + if (!(hash & (dcb->or << 16))) + return false; + + switch (dcb->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + if (hash & 0x00c00000) { + if (!(hash & (dcb->sorconf.link << 22))) + return false; + } + default: + return true; + } +} + #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h deleted file mode 100644 index c35937e2f6a4..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef __NVBIOS_DISP_H__ -#define __NVBIOS_DISP_H__ - -u16 nvbios_disp_table(struct nouveau_bios *, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub); - -struct nvbios_disp { - u16 data; -}; - -u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr__, u8 *sub); -u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr__, u8 *sub, - struct nvbios_disp *); - -struct nvbios_outp { - u16 type; - u16 mask; - u16 script[3]; -}; - -u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len); -u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *); -u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *); - - -struct nvbios_ocfg { - u16 match; - u16 clkcmp[2]; -}; - -u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len); -u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_ocfg *); -u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_ocfg *); -u16 
nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz); - -#endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h index 6e54218b55fc..73b5e5d3e75a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h @@ -1,34 +1,8 @@ #ifndef __NVBIOS_DP_H__ #define __NVBIOS_DP_H__ -struct nvbios_dpout { - u16 type; - u16 mask; - u8 flags; - u32 script[5]; - u32 lnkcmp; -}; - -u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpout *); -u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpout *); - -struct nvbios_dpcfg { - u8 drv; - u8 pre; - u8 unk; -}; - -u16 -nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpcfg *); -u16 -nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpcfg *); +u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); +u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len); +u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/fb.h index da470e6851b1..5c1b5e1904f9 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/include/subdev/fb.h +++ b/trunk/drivers/gpu/drm/nouveau/core/include/subdev/fb.h @@ -69,11 +69,8 @@ struct nouveau_fb { } type; u64 stolen; u64 size; - int ranks; - int parts; - int (*init)(struct nouveau_fb *); int (*get)(struct nouveau_fb *, u64 size, u32 align, u32 size_nc, u32 type, struct nouveau_mem **); void (*put)(struct nouveau_fb *, struct nouveau_mem **); @@ -87,8 +84,6 @@ struct nouveau_fb { int regions; void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *); - void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags, - struct nouveau_fb_tile *); void (*fini)(struct nouveau_fb *, int i, struct nouveau_fb_tile *); void (*prog)(struct nouveau_fb *, int i, @@ -104,7 +99,7 @@ nouveau_fb(void *obj) #define nouveau_fb_create(p,e,c,d) \ nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d)) -int nouveau_fb_preinit(struct nouveau_fb *); +int nouveau_fb_created(struct nouveau_fb *); void nouveau_fb_destroy(struct nouveau_fb *); int nouveau_fb_init(struct nouveau_fb *); #define nouveau_fb_fini(p,s) \ @@ -116,19 +111,9 @@ int _nouveau_fb_init(struct nouveau_object *); extern struct nouveau_oclass nv04_fb_oclass; extern struct nouveau_oclass nv10_fb_oclass; -extern struct nouveau_oclass nv1a_fb_oclass; extern struct nouveau_oclass nv20_fb_oclass; -extern struct nouveau_oclass nv25_fb_oclass; extern struct nouveau_oclass nv30_fb_oclass; -extern struct nouveau_oclass nv35_fb_oclass; -extern struct nouveau_oclass nv36_fb_oclass; extern struct nouveau_oclass nv40_fb_oclass; -extern struct nouveau_oclass nv41_fb_oclass; -extern struct nouveau_oclass nv44_fb_oclass; -extern struct nouveau_oclass nv46_fb_oclass; -extern struct nouveau_oclass nv47_fb_oclass; -extern struct nouveau_oclass nv49_fb_oclass; -extern struct nouveau_oclass nv4e_fb_oclass; extern struct nouveau_oclass nv50_fb_oclass; extern struct nouveau_oclass nvc0_fb_oclass; @@ -137,35 +122,13 @@ int nouveau_fb_bios_memtype(struct nouveau_bios *); bool 
nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype); -void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, - u32 pitch, u32 flags, struct nouveau_fb_tile *); -void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); -int nv20_fb_vram_init(struct nouveau_fb *); -void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, - u32 pitch, u32 flags, struct nouveau_fb_tile *); -void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); -void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); - -int nv30_fb_init(struct nouveau_object *); void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *); - -void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags, - struct nouveau_fb_tile *); - -int nv41_fb_vram_init(struct nouveau_fb *); -int nv41_fb_init(struct nouveau_object *); -void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); - -int nv44_fb_vram_init(struct nouveau_fb *); -int nv44_fb_init(struct nouveau_object *); -void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); - -void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, - u32 pitch, u32 flags, struct nouveau_fb_tile *); +void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **); +void nv50_fb_trap(struct nouveau_fb *, int display); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bar/base.c index d70ba342aa2e..cd01c533007a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bar/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bar/base.c @@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object) } static u32 -nouveau_barobj_rd32(struct nouveau_object *object, u64 addr) +nouveau_barobj_rd32(struct nouveau_object *object, u32 addr) { struct nouveau_barobj *barobj = (void *)object; return ioread32_native(barobj->iomem + addr); } static void -nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nouveau_barobj *barobj = (void *)object; iowrite32_native(data, barobj->iomem + addr); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index dd111947eb86..70ca7d5a1aa1 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios) struct pci_dev *pdev = nv_device(bios)->pdev; struct device_node *dn; const u32 *data; - int size; + int size, i; dn = pci_device_to_OF_node(pdev); if (!dn) { @@ -210,19 +210,11 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios) return; bios->data = kmalloc(bios->size, GFP_KERNEL); - if (bios->data) { - /* disobey the acpi spec - much faster on at least w530 ... */ - ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size); - if (ret != bios->size || - nvbios_checksum(bios->data, bios->size)) { - /* ... 
that didn't work, ok, i'll be good now */ - for (i = 0; i < bios->size; i += cnt) { - cnt = min((bios->size - i), (u32)4096); - ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt); - if (ret != cnt) - break; - } - } + for (i = 0; bios->data && i < bios->size; i += cnt) { + cnt = min((bios->size - i), (u32)4096); + ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt); + if (ret != cnt) + break; } } @@ -366,42 +358,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios) } static u8 -nouveau_bios_rd08(struct nouveau_object *object, u64 addr) +nouveau_bios_rd08(struct nouveau_object *object, u32 addr) { struct nouveau_bios *bios = (void *)object; return bios->data[addr]; } static u16 -nouveau_bios_rd16(struct nouveau_object *object, u64 addr) +nouveau_bios_rd16(struct nouveau_object *object, u32 addr) { struct nouveau_bios *bios = (void *)object; return get_unaligned_le16(&bios->data[addr]); } static u32 -nouveau_bios_rd32(struct nouveau_object *object, u64 addr) +nouveau_bios_rd32(struct nouveau_object *object, u32 addr) { struct nouveau_bios *bios = (void *)object; return get_unaligned_le32(&bios->data[addr]); } static void -nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data) +nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data) { struct nouveau_bios *bios = (void *)object; bios->data[addr] = data; } static void -nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data) +nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data) { struct nouveau_bios *bios = (void *)object; put_unaligned_le16(data, &bios->data[addr]); } static void -nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data) +nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nouveau_bios *bios = (void *)object; put_unaligned_le32(data, &bios->data[addr]); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c index 0fd87df99dd6..c51197157749 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c @@ -107,69 +107,6 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) return 0x0000; } -u16 -dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len, - struct dcb_output *outp) -{ - u16 dcb = dcb_outp(bios, idx, ver, len); - if (dcb) { - if (*ver >= 0x20) { - u32 conn = nv_ro32(bios, dcb + 0x00); - outp->or = (conn & 0x0f000000) >> 24; - outp->location = (conn & 0x00300000) >> 20; - outp->bus = (conn & 0x000f0000) >> 16; - outp->connector = (conn & 0x0000f000) >> 12; - outp->heads = (conn & 0x00000f00) >> 8; - outp->i2c_index = (conn & 0x000000f0) >> 4; - outp->type = (conn & 0x0000000f); - outp->link = 0; - } else { - dcb = 0x0000; - } - - if (*ver >= 0x40) { - u32 conf = nv_ro32(bios, dcb + 0x04); - switch (outp->type) { - case DCB_OUTPUT_TMDS: - case DCB_OUTPUT_LVDS: - case DCB_OUTPUT_DP: - outp->link = (conf & 0x00000030) >> 4; - outp->sorconf.link = outp->link; /*XXX*/ - break; - default: - break; - } - } - } - return dcb; -} - -static inline u16 -dcb_outp_hasht(struct dcb_output *outp) -{ - return outp->type; -} - -static inline u16 -dcb_outp_hashm(struct dcb_output *outp) -{ - return (outp->heads << 8) | (outp->link << 6) | outp->or; -} - -u16 -dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask, - u8 *ver, u8 *len, struct dcb_output *outp) -{ - u16 dcb, idx = 0; - while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) { - if (dcb_outp_hasht(outp) == type) { - if 
((dcb_outp_hashm(outp) & mask) == mask) - break; - } - } - return dcb; -} - int dcb_outp_foreach(struct nouveau_bios *bios, void *data, int (*exec)(struct nouveau_bios *, void *, int, u16)) diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c deleted file mode 100644 index 7f16e52d9bea..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include -#include -#include - -u16 -nvbios_disp_table(struct nouveau_bios *bios, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub) -{ - struct bit_entry U; - - if (!bit_entry(bios, 'U', &U)) { - if (U.version == 1) { - u16 data = nv_ro16(bios, U.offset); - if (data) { - *ver = nv_ro08(bios, data + 0x00); - switch (*ver) { - case 0x20: - case 0x21: - *hdr = nv_ro08(bios, data + 0x01); - *len = nv_ro08(bios, data + 0x02); - *cnt = nv_ro08(bios, data + 0x03); - *sub = nv_ro08(bios, data + 0x04); - return data; - default: - break; - } - } - } - } - - return 0x0000; -} - -u16 -nvbios_disp_entry(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *len, u8 *sub) -{ - u8 hdr, cnt; - u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub); - if (data && idx < cnt) - return data + hdr + (idx * *len); - *ver = 0x00; - return 0x0000; -} - -u16 -nvbios_disp_parse(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *len, u8 *sub, - struct nvbios_disp *info) -{ - u16 data = nvbios_disp_entry(bios, idx, ver, len, sub); - if (data && *len >= 2) { - info->data = nv_ro16(bios, data + 0); - return data; - } - return 0x0000; -} - -u16 -nvbios_outp_entry(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len) -{ - struct nvbios_disp info; - u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info); - if (data) { - *cnt = nv_ro08(bios, info.data + 0x05); - *len = 0x06; - data = info.data; - } - return data; -} - -u16 -nvbios_outp_parse(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *info) -{ - u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len); - if (data && *hdr >= 0x0a) { - info->type = nv_ro16(bios, data + 0x00); - info->mask = nv_ro32(bios, data + 0x02); - if (*ver <= 0x20) /* match any link */ - info->mask |= 0x00c0; - info->script[0] = nv_ro16(bios, data + 0x06); - info->script[1] = nv_ro16(bios, data + 0x08); - info->script[2] = 
0x0000; - if (*hdr >= 0x0c) - info->script[2] = nv_ro16(bios, data + 0x0a); - return data; - } - return 0x0000; -} - -u16 -nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_outp *info) -{ - u16 data, idx = 0; - while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) { - if (data && info->type == type) { - if ((info->mask & mask) == mask) - break; - } - } - return data; -} - -u16 -nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len) -{ - if (idx < *cnt) - return outp + *hdr + (idx * *len); - return 0x0000; -} - -u16 -nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_ocfg *info) -{ - u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); - if (data) { - info->match = nv_ro16(bios, data + 0x00); - info->clkcmp[0] = nv_ro16(bios, data + 0x02); - info->clkcmp[1] = nv_ro16(bios, data + 0x04); - } - return data; -} - -u16 -nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_ocfg *info) -{ - u16 data, idx = 0; - while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { - if (info->match == type) - break; - } - return data; -} - -u16 -nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz) -{ - while (cmp) { - if (khz / 10 >= nv_ro16(bios, cmp + 0x00)) - return nv_ro16(bios, cmp + 0x02); - cmp += 0x04; - } - return 0x0000; -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c index 663853bcca82..3cbc0f3e8d5e 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c @@ -25,29 +25,23 @@ #include "subdev/bios.h" #include "subdev/bios/bit.h" +#include "subdev/bios/dcb.h" #include "subdev/bios/dp.h" -static u16 -nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +u16 +dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { - struct bit_entry d; + struct bit_entry bit_d; - if (!bit_entry(bios, 'd', &d)) { - if (d.version == 1 && d.length >= 2) { - u16 data = nv_ro16(bios, d.offset); + if (!bit_entry(bios, 'd', &bit_d)) { + if (bit_d.version == 1) { + u16 data = nv_ro16(bios, bit_d.offset); if (data) { - *ver = nv_ro08(bios, data + 0x00); - switch (*ver) { - case 0x21: - case 0x30: - case 0x40: - *hdr = nv_ro08(bios, data + 0x01); - *len = nv_ro08(bios, data + 0x02); - *cnt = nv_ro08(bios, data + 0x03); - return data; - default: - break; - } + *ver = nv_ro08(bios, data + 0); + *hdr = nv_ro08(bios, data + 1); + *len = nv_ro08(bios, data + 2); + *cnt = nv_ro08(bios, data + 3); + return data; } } } @@ -55,150 +49,28 @@ nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) return 0x0000; } -static u16 -nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len) -{ - u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len); - if (data && idx < *cnt) { - u16 outp = nv_ro16(bios, data + *hdr + idx * *len); - switch (*ver * !!outp) { - case 0x21: - case 0x30: - *hdr = nv_ro08(bios, data + 0x04); - *len = nv_ro08(bios, data + 0x05); - *cnt = nv_ro08(bios, outp + 0x04); - break; - case 0x40: - *hdr = nv_ro08(bios, data + 0x04); - *cnt = 0; - *len = 0; - break; - default: - break; - } - return outp; - } - *ver = 0x00; - return 0x0000; -} - u16 -nvbios_dpout_parse(struct 
nouveau_bios *bios, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpout *info) +dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) { - u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len); - if (data && *ver) { - info->type = nv_ro16(bios, data + 0x00); - info->mask = nv_ro16(bios, data + 0x02); - switch (*ver) { - case 0x21: - case 0x30: - info->flags = nv_ro08(bios, data + 0x05); - info->script[0] = nv_ro16(bios, data + 0x06); - info->script[1] = nv_ro16(bios, data + 0x08); - info->lnkcmp = nv_ro16(bios, data + 0x0a); - info->script[2] = nv_ro16(bios, data + 0x0c); - info->script[3] = nv_ro16(bios, data + 0x0e); - info->script[4] = nv_ro16(bios, data + 0x10); - break; - case 0x40: - info->flags = nv_ro08(bios, data + 0x04); - info->script[0] = nv_ro16(bios, data + 0x05); - info->script[1] = nv_ro16(bios, data + 0x07); - info->lnkcmp = nv_ro16(bios, data + 0x09); - info->script[2] = nv_ro16(bios, data + 0x0b); - info->script[3] = nv_ro16(bios, data + 0x0d); - info->script[4] = nv_ro16(bios, data + 0x0f); - break; - default: - data = 0x0000; - break; - } - } - return data; + u8 hdr, cnt; + u16 table = dp_table(bios, ver, &hdr, &cnt, len); + if (table && idx < cnt) + return nv_ro16(bios, table + hdr + (idx * *len)); + return 0xffff; } u16 -nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpout *info) -{ - u16 data, idx = 0; - while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) { - if (data && info->type == type) { - if ((info->mask & mask) == mask) - break; - } - } - return data; -} - -static u16 -nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp, + u8 *ver, u8 *len) { - if (*ver >= 0x40) { - outp = nvbios_dp_table(bios, ver, hdr, cnt, len); - *hdr = *hdr + (*len * * cnt); - *len = nv_ro08(bios, outp + 0x06); - *cnt = nv_ro08(bios, outp + 0x07); - } - - if (idx < *cnt) - return outp + *hdr + (idx * *len); - - return 0x0000; -} - -u16 -nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpcfg *info) -{ - u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len); - if (data) { - switch (*ver) { - case 0x21: - info->drv = nv_ro08(bios, data + 0x02); - info->pre = nv_ro08(bios, data + 0x03); - info->unk = nv_ro08(bios, data + 0x04); - break; - case 0x30: - case 0x40: - info->drv = nv_ro08(bios, data + 0x01); - info->pre = nv_ro08(bios, data + 0x02); - info->unk = nv_ro08(bios, data + 0x03); - break; - default: - data = 0x0000; - break; - } - } - return data; -} - -u16 -nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe, - u8 *ver, u8 *hdr, u8 *cnt, u8 *len, - struct nvbios_dpcfg *info) -{ - u8 idx = 0xff; + u8 idx = 0; u16 data; - - if (*ver >= 0x30) { - const u8 vsoff[] = { 0, 4, 7, 9 }; - idx = (un * 10) + vsoff[vs] + pe; - } else { - while ((data = nvbios_dpcfg_entry(bios, outp, idx, - ver, hdr, cnt, len))) { - if (nv_ro08(bios, data + 0x00) == vs && - nv_ro08(bios, data + 0x01) == pe) - break; - idx++; + while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) { + if (data) { + u32 hash = nv_ro32(bios, data); + if (dcb_hash_match(outp, hash)) + return data; } } - - return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info); + return 0x0000; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c 
b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c index c90d4aa3ae4f..4c9f1e508165 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c @@ -101,8 +101,8 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line, } /* DCB 2.2, fixed TVDAC GPIO data */ - if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) { - if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) { + if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) { + if (func == DCB_GPIO_TVDAC0) { u8 conf = nv_ro08(bios, entry - 5); u8 addr = nv_ro08(bios, entry - 4); if (conf & 0x01) { diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index ae168bbb86d8..6be8c32f6e4c 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -743,10 +743,9 @@ static void init_dp_condition(struct nvbios_init *init) { struct nouveau_bios *bios = init->bios; - struct nvbios_dpout info; u8 cond = nv_ro08(bios, init->offset + 1); u8 unkn = nv_ro08(bios, init->offset + 2); - u8 ver, hdr, cnt, len; + u8 ver, len; u16 data; trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn); @@ -760,12 +759,10 @@ init_dp_condition(struct nvbios_init *init) case 1: case 2: if ( init->outp && - (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP, - (init->outp->or << 0) | - (init->outp->sorconf.link << 6), - &ver, &hdr, &cnt, &len, &info))) - { - if (!(info.flags & cond)) + (data = dp_outp_match(bios, init->outp, &ver, &len))) { + if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond)) + init_exec_set(init, false); + if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond)) init_exec_set(init, false); break; } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/base.c index f8a7ed4166cf..ca9a4648bd8a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/base.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -60,24 +61,19 @@ struct nouveau_devobj { static const u64 disable_map[] = { [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS, - [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE, [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE, [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE, - [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, - [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO, [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH, [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG, [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME, @@ -88,7 +84,7 @@ static const u64 disable_map[] = { [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0, [NVDEV_ENGINE_COPY1] = 
NV_DEVICE_DISABLE_COPY1, [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1, - [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC, + [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP, [NVDEV_SUBDEV_NR] = 0, }; @@ -212,7 +208,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent, /* determine frequency of timing crystal */ if ( device->chipset < 0x17 || - (device->chipset >= 0x20 && device->chipset < 0x25)) + (device->chipset >= 0x20 && device->chipset <= 0x25)) strap &= 0x00000040; else strap &= 0x00400040; @@ -360,37 +356,37 @@ nouveau_devobj_fini(struct nouveau_object *object, bool suspend) } static u8 -nouveau_devobj_rd08(struct nouveau_object *object, u64 addr) +nouveau_devobj_rd08(struct nouveau_object *object, u32 addr) { return nv_rd08(object->engine, addr); } static u16 -nouveau_devobj_rd16(struct nouveau_object *object, u64 addr) +nouveau_devobj_rd16(struct nouveau_object *object, u32 addr) { return nv_rd16(object->engine, addr); } static u32 -nouveau_devobj_rd32(struct nouveau_object *object, u64 addr) +nouveau_devobj_rd32(struct nouveau_object *object, u32 addr) { return nv_rd32(object->engine, addr); } static void -nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data) +nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data) { nv_wr08(object->engine, addr, data); } static void -nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data) +nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data) { nv_wr16(object->engine, addr, data); } static void -nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data) { nv_wr32(object->engine, addr, data); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c index 9c40b0fb23f6..f09accfd0e31 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c @@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c index 74f88f48e1c2..5fa58b7369b5 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c @@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c index 0ac1b2c4f61d..7f4b8fe6cccc 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c @@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c index 41d59689a021..42deadca0f0a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c @@ -76,7 +76,7 @@ 
nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = 
&nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; @@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; - device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c index 6ccfd8585ba2..fec3bcc9a6fc 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c @@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x86: device->cname = "G86"; @@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x92: device->cname = "G92"; @@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x94: device->cname = "G94"; @@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x96: device->cname = "G96"; @@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0x98: device->cname = "G98"; @@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xa0: device->cname = "G200"; @@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xaa: device->cname = "MCP77/MCP78"; @@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xac: 
device->cname = "MCP79/MCP7A"; @@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xa3: device->cname = "GT215"; @@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xa5: device->cname = "GT216"; @@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xa8: device->cname = "GT218"; @@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xaf: device->cname = "MCP89"; @@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; default: nv_fatal(device, "unknown Tesla chipset\n"); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c index f0461685a422..6697f0f9c293 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c @@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xc4: device->cname = "GF104"; @@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + 
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xc3: device->cname = "GF106"; @@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xce: device->cname = "GF114"; @@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xcf: device->cname = "GF116"; @@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xc1: device->cname = "GF108"; @@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xc8: device->cname = "GF110"; @@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; break; case 0xd9: device->cname = "GF119"; @@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; break; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c index 9b7881e76634..4a280b7ab853 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c @@ -45,9 +45,6 @@ #include #include #include -#include -#include -#include int nve0_identify(struct nouveau_device *device) @@ -70,16 +67,13 @@ nve0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; break; case 0xe7: device->cname = "GK107"; @@ -98,16 +92,13 @@ nve0_identify(struct nouveau_device *device) 
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; - device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; - device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; - device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; - device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; break; default: nv_fatal(device, "unknown Kepler chipset\n"); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c index ae7249b09797..61becfa732e9 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c @@ -22,10 +22,6 @@ * Authors: Ben Skeggs */ -#include -#include -#include -#include #include #include @@ -59,12 +55,7 @@ nv50_devinit_dtor(struct nouveau_object *object) static int nv50_devinit_init(struct nouveau_object *object) { - struct nouveau_bios *bios = nouveau_bios(object); struct nv50_devinit_priv *priv = (void *)object; - struct nvbios_outp info; - struct dcb_output outp; - u8 ver = 0xff, hdr, cnt, len; - int ret, i = 0; if (!priv->base.post) { if (!nv_rdvgac(priv, 0, 0x00) && @@ -74,30 +65,7 @@ nv50_devinit_init(struct nouveau_object *object) } } - ret = nouveau_devinit_init(&priv->base); - if (ret) - return ret; - - /* if we ran the init tables, execute first script pointer for each - * display table output entry that has a matching dcb entry. - */ - while (priv->base.post && ver) { - u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info); - if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) { - struct nvbios_init init = { - .subdev = nv_subdev(priv), - .bios = bios, - .offset = info.script[0], - .outp = &outp, - .crtc = -1, - .execute = 1, - }; - - nvbios_exec(&init); - } - }; - - return 0; + return nouveau_devinit_init(&priv->base); } static int diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/base.c index d6d16007ec1a..f0086de8af31 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/base.c @@ -57,47 +57,27 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios) } int -nouveau_fb_preinit(struct nouveau_fb *pfb) +nouveau_fb_init(struct nouveau_fb *pfb) { - static const char *name[] = { - [NV_MEM_TYPE_UNKNOWN] = "unknown", - [NV_MEM_TYPE_STOLEN ] = "stolen system memory", - [NV_MEM_TYPE_SGRAM ] = "SGRAM", - [NV_MEM_TYPE_SDRAM ] = "SDRAM", - [NV_MEM_TYPE_DDR1 ] = "DDR1", - [NV_MEM_TYPE_DDR2 ] = "DDR2", - [NV_MEM_TYPE_DDR3 ] = "DDR3", - [NV_MEM_TYPE_GDDR2 ] = "GDDR2", - [NV_MEM_TYPE_GDDR3 ] = "GDDR3", - [NV_MEM_TYPE_GDDR4 ] = "GDDR4", - [NV_MEM_TYPE_GDDR5 ] = "GDDR5", - }; - int ret, tags; - - tags = pfb->ram.init(pfb); - if (tags < 0 || !pfb->ram.size) { - nv_fatal(pfb, "error detecting memory configuration!!\n"); - return (tags < 0) ? 
tags : -ERANGE; - } + int ret, i; - if (!nouveau_mm_initialised(&pfb->vram)) { - ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1); - if (ret) - return ret; - } + ret = nouveau_subdev_init(&pfb->base); + if (ret) + return ret; - if (!nouveau_mm_initialised(&pfb->tags) && tags) { - ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1); - if (ret) - return ret; - } + for (i = 0; i < pfb->tile.regions; i++) + pfb->tile.prog(pfb, i, &pfb->tile.region[i]); - nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]); - nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20)); - nv_info(pfb, " ZCOMP: %d tags\n", tags); return 0; } +int +_nouveau_fb_init(struct nouveau_object *object) +{ + struct nouveau_fb *pfb = (void *)object; + return nouveau_fb_init(pfb); +} + void nouveau_fb_destroy(struct nouveau_fb *pfb) { @@ -105,8 +85,12 @@ nouveau_fb_destroy(struct nouveau_fb *pfb) for (i = 0; i < pfb->tile.regions; i++) pfb->tile.fini(pfb, i, &pfb->tile.region[i]); - nouveau_mm_fini(&pfb->tags); - nouveau_mm_fini(&pfb->vram); + + if (pfb->tags.block_size) + nouveau_mm_fini(&pfb->tags); + + if (pfb->vram.block_size) + nouveau_mm_fini(&pfb->vram); nouveau_subdev_destroy(&pfb->base); } @@ -117,24 +101,30 @@ _nouveau_fb_dtor(struct nouveau_object *object) struct nouveau_fb *pfb = (void *)object; nouveau_fb_destroy(pfb); } + int -nouveau_fb_init(struct nouveau_fb *pfb) +nouveau_fb_created(struct nouveau_fb *pfb) { - int ret, i; - - ret = nouveau_subdev_init(&pfb->base); - if (ret) - return ret; + static const char *name[] = { + [NV_MEM_TYPE_UNKNOWN] = "unknown", + [NV_MEM_TYPE_STOLEN ] = "stolen system memory", + [NV_MEM_TYPE_SGRAM ] = "SGRAM", + [NV_MEM_TYPE_SDRAM ] = "SDRAM", + [NV_MEM_TYPE_DDR1 ] = "DDR1", + [NV_MEM_TYPE_DDR2 ] = "DDR2", + [NV_MEM_TYPE_DDR3 ] = "DDR3", + [NV_MEM_TYPE_GDDR2 ] = "GDDR2", + [NV_MEM_TYPE_GDDR3 ] = "GDDR3", + [NV_MEM_TYPE_GDDR4 ] = "GDDR4", + [NV_MEM_TYPE_GDDR5 ] = "GDDR5", + }; - for (i = 0; i < pfb->tile.regions; i++) - pfb->tile.prog(pfb, i, &pfb->tile.region[i]); + if (pfb->ram.size == 0) { + nv_fatal(pfb, "no vram detected!!\n"); + return -ERANGE; + } + nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]); + nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20)); return 0; } - -int -_nouveau_fb_init(struct nouveau_object *object) -{ - struct nouveau_fb *pfb = (void *)object; - return nouveau_fb_init(pfb); -} diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c index 6e369f85361e..eb06836b69f7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c @@ -55,37 +55,6 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) return false; } -static int -nv04_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0); - if (boot0 & 0x00000100) { - pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2; - pfb->ram.size *= 1024 * 1024; - } else { - switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { - case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: - pfb->ram.size = 32 * 1024 * 1024; - break; - case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: - pfb->ram.size = 16 * 1024 * 1024; - break; - case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: - pfb->ram.size = 8 * 1024 * 1024; - break; - case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: - pfb->ram.size = 4 * 1024 * 1024; - break; - } - } - - if ((boot0 & 0x00000038) <= 0x10) - pfb->ram.type = NV_MEM_TYPE_SGRAM; - else - pfb->ram.type = NV_MEM_TYPE_SDRAM; - return 0; -} - static int nv04_fb_init(struct nouveau_object 
*object) { @@ -110,6 +79,7 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_object **pobject) { struct nv04_fb_priv *priv; + u32 boot0; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); @@ -117,9 +87,35 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + boot0 = nv_rd32(priv, NV04_PFB_BOOT_0); + if (boot0 & 0x00000100) { + priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2; + priv->base.ram.size *= 1024 * 1024; + } else { + switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { + case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: + priv->base.ram.size = 32 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: + priv->base.ram.size = 16 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: + priv->base.ram.size = 8 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: + priv->base.ram.size = 4 * 1024 * 1024; + break; + } + } + + if ((boot0 & 0x00000038) <= 0x10) + priv->base.ram.type = NV_MEM_TYPE_SGRAM; + else + priv->base.ram.type = NV_MEM_TYPE_SDRAM; + + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv04_fb_vram_init; - return nouveau_fb_preinit(&priv->base); + return nouveau_fb_created(&priv->base); } struct nouveau_oclass diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c index edbbe26e858d..f037a422d2f4 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c @@ -30,20 +30,7 @@ struct nv10_fb_priv { struct nouveau_fb base; }; -static int -nv10_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 cfg0 = nv_rd32(pfb, 0x100200); - if (cfg0 & 0x00000001) - pfb->ram.type = NV_MEM_TYPE_DDR1; - else - pfb->ram.type = NV_MEM_TYPE_SDRAM; - - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - return 0; -} - -void +static void nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *tile) { @@ -52,7 +39,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, tile->pitch = pitch; } -void +static void nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { tile->addr = 0; @@ -67,7 +54,6 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); - nv_rd32(pfb, 0x100240 + (i * 0x10)); } static int @@ -75,6 +61,7 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + struct nouveau_device *device = nv_device(parent); struct nv10_fb_priv *priv; int ret; @@ -83,13 +70,42 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + if (device->chipset == 0x1a || device->chipset == 0x1f) { + struct pci_dev *bridge; + u32 mem, mib; + + bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); + if (!bridge) { + nv_fatal(device, "no bridge device\n"); + return 0; + } + + if (device->chipset == 0x1a) { + pci_read_config_dword(bridge, 0x7c, &mem); + mib = ((mem >> 6) & 31) + 1; + } else { + pci_read_config_dword(bridge, 0x84, &mem); + mib = ((mem >> 4) & 127) + 1; + } + + priv->base.ram.type = NV_MEM_TYPE_STOLEN; + priv->base.ram.size = mib * 1024 * 1024; + } else { + u32 cfg0 = nv_rd32(priv, 0x100200); + if (cfg0 & 
0x00000001) + priv->base.ram.type = NV_MEM_TYPE_DDR1; + else + priv->base.ram.type = NV_MEM_TYPE_SDRAM; + + priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000; + } + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv10_fb_vram_init; priv->base.tile.regions = 8; priv->base.tile.init = nv10_fb_tile_init; priv->base.tile.fini = nv10_fb_tile_fini; priv->base.tile.prog = nv10_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); + return nouveau_fb_created(&priv->base); } struct nouveau_oclass diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c deleted file mode 100644 index 48366841db4a..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv1a_fb_priv { - struct nouveau_fb base; -}; - -static int -nv1a_fb_vram_init(struct nouveau_fb *pfb) -{ - struct pci_dev *bridge; - u32 mem, mib; - - bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); - if (!bridge) { - nv_fatal(pfb, "no bridge device\n"); - return -ENODEV; - } - - if (nv_device(pfb)->chipset == 0x1a) { - pci_read_config_dword(bridge, 0x7c, &mem); - mib = ((mem >> 6) & 31) + 1; - } else { - pci_read_config_dword(bridge, 0x84, &mem); - mib = ((mem >> 4) & 127) + 1; - } - - pfb->ram.type = NV_MEM_TYPE_STOLEN; - pfb->ram.size = mib * 1024 * 1024; - return 0; -} - -static int -nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv1a_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv1a_fb_vram_init; - priv->base.tile.regions = 8; - priv->base.tile.init = nv10_fb_tile_init; - priv->base.tile.fini = nv10_fb_tile_fini; - priv->base.tile.prog = nv10_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv1a_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x1a), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv1a_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = _nouveau_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c index 5d14612a2c8e..4b3578fcb7fb 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c @@ -30,54 +30,43 @@ struct nv20_fb_priv { struct nouveau_fb base; }; -int -nv20_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 pbus1218 = nv_rd32(pfb, 0x001218); - - switch (pbus1218 & 0x00000300) { - case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break; - case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break; - case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; - case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break; - } - pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000); - pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - - return nv_rd32(pfb, 0x100320); -} - -void +static void nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *tile) { + struct nouveau_device *device = nv_device(pfb); + int bpp = (flags & 2) ? 32 : 16; + tile->addr = 0x00000001 | addr; tile->limit = max(1u, addr + size) - 1; tile->pitch = pitch; + + /* Allocate some of the on-die tag memory, used to store Z + * compression meta-data (most likely just a bitmap determining + * if a given tile is compressed or not). 
+ */ + size /= 256; if (flags & 4) { - pfb->tile.comp(pfb, i, size, flags, tile); + if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) { + /* Enable Z compression */ + tile->zcomp = tile->tag->offset; + if (device->chipset >= 0x25) { + if (bpp == 16) + tile->zcomp |= 0x00100000; + else + tile->zcomp |= 0x00200000; + } else { + tile->zcomp |= 0x80000000; + if (bpp != 16) + tile->zcomp |= 0x04000000; + } + } + tile->addr |= 2; } } static void -nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) -{ - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */ - else tile->zcomp = 0x04000000; /* Z24S8 */ - tile->zcomp |= tile->tag->offset; - tile->zcomp |= 0x80000000; /* enable */ -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x08000000; -#endif - } -} - -void nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { tile->addr = 0; @@ -87,13 +76,12 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) nouveau_mm_free(&pfb->tags, &tile->tag); } -void +static void nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); - nv_rd32(pfb, 0x100240 + (i * 0x10)); nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp); } @@ -102,7 +90,9 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + struct nouveau_device *device = nv_device(parent); struct nv20_fb_priv *priv; + u32 pbus1218; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); @@ -110,14 +100,28 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + pbus1218 = nv_rd32(priv, 0x001218); + switch (pbus1218 & 0x00000300) { + case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break; + case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break; + } + priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000; + + if (device->chipset >= 0x25) + ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1); + else + ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1); + if (ret) + return ret; + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; priv->base.tile.regions = 8; priv->base.tile.init = nv20_fb_tile_init; - priv->base.tile.comp = nv20_fb_tile_comp; priv->base.tile.fini = nv20_fb_tile_fini; priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); + return nouveau_fb_created(&priv->base); } struct nouveau_oclass diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c deleted file mode 100644 index 0042ace6bef9..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv25_fb_priv { - struct nouveau_fb base; -}; - -static void -nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) -{ - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */ - else tile->zcomp = 0x00200000; /* Z24S8 */ - tile->zcomp |= tile->tag->offset; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x01000000; -#endif - } -} - -static int -nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv25_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; - priv->base.tile.regions = 8; - priv->base.tile.init = nv20_fb_tile_init; - priv->base.tile.comp = nv25_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv25_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x25), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv25_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = _nouveau_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c index a7ba0d048aec..cba67bc91390 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c @@ -34,36 +34,17 @@ void nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *tile) { - /* for performance, select alternate bank offset for zeta */ - if (!(flags & 4)) { - tile->addr = (0 << 4); - } else { - if (pfb->tile.comp) /* z compression */ - pfb->tile.comp(pfb, i, size, flags, tile); - tile->addr = (1 << 4); - } - - tile->addr |= 0x00000001; /* enable */ - tile->addr |= addr; + tile->addr = addr | 1; tile->limit = max(1u, addr + size) - 1; tile->pitch = pitch; } -static void -nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) 
+void +nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */ - else tile->zcomp |= 0x02000000; /* Z24S8 */ - tile->zcomp |= ((tile->tag->offset ) >> 6); - tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x10000000; -#endif - } + tile->addr = 0; + tile->limit = 0; + tile->pitch = 0; } static int @@ -91,7 +72,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i) return x; } -int +static int nv30_fb_init(struct nouveau_object *object) { struct nouveau_device *device = nv_device(object); @@ -130,6 +111,7 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_object **pobject) { struct nv30_fb_priv *priv; + u32 pbus1218; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); @@ -137,14 +119,21 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + pbus1218 = nv_rd32(priv, 0x001218); + switch (pbus1218 & 0x00000300) { + case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break; + case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break; + } + priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000; + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; priv->base.tile.regions = 8; priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv30_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); + priv->base.tile.fini = nv30_fb_tile_fini; + priv->base.tile.prog = nv10_fb_tile_prog; + return nouveau_fb_created(&priv->base); } struct nouveau_oclass diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c deleted file mode 100644 index 092f6f4f3521..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv35_fb_priv { - struct nouveau_fb base; -}; - -static void -nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) -{ - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */ - else tile->zcomp |= 0x08000000; /* Z24S8 */ - tile->zcomp |= ((tile->tag->offset ) >> 6); - tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x40000000; -#endif - } -} - -static int -nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv35_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; - priv->base.tile.regions = 8; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv35_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv35_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x35), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv35_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv30_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c deleted file mode 100644 index 797ab3b821b9..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv36_fb_priv { - struct nouveau_fb base; -}; - -static void -nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) -{ - u32 tiles = DIV_ROUND_UP(size, 0x40); - u32 tags = round_up(tiles / pfb->ram.parts, 0x40); - if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */ - else tile->zcomp |= 0x20000000; /* Z24S8 */ - tile->zcomp |= ((tile->tag->offset ) >> 6); - tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x80000000; -#endif - } -} - -static int -nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv36_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv20_fb_vram_init; - priv->base.tile.regions = 8; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv36_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv36_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x36), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv36_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv30_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c index 65e131b90f37..347a496fcad8 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c @@ -30,37 +30,34 @@ struct nv40_fb_priv { struct nouveau_fb base; }; -static int -nv40_fb_vram_init(struct nouveau_fb *pfb) +static inline int +nv44_graph_class(struct nouveau_device *device) { - u32 pbus1218 = nv_rd32(pfb, 0x001218); - switch (pbus1218 & 0x00000300) { - case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break; - case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break; - case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; - case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break; - } + if ((device->chipset & 0xf0) == 0x60) + return 1; - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - return nv_rd32(pfb, 0x100320); + return !(0x0baf & (1 << (device->chipset & 0x0f))); } -void -nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, - struct nouveau_fb_tile *tile) +static void +nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { - u32 tiles = DIV_ROUND_UP(size, 0x80); - u32 tags = round_up(tiles / pfb->ram.parts, 0x100); - if ( (flags & 2) && - !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { - tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */ - tile->zcomp |= ((tile->tag->offset ) >> 8); - tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13; -#ifdef __BIG_ENDIAN - tile->zcomp |= 0x40000000; -#endif - } + nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); + nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); + nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); +} + +static void +nv40_fb_init_gart(struct nv40_fb_priv *priv) +{ + nv_wr32(priv, 0x100800, 0x00000001); +} + +static void +nv44_fb_init_gart(struct nv40_fb_priv *priv) +{ + nv_wr32(priv, 
0x100850, 0x80000000); + nv_wr32(priv, 0x100800, 0x00000001); } static int @@ -73,7 +70,19 @@ nv40_fb_init(struct nouveau_object *object) if (ret) return ret; - nv_mask(priv, 0x10033c, 0x00008000, 0x00000000); + switch (nv_device(priv)->chipset) { + case 0x40: + case 0x45: + nv_mask(priv, 0x10033c, 0x00008000, 0x00000000); + break; + default: + if (nv44_graph_class(nv_device(priv))) + nv44_fb_init_gart(priv); + else + nv40_fb_init_gart(priv); + break; + } + return 0; } @@ -82,6 +91,7 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + struct nouveau_device *device = nv_device(parent); struct nv40_fb_priv *priv; int ret; @@ -90,14 +100,69 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + /* 0x001218 is actually present on a few other NV4X I looked at, + * and even contains sane values matching 0x100474. From looking + * at various vbios images however, this isn't the case everywhere. + * So, I chose to use the same regs I've seen NVIDIA reading around + * the memory detection, hopefully that'll get us the right numbers + */ + if (device->chipset == 0x40) { + u32 pbus1218 = nv_rd32(priv, 0x001218); + switch (pbus1218 & 0x00000300) { + case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break; + case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break; + } + } else + if (device->chipset == 0x49 || device->chipset == 0x4b) { + u32 pfb914 = nv_rd32(priv, 0x100914); + switch (pfb914 & 0x00000003) { + case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break; + case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000003: break; + } + } else + if (device->chipset != 0x4e) { + u32 pfb474 = nv_rd32(priv, 0x100474); + if (pfb474 & 0x00000004) + priv->base.ram.type = NV_MEM_TYPE_GDDR3; + if (pfb474 & 0x00000002) + priv->base.ram.type = NV_MEM_TYPE_DDR2; + if (pfb474 & 0x00000001) + priv->base.ram.type = NV_MEM_TYPE_DDR1; + } else { + priv->base.ram.type = NV_MEM_TYPE_STOLEN; + } + + priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000; + priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv40_fb_vram_init; - priv->base.tile.regions = 8; + switch (device->chipset) { + case 0x40: + case 0x45: + priv->base.tile.regions = 8; + break; + case 0x46: + case 0x47: + case 0x49: + case 0x4b: + case 0x4c: + priv->base.tile.regions = 15; + break; + default: + priv->base.tile.regions = 12; + break; + } priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv40_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv20_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); + priv->base.tile.fini = nv30_fb_tile_fini; + if (device->chipset == 0x40) + priv->base.tile.prog = nv10_fb_tile_prog; + else + priv->base.tile.prog = nv40_fb_tile_prog; + + return nouveau_fb_created(&priv->base); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c deleted file mode 100644 index e9e5a08c41a1..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv41_fb_priv { - struct nouveau_fb base; -}; - -int -nv41_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 pfb474 = nv_rd32(pfb, 0x100474); - if (pfb474 & 0x00000004) - pfb->ram.type = NV_MEM_TYPE_GDDR3; - if (pfb474 & 0x00000002) - pfb->ram.type = NV_MEM_TYPE_DDR2; - if (pfb474 & 0x00000001) - pfb->ram.type = NV_MEM_TYPE_DDR1; - - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - return nv_rd32(pfb, 0x100320); -} - -void -nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) -{ - nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); - nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); - nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); - nv_rd32(pfb, 0x100600 + (i * 0x10)); - nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp); -} - -int -nv41_fb_init(struct nouveau_object *object) -{ - struct nv41_fb_priv *priv = (void *)object; - int ret; - - ret = nouveau_fb_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x100800, 0x00000001); - return 0; -} - -static int -nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv41_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv41_fb_vram_init; - priv->base.tile.regions = 12; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv40_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv41_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv41_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x41), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv41_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv41_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c deleted file mode 100644 index ae89b5006f7a..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv44_fb_priv { - struct nouveau_fb base; -}; - -int -nv44_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 pfb474 = nv_rd32(pfb, 0x100474); - if (pfb474 & 0x00000004) - pfb->ram.type = NV_MEM_TYPE_GDDR3; - if (pfb474 & 0x00000002) - pfb->ram.type = NV_MEM_TYPE_DDR2; - if (pfb474 & 0x00000001) - pfb->ram.type = NV_MEM_TYPE_DDR1; - - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - return 0; -} - -static void -nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, - u32 flags, struct nouveau_fb_tile *tile) -{ - tile->addr = 0x00000001; /* mode = vram */ - tile->addr |= addr; - tile->limit = max(1u, addr + size) - 1; - tile->pitch = pitch; -} - -void -nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) -{ - nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); - nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); - nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); - nv_rd32(pfb, 0x100600 + (i * 0x10)); -} - -int -nv44_fb_init(struct nouveau_object *object) -{ - struct nv44_fb_priv *priv = (void *)object; - int ret; - - ret = nouveau_fb_init(&priv->base); - if (ret) - return ret; - - nv_wr32(priv, 0x100850, 0x80000000); - nv_wr32(priv, 0x100800, 0x00000001); - return 0; -} - -static int -nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv44_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv44_fb_vram_init; - priv->base.tile.regions = 12; - priv->base.tile.init = nv44_fb_tile_init; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv44_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv44_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x44), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv44_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv44_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c deleted file mode 100644 index 589b93ea2994..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c +++ /dev/null @@ -1,79 +0,0 @@ 
-/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv46_fb_priv { - struct nouveau_fb base; -}; - -void -nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, - u32 flags, struct nouveau_fb_tile *tile) -{ - /* for performance, select alternate bank offset for zeta */ - if (!(flags & 4)) tile->addr = (0 << 3); - else tile->addr = (1 << 3); - - tile->addr |= 0x00000001; /* mode = vram */ - tile->addr |= addr; - tile->limit = max(1u, addr + size) - 1; - tile->pitch = pitch; -} - -static int -nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv46_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv44_fb_vram_init; - priv->base.tile.regions = 15; - priv->base.tile.init = nv46_fb_tile_init; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv44_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv46_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x46), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv46_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv44_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c deleted file mode 100644 index 818bba35b368..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include - -struct nv47_fb_priv { - struct nouveau_fb base; -}; - -static int -nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv47_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv41_fb_vram_init; - priv->base.tile.regions = 15; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv40_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv41_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv47_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x47), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv47_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv41_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c deleted file mode 100644 index 84a31af16ab4..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv49_fb_priv { - struct nouveau_fb base; -}; - -static int -nv49_fb_vram_init(struct nouveau_fb *pfb) -{ - u32 pfb914 = nv_rd32(pfb, 0x100914); - - switch (pfb914 & 0x00000003) { - case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break; - case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break; - case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; - case 0x00000003: break; - } - - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - return nv_rd32(pfb, 0x100320); -} - -static int -nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv49_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv49_fb_vram_init; - priv->base.tile.regions = 15; - priv->base.tile.init = nv30_fb_tile_init; - priv->base.tile.comp = nv40_fb_tile_comp; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv41_fb_tile_prog; - - return nouveau_fb_preinit(&priv->base); -} - - -struct nouveau_oclass -nv49_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x49), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv49_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv41_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c deleted file mode 100644 index 797fd558170b..000000000000 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2010 Francisco Jerez. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include - -struct nv4e_fb_priv { - struct nouveau_fb base; -}; - -static int -nv4e_fb_vram_init(struct nouveau_fb *pfb) -{ - pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram.type = NV_MEM_TYPE_STOLEN; - return 0; -} - -static int -nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv4e_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.memtype_valid = nv04_fb_memtype_valid; - priv->base.ram.init = nv4e_fb_vram_init; - priv->base.tile.regions = 12; - priv->base.tile.init = nv46_fb_tile_init; - priv->base.tile.fini = nv20_fb_tile_fini; - priv->base.tile.prog = nv44_fb_tile_prog; - return nouveau_fb_preinit(&priv->base); -} - -struct nouveau_oclass -nv4e_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x4e), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv4e_fb_ctor, - .dtor = _nouveau_fb_dtor, - .init = nv44_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 487cb8c6c204..5f570806143a 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c @@ -51,101 +51,6 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype) return types[(memtype & 0xff00) >> 8] != 0; } -static u32 -nv50_fb_vram_rblock(struct nouveau_fb *pfb) -{ - int i, parts, colbits, rowbitsa, rowbitsb, banks; - u64 rowsize, predicted; - u32 r0, r4, rt, ru, rblock_size; - - r0 = nv_rd32(pfb, 0x100200); - r4 = nv_rd32(pfb, 0x100204); - rt = nv_rd32(pfb, 0x100250); - ru = nv_rd32(pfb, 0x001540); - nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); - - for (i = 0, parts = 0; i < 8; i++) { - if (ru & (0x00010000 << i)) - parts++; - } - - colbits = (r4 & 0x0000f000) >> 12; - rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; - rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; - banks = 1 << (((r4 & 0x03000000) >> 24) + 2); - - rowsize = parts * banks * (1 << colbits) * 8; - predicted = rowsize << rowbitsa; - if (r0 & 0x00000004) - predicted += rowsize << rowbitsb; - - if (predicted != pfb->ram.size) { - nv_warn(pfb, "memory controller reports %d MiB VRAM\n", - (u32)(pfb->ram.size >> 20)); - } - - rblock_size = rowsize; - if (rt & 1) - rblock_size *= 3; - - nv_debug(pfb, "rblock %d bytes\n", rblock_size); - return rblock_size; -} - -static int -nv50_fb_vram_init(struct nouveau_fb *pfb) -{ - struct nouveau_device *device = nv_device(pfb); - struct nouveau_bios *bios = nouveau_bios(device); - const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ - const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ - u32 size; - int ret; - - pfb->ram.size = nv_rd32(pfb, 0x10020c); - pfb->ram.size = (pfb->ram.size & 0xffffff00) | - ((pfb->ram.size & 0x000000ff) << 32); - - size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail; - switch (device->chipset) { - case 0xaa: - case 0xac: - case 0xaf: /* IGPs, no reordering, no real VRAM */ - ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1); - if (ret) - return ret; - - pfb->ram.type = NV_MEM_TYPE_STOLEN; - pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; - break; - default: - switch (nv_rd32(pfb, 0x100714) & 0x00000007) { - case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break; - case 1: - if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3) - pfb->ram.type = 
NV_MEM_TYPE_DDR3; - else - pfb->ram.type = NV_MEM_TYPE_DDR2; - break; - case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; - case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break; - case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break; - default: - break; - } - - ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, - nv50_fb_vram_rblock(pfb) >> 12); - if (ret) - return ret; - - pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; - break; - } - - return nv_rd32(pfb, 0x100320); -} - static int nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, u32 memtype, struct nouveau_mem **pmem) @@ -235,6 +140,195 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem) kfree(mem); } +static u32 +nv50_vram_rblock(struct nv50_fb_priv *priv) +{ + int i, parts, colbits, rowbitsa, rowbitsb, banks; + u64 rowsize, predicted; + u32 r0, r4, rt, ru, rblock_size; + + r0 = nv_rd32(priv, 0x100200); + r4 = nv_rd32(priv, 0x100204); + rt = nv_rd32(priv, 0x100250); + ru = nv_rd32(priv, 0x001540); + nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); + + for (i = 0, parts = 0; i < 8; i++) { + if (ru & (0x00010000 << i)) + parts++; + } + + colbits = (r4 & 0x0000f000) >> 12; + rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; + rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; + banks = 1 << (((r4 & 0x03000000) >> 24) + 2); + + rowsize = parts * banks * (1 << colbits) * 8; + predicted = rowsize << rowbitsa; + if (r0 & 0x00000004) + predicted += rowsize << rowbitsb; + + if (predicted != priv->base.ram.size) { + nv_warn(priv, "memory controller reports %d MiB VRAM\n", + (u32)(priv->base.ram.size >> 20)); + } + + rblock_size = rowsize; + if (rt & 1) + rblock_size *= 3; + + nv_debug(priv, "rblock %d bytes\n", rblock_size); + return rblock_size; +} + +static int +nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nouveau_bios *bios = nouveau_bios(device); + const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ + const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + struct nv50_fb_priv *priv; + u32 tags; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + switch (nv_rd32(priv, 0x100714) & 0x00000007) { + case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break; + case 1: + if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3) + priv->base.ram.type = NV_MEM_TYPE_DDR3; + else + priv->base.ram.type = NV_MEM_TYPE_DDR2; + break; + case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break; + case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break; + case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break; + default: + break; + } + + priv->base.ram.size = nv_rd32(priv, 0x10020c); + priv->base.ram.size = (priv->base.ram.size & 0xffffff00) | + ((priv->base.ram.size & 0x000000ff) << 32); + + tags = nv_rd32(priv, 0x100320); + ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1); + if (ret) + return ret; + + nv_debug(priv, "%d compression tags\n", tags); + + size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail; + switch (device->chipset) { + case 0xaa: + case 0xac: + case 0xaf: /* IGPs, no reordering, no real VRAM */ + ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1); + if (ret) + return ret; + + priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12; + priv->base.ram.type = NV_MEM_TYPE_STOLEN; + break; + default: + ret = 
nouveau_mm_init(&priv->base.vram, rsvd_head, size, + nv50_vram_rblock(priv) >> 12); + if (ret) + return ret; + + priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1; + break; + } + + priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (priv->r100c08_page) { + priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page, + 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(device->pdev, priv->r100c08)) + nv_warn(priv, "failed 0x100c08 page map\n"); + } else { + nv_warn(priv, "failed 0x100c08 page alloc\n"); + } + + priv->base.memtype_valid = nv50_fb_memtype_valid; + priv->base.ram.get = nv50_fb_vram_new; + priv->base.ram.put = nv50_fb_vram_del; + return nouveau_fb_created(&priv->base); +} + +static void +nv50_fb_dtor(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nv50_fb_priv *priv = (void *)object; + + if (priv->r100c08_page) { + pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->r100c08_page); + } + + nouveau_fb_destroy(&priv->base); +} + +static int +nv50_fb_init(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nv50_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + /* Not a clue what this is exactly. Without pointing it at a + * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) + * cause IOMMU "read from address 0" errors (rh#561267) + */ + nv_wr32(priv, 0x100c08, priv->r100c08 >> 8); + + /* This is needed to get meaningful information from 100c90 + * on traps. No idea what these values mean exactly. */ + switch (device->chipset) { + case 0x50: + nv_wr32(priv, 0x100c90, 0x000707ff); + break; + case 0xa3: + case 0xa5: + case 0xa8: + nv_wr32(priv, 0x100c90, 0x000d0fff); + break; + case 0xaf: + nv_wr32(priv, 0x100c90, 0x089d1fff); + break; + default: + nv_wr32(priv, 0x100c90, 0x001d07ff); + break; + } + + return 0; +} + +struct nouveau_oclass +nv50_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_fb_ctor, + .dtor = nv50_fb_dtor, + .init = nv50_fb_init, + .fini = _nouveau_fb_fini, + }, +}; + static const struct nouveau_enum vm_dispatch_subclients[] = { { 0x00000000, "GRCTX", NULL }, { 0x00000001, "NOTIFY", NULL }, @@ -330,11 +424,11 @@ static const struct nouveau_enum vm_fault[] = { {} }; -static void -nv50_fb_intr(struct nouveau_subdev *subdev) +void +nv50_fb_trap(struct nouveau_fb *pfb, int display) { - struct nouveau_device *device = nv_device(subdev); - struct nv50_fb_priv *priv = (void *)subdev; + struct nouveau_device *device = nv_device(pfb); + struct nv50_fb_priv *priv = (void *)pfb; const struct nouveau_enum *en, *cl; u32 trap[6], idx, chan; u8 st0, st1, st2, st3; @@ -351,6 +445,9 @@ nv50_fb_intr(struct nouveau_subdev *subdev) } nv_wr32(priv, 0x100c90, idx | 0x80000000); + if (!display) + return; + /* decode status bits into something more useful */ if (device->chipset < 0xa3 || device->chipset == 0xaa || device->chipset == 0xac) { @@ -397,101 +494,3 @@ nv50_fb_intr(struct nouveau_subdev *subdev) else printk("0x%08x\n", st1); } - -static int -nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nouveau_device *device = nv_device(parent); - struct nv50_fb_priv *priv; - int ret; - - ret = nouveau_fb_create(parent, engine, oclass, &priv); - *pobject = 
nv_object(priv); - if (ret) - return ret; - - priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (priv->r100c08_page) { - priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page, - 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(device->pdev, priv->r100c08)) - nv_warn(priv, "failed 0x100c08 page map\n"); - } else { - nv_warn(priv, "failed 0x100c08 page alloc\n"); - } - - priv->base.memtype_valid = nv50_fb_memtype_valid; - priv->base.ram.init = nv50_fb_vram_init; - priv->base.ram.get = nv50_fb_vram_new; - priv->base.ram.put = nv50_fb_vram_del; - nv_subdev(priv)->intr = nv50_fb_intr; - return nouveau_fb_preinit(&priv->base); -} - -static void -nv50_fb_dtor(struct nouveau_object *object) -{ - struct nouveau_device *device = nv_device(object); - struct nv50_fb_priv *priv = (void *)object; - - if (priv->r100c08_page) { - pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - __free_page(priv->r100c08_page); - } - - nouveau_fb_destroy(&priv->base); -} - -static int -nv50_fb_init(struct nouveau_object *object) -{ - struct nouveau_device *device = nv_device(object); - struct nv50_fb_priv *priv = (void *)object; - int ret; - - ret = nouveau_fb_init(&priv->base); - if (ret) - return ret; - - /* Not a clue what this is exactly. Without pointing it at a - * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) - * cause IOMMU "read from address 0" errors (rh#561267) - */ - nv_wr32(priv, 0x100c08, priv->r100c08 >> 8); - - /* This is needed to get meaningful information from 100c90 - * on traps. No idea what these values mean exactly. */ - switch (device->chipset) { - case 0x50: - nv_wr32(priv, 0x100c90, 0x000707ff); - break; - case 0xa3: - case 0xa5: - case 0xa8: - nv_wr32(priv, 0x100c90, 0x000d0fff); - break; - case 0xaf: - nv_wr32(priv, 0x100c90, 0x089d1fff); - break; - default: - nv_wr32(priv, 0x100c90, 0x001d07ff); - break; - } - - return 0; -} - -struct nouveau_oclass -nv50_fb_oclass = { - .handle = NV_SUBDEV(FB, 0x50), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv50_fb_ctor, - .dtor = nv50_fb_dtor, - .init = nv50_fb_init, - .fini = _nouveau_fb_fini, - }, -}; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c index 306bdf121452..9f59f2bf0079 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c @@ -61,65 +61,6 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) return likely((types[memtype] == 1)); } -static int -nvc0_fb_vram_init(struct nouveau_fb *pfb) -{ - struct nouveau_bios *bios = nouveau_bios(pfb); - const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ - const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ - u32 parts = nv_rd32(pfb, 0x022438); - u32 pmask = nv_rd32(pfb, 0x022554); - u32 bsize = nv_rd32(pfb, 0x10f20c); - u32 offset, length; - bool uniform = true; - int ret, part; - - nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800)); - nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask); - - pfb->ram.type = nouveau_fb_bios_memtype(bios); - pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 
2 : 1; - - /* read amount of vram attached to each memory controller */ - for (part = 0; part < parts; part++) { - if (!(pmask & (1 << part))) { - u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000)); - if (psize != bsize) { - if (psize < bsize) - bsize = psize; - uniform = false; - } - - nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize); - pfb->ram.size += (u64)psize << 20; - } - } - - /* if all controllers have the same amount attached, there's no holes */ - if (uniform) { - offset = rsvd_head; - length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail; - return nouveau_mm_init(&pfb->vram, offset, length, 1); - } - - /* otherwise, address lowest common amount from 0GiB */ - ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1); - if (ret) - return ret; - - /* and the rest starting from (8GiB + common_size) */ - offset = (0x0200000000ULL >> 12) + (bsize << 8); - length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail; - - ret = nouveau_mm_init(&pfb->vram, offset, length, 0); - if (ret) { - nouveau_mm_fini(&pfb->vram); - return ret; - } - - return 0; -} - static int nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, u32 memtype, struct nouveau_mem **pmem) @@ -197,6 +138,66 @@ nvc0_fb_dtor(struct nouveau_object *object) nouveau_fb_destroy(&priv->base); } +static int +nvc0_vram_detect(struct nvc0_fb_priv *priv) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + struct nouveau_fb *pfb = &priv->base; + const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ + const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + u32 parts = nv_rd32(priv, 0x022438); + u32 pmask = nv_rd32(priv, 0x022554); + u32 bsize = nv_rd32(priv, 0x10f20c); + u32 offset, length; + bool uniform = true; + int ret, part; + + nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800)); + nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask); + + priv->base.ram.type = nouveau_fb_bios_memtype(bios); + priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 
2 : 1; + + /* read amount of vram attached to each memory controller */ + for (part = 0; part < parts; part++) { + if (!(pmask & (1 << part))) { + u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000)); + if (psize != bsize) { + if (psize < bsize) + bsize = psize; + uniform = false; + } + + nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize); + priv->base.ram.size += (u64)psize << 20; + } + } + + /* if all controllers have the same amount attached, there's no holes */ + if (uniform) { + offset = rsvd_head; + length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail; + return nouveau_mm_init(&pfb->vram, offset, length, 1); + } + + /* otherwise, address lowest common amount from 0GiB */ + ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1); + if (ret) + return ret; + + /* and the rest starting from (8GiB + common_size) */ + offset = (0x0200000000ULL >> 12) + (bsize << 8); + length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail; + + ret = nouveau_mm_init(&pfb->vram, offset, length, 0); + if (ret) { + nouveau_mm_fini(&pfb->vram); + return ret; + } + + return 0; +} + static int nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -212,10 +213,13 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.memtype_valid = nvc0_fb_memtype_valid; - priv->base.ram.init = nvc0_fb_vram_init; priv->base.ram.get = nvc0_fb_vram_new; priv->base.ram.put = nv50_fb_vram_del; + ret = nvc0_vram_detect(priv); + if (ret) + return ret; + priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!priv->r100c10_page) return -ENOMEM; @@ -225,7 +229,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (pci_dma_mapping_error(device->pdev, priv->r100c10)) return -EFAULT; - return nouveau_fb_preinit(&priv->base); + return nouveau_fb_created(&priv->base); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c index dc27e794a851..fe1ebf199ba9 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c @@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch) ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); udelay(1); if (!timeout--) { - AUX_ERR("begin idle timeout 0x%08x\n", ctrl); + AUX_ERR("begin idle timeout 0x%08x", ctrl); return -EBUSY; } } while (ctrl & 0x03010000); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c index f5bbd3834116..ba4d28b50368 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c @@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object) } static u32 -nv04_instobj_rd32(struct nouveau_object *object, u64 addr) +nv04_instobj_rd32(struct nouveau_object *object, u32 addr) { struct nv04_instobj_priv *node = (void *)object; return nv_ro32(object->engine, node->mem->offset + addr); } static void -nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nv04_instobj_priv *node = (void *)object; nv_wo32(object->engine, node->mem->offset + addr, data); @@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object) } static u32 -nv04_instmem_rd32(struct nouveau_object *object, u64 addr) +nv04_instmem_rd32(struct nouveau_object *object, 
u32 addr) { return nv_rd32(object, 0x700000 + addr); } static void -nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) +nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data) { return nv_wr32(object, 0x700000 + addr, data); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c index da64253201ef..73c52ebd5932 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c @@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, } static u32 -nv40_instmem_rd32(struct nouveau_object *object, u64 addr) +nv40_instmem_rd32(struct nouveau_object *object, u32 addr) { struct nv04_instmem_priv *priv = (void *)object; return ioread32_native(priv->iomem + addr); } static void -nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) +nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data) { struct nv04_instmem_priv *priv = (void *)object; iowrite32_native(data, priv->iomem + addr); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c index cfc7e31461de..27ef0891d10b 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c @@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object) } static u32 -nv50_instobj_rd32(struct nouveau_object *object, u64 offset) +nv50_instobj_rd32(struct nouveau_object *object, u32 offset) { struct nv50_instmem_priv *priv = (void *)object->engine; struct nv50_instobj_priv *node = (void *)object; @@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u64 offset) } static void -nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data) +nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data) { struct nv50_instmem_priv *priv = (void *)object->engine; struct nv50_instobj_priv *node = (void *)object; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 8379aafa6e1b..de5721cfc4c2 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/base.c @@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev) struct nouveau_mc *pmc = nouveau_mc(subdev); const struct nouveau_mc_intr *map = pmc->intr_map; struct nouveau_subdev *unit; - u32 stat, intr; + u32 stat; - intr = stat = nv_rd32(pmc, 0x000100); + stat = nv_rd32(pmc, 0x000100); while (stat && map->stat) { if (stat & map->stat) { unit = nouveau_subdev(subdev, map->unit); if (unit && unit->intr) unit->intr(unit); - intr &= ~map->stat; + stat &= ~map->stat; } map++; } - if (intr) { + if (stat) { nv_error(pmc, "unknown intr 0x%08x\n", stat); } } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c index 8d759f830323..cedf33b02977 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c @@ -39,7 +39,6 @@ nv50_mc_intr[] = { { 0x00200000, NVDEV_SUBDEV_GPIO }, { 0x04000000, NVDEV_ENGINE_DISP }, { 0x80000000, NVDEV_ENGINE_SW }, - { 0x0000d101, NVDEV_SUBDEV_FB }, {}, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c index ceb5c83f9459..a001e4c4d38d 100644 --- 
a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c @@ -40,7 +40,6 @@ nv98_mc_intr[] = { { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */ { 0x04000000, NVDEV_ENGINE_DISP }, { 0x80000000, NVDEV_ENGINE_SW }, - { 0x0040d101, NVDEV_SUBDEV_FB }, {}, }; diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index 92796682722d..c2b81e30a17d 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c @@ -36,7 +36,6 @@ nvc0_mc_intr[] = { { 0x00000100, NVDEV_ENGINE_FIFO }, { 0x00001000, NVDEV_ENGINE_GR }, { 0x00008000, NVDEV_ENGINE_BSP }, - { 0x00020000, NVDEV_ENGINE_VP }, { 0x00100000, NVDEV_SUBDEV_TIMER }, { 0x00200000, NVDEV_SUBDEV_GPIO }, { 0x02000000, NVDEV_SUBDEV_LTCG }, diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c index 41241922263f..cbf1fc60a386 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -246,26 +246,14 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) return nouveau_abi16_put(abi16, -ENODEV); client = nv_client(abi16->client); - device = nv_device(abi16->device); - imem = nouveau_instmem(device); - pfb = nouveau_fb(device); - - /* hack to allow channel engine type specification on kepler */ - if (device->card_type >= NV_E0) { - if (init->fb_ctxdma_handle != ~0) - init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; - else - init->fb_ctxdma_handle = init->tt_ctxdma_handle; - - /* allow flips to be executed if this is a graphics channel */ - init->tt_ctxdma_handle = 0; - if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR) - init->tt_ctxdma_handle = 1; - } if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) return nouveau_abi16_put(abi16, -EINVAL); + device = nv_device(abi16->device); + imem = nouveau_instmem(device); + pfb = nouveau_fb(device); + /* allocate "abi16 channel" data and make up a handle for it */ init->channel = ffsll(~abi16->handles); if (!init->channel--) @@ -280,6 +268,11 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) abi16->handles |= (1 << init->channel); /* create channel object and initialise dma and fence management */ + if (device->card_type >= NV_E0) { + init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; + init->tt_ctxdma_handle = 0; + } + ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | init->channel, init->fb_ctxdma_handle, init->tt_ctxdma_handle, &chan->chan); @@ -389,7 +382,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) struct nouveau_abi16_chan *chan, *temp; struct nouveau_abi16_ntfy *ntfy; struct nouveau_object *object; - struct nv_dma_class args = {}; + struct nv_dma_class args; int ret; if (unlikely(!abi16)) diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c index d97f20069d3e..48783e14114c 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -35,14 +35,6 @@ static struct nouveau_dsm_priv { acpi_handle rom_handle; } nouveau_dsm_priv; -bool nouveau_is_optimus(void) { - return nouveau_dsm_priv.optimus_detected; -} - -bool nouveau_is_v1_dsm(void) { - return nouveau_dsm_priv.dsm_detected; -} - #define NOUVEAU_DSM_HAS_MUX 0x1 #define NOUVEAU_DSM_HAS_OPT 0x2 @@ -191,7 +183,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero static int 
nouveau_dsm_switchto(enum vga_switcheroo_client_id id) { - if (!nouveau_dsm_priv.dsm_detected) + /* perhaps the _DSM functions are mutually exclusive, but prepare for + * the future */ + if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; if (id == VGA_SWITCHEROO_IGD) return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); @@ -207,7 +201,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, /* Optimus laptops have the card already disabled in * nouveau_switcheroo_set_state */ - if (!nouveau_dsm_priv.dsm_detected) + if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); @@ -289,24 +283,24 @@ static bool nouveau_dsm_detect(void) has_optimus = 1; } - /* find the optimus DSM or the old v1 DSM */ - if (has_optimus == 1) { + if (vga_count == 2 && has_dsm && guid_valid) { acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); - printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", + printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", acpi_method_name); - nouveau_dsm_priv.optimus_detected = true; + nouveau_dsm_priv.dsm_detected = true; ret = true; - } else if (vga_count == 2 && has_dsm && guid_valid) { + } + + if (has_optimus == 1) { acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); - printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", + printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", acpi_method_name); - nouveau_dsm_priv.dsm_detected = true; + nouveau_dsm_priv.optimus_detected = true; ret = true; } - return ret; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h index d0da230d7706..08af67722b57 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.h @@ -4,8 +4,6 @@ #define ROM_BIOS_PAGE 4096 #if defined(CONFIG_ACPI) -bool nouveau_is_optimus(void); -bool nouveau_is_v1_dsm(void); void nouveau_register_dsm_handler(void); void nouveau_unregister_dsm_handler(void); void nouveau_switcheroo_optimus_dsm(void); @@ -13,8 +11,6 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); bool nouveau_acpi_rom_supported(struct pci_dev *pdev); void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *); #else -static inline bool nouveau_is_optimus(void) { return false; }; -static inline bool nouveau_is_v1_dsm(void) { return false; }; static inline void nouveau_register_dsm_handler(void) {} static inline void nouveau_unregister_dsm_handler(void) {} static inline void nouveau_switcheroo_optimus_dsm(void) {} diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c index 865eddfa30a7..09fdef235882 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -624,6 +624,206 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b return 0; } +/* BIT 'U'/'d' table encoder subtables have hashes matching them to + * a particular set of encoders. + * + * This function returns true if a particular DCB entry matches. 
+ */ +bool +bios_encoder_match(struct dcb_output *dcb, u32 hash) +{ + if ((hash & 0x000000f0) != (dcb->location << 4)) + return false; + if ((hash & 0x0000000f) != dcb->type) + return false; + if (!(hash & (dcb->or << 16))) + return false; + + switch (dcb->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + if (hash & 0x00c00000) { + if (!(hash & (dcb->sorconf.link << 22))) + return false; + } + default: + return true; + } +} + +int +nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk, + struct dcb_output *dcbent, int crtc) +{ + /* + * The display script table is located by the BIT 'U' table. + * + * It contains an array of pointers to various tables describing + * a particular output type. The first 32-bits of the output + * tables contains similar information to a DCB entry, and is + * used to decide whether that particular table is suitable for + * the output you want to access. + * + * The "record header length" field here seems to indicate the + * offset of the first configuration entry in the output tables. + * This is 10 on most cards I've seen, but 12 has been witnessed + * on DP cards, and there's another script pointer within the + * header. + * + * offset + 0 ( 8 bits): version + * offset + 1 ( 8 bits): header length + * offset + 2 ( 8 bits): record length + * offset + 3 ( 8 bits): number of records + * offset + 4 ( 8 bits): record header length + * offset + 5 (16 bits): pointer to first output script table + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + uint8_t *table = &bios->data[bios->display.script_table_ptr]; + uint8_t *otable = NULL; + uint16_t script; + int i; + + if (!bios->display.script_table_ptr) { + NV_ERROR(drm, "No pointer to output script table\n"); + return 1; + } + + /* + * Nothing useful has been in any of the pre-2.0 tables I've seen, + * so until they are, we really don't need to care. + */ + if (table[0] < 0x20) + return 1; + + if (table[0] != 0x20 && table[0] != 0x21) { + NV_ERROR(drm, "Output script table version 0x%02x unknown\n", + table[0]); + return 1; + } + + /* + * The output script tables describing a particular output type + * look as follows: + * + * offset + 0 (32 bits): output this table matches (hash of DCB) + * offset + 4 ( 8 bits): unknown + * offset + 5 ( 8 bits): number of configurations + * offset + 6 (16 bits): pointer to some script + * offset + 8 (16 bits): pointer to some script + * + * headerlen == 10 + * offset + 10 : configuration 0 + * + * headerlen == 12 + * offset + 10 : pointer to some script + * offset + 12 : configuration 0 + * + * Each config entry is as follows: + * + * offset + 0 (16 bits): unknown, assumed to be a match value + * offset + 2 (16 bits): pointer to script table (clock set?) + * offset + 4 (16 bits): pointer to script table (reset?) + * + * There doesn't appear to be a count value to say how many + * entries exist in each script table, instead, a 0 value in + * the first 16-bit word seems to indicate both the end of the + * list and the default entry. The second 16-bit word in the + * script tables is a pointer to the script to execute. 
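For the common case where a real pixel clock is passed in (pclk not 0, -1 or -2), the lookup that follows boils down to the sketch here. The helper name is invented; it assumes the layout documented above and the existing ROM16()/clkcmptable() helpers, and only summarises the code below.

/* Sketch only: pick a clock script from an already-matched output table.
 * table[4] is the record header length, each config entry is 6 bytes.
 */
static u16
example_pick_clock_script(struct nvbios *bios, u8 *table, u8 *otable,
			  u16 type, int pclk)
{
	u16 script;
	int i;

	/* find the config entry whose 16-bit match value equals 'type' */
	for (i = 0; i < otable[5]; i++) {
		if (ROM16(otable[table[4] + i * 6]) == type)
			break;
	}
	if (i == otable[5])
		i = 0;	/* no match: fall back to the first entry */

	/* +2 is the "clock set?" script, +4 the "reset?" one */
	if (pclk > 0)
		script = ROM16(otable[table[4] + i * 6 + 2]);
	else
		script = ROM16(otable[table[4] + i * 6 + 4]);
	if (script)
		script = clkcmptable(bios, script, pclk > 0 ? pclk : -pclk);
	return script;	/* 0 means nothing to run */
}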
+ */ + + NV_DEBUG(drm, "Searching for output entry for %d %d %d\n", + dcbent->type, dcbent->location, dcbent->or); + for (i = 0; i < table[3]; i++) { + otable = ROMPTR(dev, table[table[1] + (i * table[2])]); + if (otable && bios_encoder_match(dcbent, ROM32(otable[0]))) + break; + } + + if (!otable) { + NV_DEBUG(drm, "failed to match any output table\n"); + return 1; + } + + if (pclk < -2 || pclk > 0) { + /* Try to find matching script table entry */ + for (i = 0; i < otable[5]; i++) { + if (ROM16(otable[table[4] + i*6]) == type) + break; + } + + if (i == otable[5]) { + NV_ERROR(drm, "Table 0x%04x not found for %d/%d, " + "using first\n", + type, dcbent->type, dcbent->or); + i = 0; + } + } + + if (pclk == 0) { + script = ROM16(otable[6]); + if (!script) { + NV_DEBUG(drm, "output script 0 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } else + if (pclk == -1) { + script = ROM16(otable[8]); + if (!script) { + NV_DEBUG(drm, "output script 1 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } else + if (pclk == -2) { + if (table[4] >= 12) + script = ROM16(otable[10]); + else + script = 0; + if (!script) { + NV_DEBUG(drm, "output script 2 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } else + if (pclk > 0) { + script = ROM16(otable[table[4] + i*6 + 2]); + if (script) + script = clkcmptable(bios, script, pclk); + if (!script) { + NV_DEBUG(drm, "clock script 0 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } else + if (pclk < 0) { + script = ROM16(otable[table[4] + i*6 + 4]); + if (script) + script = clkcmptable(bios, script, -pclk); + if (!script) { + NV_DEBUG(drm, "clock script 1 not found\n"); + return 1; + } + + NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script); + nouveau_bios_run_init_table(dev, script, dcbent, crtc); + } + + return 0; +} + + int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk) { /* @@ -1012,6 +1212,31 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, return 0; } +static int +parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios, + struct bit_entry *bitentry) +{ + /* + * Parses the pointer to the G80 output script tables + * + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): output script table pointer + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + uint16_t outputscripttableptr; + + if (bitentry->length != 3) { + NV_ERROR(drm, "Do not understand BIT U table\n"); + return -EINVAL; + } + + outputscripttableptr = ROM16(bios->data[bitentry->offset]); + bios->display.script_table_ptr = outputscripttableptr; + return 0; +} + struct bit_table { const char id; int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); @@ -1088,6 +1313,7 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset) parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? 
*/ parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds)); parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds)); + parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U)); return 0; } @@ -2098,7 +2324,7 @@ nouveau_run_vbios_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; - int ret = 0; + int i, ret = 0; /* Reset the BIOS head to 0. */ bios->state.crtchead = 0; @@ -2111,6 +2337,13 @@ nouveau_run_vbios_init(struct drm_device *dev) bios->fp.lvds_init_run = false; } + if (nv_device(drm->device)->card_type >= NV_50) { + for (i = 0; bios->execute && i < bios->dcb.entries; i++) { + nouveau_bios_run_display_table(dev, 0, 0, + &bios->dcb.entry[i], -1); + } + } + return ret; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h index f68c54ca422f..3befbb821a56 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -127,6 +127,12 @@ struct nvbios { int crtchead; } state; + struct { + struct dcb_output *output; + int crtc; + uint16_t script_table_ptr; + } display; + struct { uint16_t fptablepointer; /* also used by tmds */ uint16_t fpxlatetableptr; @@ -179,6 +185,8 @@ void nouveau_bios_takedown(struct drm_device *dev); int nouveau_run_vbios_init(struct drm_device *); struct dcb_connector_table_entry * nouveau_bios_connector_entry(struct drm_device *, int index); +int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk, + struct dcb_output *, int crtc); bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); uint8_t *nouveau_bios_embedded_edid(struct drm_device *); int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, @@ -187,5 +195,6 @@ int run_tmds_table(struct drm_device *, struct dcb_output *, int head, int pxclk); int call_lvds_script(struct drm_device *, struct dcb_output *, int head, enum LVDS_script, int pxclk); +bool bios_encoder_match(struct dcb_output *, u32 hash); #endif diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c index 5614c89148cb..35ac57f0aab6 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -225,7 +225,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, type, &nvbo->placement, - align >> PAGE_SHIFT, false, NULL, acc_size, sg, + align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg, nouveau_bo_del_ttm); if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. 
*/ @@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) nouveau_bo_placement_set(nvbo, memtype, 0); - ret = nouveau_bo_validate(nvbo, false, false); + ret = nouveau_bo_validate(nvbo, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); - ret = nouveau_bo_validate(nvbo, false, false); + ret = nouveau_bo_validate(nvbo, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo) int nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, - bool no_wait_gpu) + bool no_wait_reserve, bool no_wait_gpu) { int ret; - ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, - interruptible, no_wait_gpu); + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible, + no_wait_reserve, no_wait_gpu); if (ret) return ret; @@ -556,7 +556,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) static int nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, struct nouveau_bo *nvbo, bool evict, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct nouveau_fence *fence = NULL; int ret; @@ -565,8 +566,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, if (ret) return ret; - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict, - no_wait_gpu, new_mem); + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, + no_wait_reserve, no_wait_gpu, new_mem); nouveau_fence_unref(&fence); return ret; } @@ -964,7 +965,8 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo, static int nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_channel *chan = chan = drm->channel; @@ -993,6 +995,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); if (ret == 0) { ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, + no_wait_reserve, no_wait_gpu, new_mem); } @@ -1061,7 +1064,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm) static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; struct ttm_placement placement; @@ -1074,7 +1078,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); if (ret) return ret; @@ -1082,11 +1086,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); if (ret) goto out; - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); out: ttm_bo_mem_put(bo, &tmp_mem); return ret; @@ -1094,7 +1098,8 @@ 
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; struct ttm_placement placement; @@ -1107,15 +1112,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); if (ret) return ret; - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); if (ret) goto out; @@ -1190,7 +1195,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, static int nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); @@ -1214,26 +1220,23 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* CPU copy if we have no accelerated method available */ if (!drm->ttm.move) { - ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); goto out; } /* Hardware assisted copy. */ if (new_mem->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flipd(bo, evict, intr, - no_wait_gpu, new_mem); + ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); else if (old_mem->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flips(bo, evict, intr, - no_wait_gpu, new_mem); + ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); else - ret = nouveau_bo_move_m2mf(bo, evict, intr, - no_wait_gpu, new_mem); + ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); if (!ret) goto out; /* Fallback to software copy. 
*/ - ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); out: if (nv_device(drm->device)->card_type < NV_50) { @@ -1340,7 +1343,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) nvbo->placement.fpfn = 0; nvbo->placement.lpfn = mappable; nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); - return nouveau_bo_validate(nvbo, false, false); + return nouveau_bo_validate(nvbo, false, true, false); } static int @@ -1469,19 +1472,19 @@ nouveau_bo_fence_ref(void *sync_obj) } static bool -nouveau_bo_fence_signalled(void *sync_obj) +nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg) { return nouveau_fence_done(sync_obj); } static int -nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr) +nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) { return nouveau_fence_wait(sync_obj, lazy, intr); } static int -nouveau_bo_fence_flush(void *sync_obj) +nouveau_bo_fence_flush(void *sync_obj, void *sync_arg) { return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.h b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.h index 25ca37989d2c..dec51b1098fe 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -76,7 +76,7 @@ u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index); void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val); void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, - bool no_wait_gpu); + bool no_wait_reserve, bool no_wait_gpu); struct nouveau_vma * nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_chan.c b/trunk/drivers/gpu/drm/nouveau/nouveau_chan.c index 174300b6a02e..c1d7301c0e9c 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -76,8 +76,6 @@ nouveau_channel_del(struct nouveau_channel **pchan) nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle); nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); nouveau_bo_unmap(chan->push.buffer); - if (chan->push.buffer && chan->push.buffer->pin_refcnt) - nouveau_bo_unpin(chan->push.buffer); nouveau_bo_ref(NULL, &chan->push.buffer); kfree(chan); } @@ -269,7 +267,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) struct nouveau_fb *pfb = nouveau_fb(device); struct nouveau_software_chan *swch; struct nouveau_object *object; - struct nv_dma_class args = {}; + struct nv_dma_class args; int ret, i; /* allocate dma objects to cover all allowed vram, and gart */ @@ -348,7 +346,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) /* allocate software object class (used for fences on <= nv05, and * to signal flip completion), bind it to a subchannel. 
*/ - if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) { + if (chan != chan->drm->cechan) { ret = nouveau_object_new(nv_object(client), chan->handle, NvSw, nouveau_abi16_swclass(chan->drm), NULL, 0, &object); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c index ac340ba32017..d3595b23434a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -110,6 +110,7 @@ nouveau_connector_destroy(struct drm_connector *connector) dev = nv_connector->base.dev; drm = nouveau_drm(dev); gpio = nouveau_gpio(drm->device); + NV_DEBUG(drm, "\n"); if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) { gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff, @@ -220,7 +221,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, } if (nv_connector->type == DCB_CONNECTOR_DVI_I) { - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, dev->mode_config.dvi_i_subconnector_property, nv_encoder->dcb->type == DCB_OUTPUT_TMDS ? DRM_MODE_SUBCONNECTOR_DVID : @@ -928,6 +929,8 @@ nouveau_connector_create(struct drm_device *dev, int index) int type, ret = 0; bool dummy; + NV_DEBUG(drm, "\n"); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { nv_connector = nouveau_connector(connector); if (nv_connector->index == index) @@ -1040,7 +1043,7 @@ nouveau_connector_create(struct drm_device *dev, int index) /* Init DVI-I specific properties */ if (nv_connector->type == DCB_CONNECTOR_DVI_I) - drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0); + drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); /* Add overscan compensation options to digital outputs */ if (disp->underscan_property && @@ -1048,31 +1051,31 @@ nouveau_connector_create(struct drm_device *dev, int index) type == DRM_MODE_CONNECTOR_DVII || type == DRM_MODE_CONNECTOR_HDMIA || type == DRM_MODE_CONNECTOR_DisplayPort)) { - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->underscan_hborder_property, 0); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->underscan_vborder_property, 0); } /* Add hue and saturation options */ if (disp->vibrant_hue_property) - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->vibrant_hue_property, 90); if (disp->color_vibrance_property) - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->color_vibrance_property, 150); switch (nv_connector->type) { case DCB_CONNECTOR_VGA: if (nv_device(drm->device)->card_type >= NV_50) { - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, nv_connector->scaling_mode); } @@ -1085,18 +1088,18 @@ nouveau_connector_create(struct drm_device *dev, int index) default: nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, nv_connector->scaling_mode); if (disp->dithering_mode) { nv_connector->dithering_mode = DITHERING_MODE_AUTO; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, 
disp->dithering_mode, nv_connector->dithering_mode); } if (disp->dithering_depth) { nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, disp->dithering_depth, nv_connector->dithering_depth); } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.h b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.h index 20eb84cce9e6..ebdb87670a8f 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -28,7 +28,6 @@ #define __NOUVEAU_CONNECTOR_H__ #include -#include "nouveau_crtc.h" struct nouveau_i2c_port; @@ -81,21 +80,6 @@ static inline struct nouveau_connector *nouveau_connector( return container_of(con, struct nouveau_connector, base); } -static inline struct nouveau_connector * -nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) -{ - struct drm_device *dev = nv_crtc->base.dev; - struct drm_connector *connector; - struct drm_crtc *crtc = to_drm_crtc(nv_crtc); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder && connector->encoder->crtc == crtc) - return nouveau_connector(connector); - } - - return NULL; -} - struct drm_connector * nouveau_connector_create(struct drm_device *, int index); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h b/trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h index d1e5890784d7..e6d0d1eb0133 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_crtc.h @@ -82,6 +82,16 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc) return &crtc->base; } +int nv50_crtc_create(struct drm_device *dev, int index); +int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv, + uint32_t buffer_handle, uint32_t width, + uint32_t height); +int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y); + int nv04_cursor_init(struct nouveau_crtc *); +int nv50_cursor_init(struct nouveau_crtc *); + +struct nouveau_connector * +nouveau_crtc_connector_get(struct nouveau_crtc *crtc); #endif /* __NOUVEAU_CRTC_H__ */ diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_display.c b/trunk/drivers/gpu/drm/nouveau/nouveau_display.c index e4188f24fc75..86124b131f4f 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_display.c @@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev, nv_fb->r_dma = NvEvoVRAM_LP; switch (fb->depth) { - case 8: nv_fb->r_format = 0x1e00; break; - case 15: nv_fb->r_format = 0xe900; break; - case 16: nv_fb->r_format = 0xe800; break; + case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break; + case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break; + case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break; case 24: - case 32: nv_fb->r_format = 0xcf00; break; - case 30: nv_fb->r_format = 0xd100; break; + case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break; + case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break; default: NV_ERROR(drm, "unknown depth %d\n", fb->depth); return -EINVAL; @@ -324,7 +324,7 @@ nouveau_display_create(struct drm_device *dev) disp->underscan_vborder_property = drm_property_create_range(dev, 0, "underscan vborder", 0, 128); - if (gen >= 1) { + if (gen == 1) { disp->vibrant_hue_property = drm_property_create(dev, DRM_MODE_PROP_RANGE, "vibrant hue", 2); @@ -366,7 +366,10 @@ nouveau_display_create(struct drm_device *dev) if (nv_device(drm->device)->card_type < 
NV_50) ret = nv04_display_create(dev); else + if (nv_device(drm->device)->card_type < NV_D0) ret = nv50_display_create(dev); + else + ret = nvd0_display_create(dev); if (ret) goto disp_create_err; @@ -397,12 +400,11 @@ nouveau_display_destroy(struct drm_device *dev) nouveau_backlight_exit(dev); drm_vblank_cleanup(dev); - drm_kms_helper_poll_fini(dev); - drm_mode_config_cleanup(dev); - if (disp->dtor) disp->dtor(dev); + drm_kms_helper_poll_fini(dev); + drm_mode_config_cleanup(dev); nouveau_drm(dev)->display = NULL; kfree(disp); } @@ -657,7 +659,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* Emit a page flip */ if (nv_device(drm->device)->card_type >= NV_50) { - ret = nv50_display_flip_next(crtc, fb, chan, 0); + if (nv_device(drm->device)->card_type >= NV_D0) + ret = nvd0_display_flip_next(crtc, fb, chan, 0); + else + ret = nv50_display_flip_next(crtc, fb, chan); if (ret) { mutex_unlock(&chan->cli->mutex); goto fail_unreserve; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c b/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c index 59838651ee8f..978a108ba7a1 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -30,17 +30,60 @@ #include "nouveau_encoder.h" #include "nouveau_crtc.h" -#include - #include #include +u8 * +nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct bit_entry d; + u8 *table; + int i; + + if (bit_table(dev, 'd', &d)) { + NV_ERROR(drm, "BIT 'd' table not found\n"); + return NULL; + } + + if (d.version != 1) { + NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version); + return NULL; + } + + table = ROMPTR(dev, d.data[0]); + if (!table) { + NV_ERROR(drm, "displayport table pointer invalid\n"); + return NULL; + } + + switch (table[0]) { + case 0x20: + case 0x21: + case 0x30: + case 0x40: + break; + default: + NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]); + return NULL; + } + + for (i = 0; i < table[3]; i++) { + *entry = ROMPTR(dev, table[table[1] + (i * table[2])]); + if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0]))) + return table; + } + + NV_ERROR(drm, "displayport encoder table not found\n"); + return NULL; +} + /****************************************************************************** * link training *****************************************************************************/ struct dp_state { struct nouveau_i2c_port *auxch; - struct nouveau_object *core; + struct dp_train_func *func; struct dcb_output *dcb; int crtc; u8 *dpcd; @@ -54,20 +97,13 @@ static void dp_set_link_config(struct drm_device *dev, struct dp_state *dp) { struct nouveau_drm *drm = nouveau_drm(dev); - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; u8 sink[2]; - u32 data; NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); /* set desired link configuration on the source */ - data = ((dp->link_bw / 27000) << 8) | dp->link_nr; - if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) - data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH; - - nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data); + dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw, + dp->dpcd[2] & DP_ENHANCED_FRAME_CAP); /* inform the sink of the new configuration */ sink[0] = dp->link_bw / 27000; @@ -82,14 +118,11 @@ static void dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern) { 
struct nouveau_drm *drm = nouveau_drm(dev); - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; u8 sink_tp; NV_DEBUG(drm, "training pattern %d\n", pattern); - nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern); + dp->func->train_set(dev, dp->dcb, pattern); nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1); sink_tp &= ~DP_TRAINING_PATTERN_MASK; @@ -101,9 +134,6 @@ static int dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) { struct nouveau_drm *drm = nouveau_drm(dev); - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; int i; for (i = 0; i < dp->link_nr; i++) { @@ -118,8 +148,7 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]); - - nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre); + dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre); } return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4); @@ -205,32 +234,59 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp) } static void -dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread) +dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable) { - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; - - nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ? - NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON : - NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) | - NV94_DISP_SOR_DP_TRAIN_OP_INIT); + u16 script = 0x0000; + u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); + if (table) { + if (table[0] >= 0x20 && table[0] <= 0x30) { + if (enable) script = ROM16(entry[12]); + else script = ROM16(entry[14]); + } else + if (table[0] == 0x40) { + if (enable) script = ROM16(entry[11]); + else script = ROM16(entry[13]); + } + } + + nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); +} + +static void +dp_link_train_init(struct drm_device *dev, struct dp_state *dp) +{ + u16 script = 0x0000; + u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); + if (table) { + if (table[0] >= 0x20 && table[0] <= 0x30) + script = ROM16(entry[6]); + else + if (table[0] == 0x40) + script = ROM16(entry[5]); + } + + nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); } static void dp_link_train_fini(struct drm_device *dev, struct dp_state *dp) { - struct dcb_output *dcb = dp->dcb; - const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); - const u32 moff = (dp->crtc << 3) | (link << 2) | or; + u16 script = 0x0000; + u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); + if (table) { + if (table[0] >= 0x20 && table[0] <= 0x30) + script = ROM16(entry[8]); + else + if (table[0] == 0x40) + script = ROM16(entry[7]); + } - nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, - NV94_DISP_SOR_DP_TRAIN_OP_FINI); + nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); } static bool nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, - struct nouveau_object *core) + struct dp_train_func *func) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); @@ -248,7 +304,7 @@ nouveau_dp_link_train(struct drm_encoder 
*encoder, u32 datarate, if (!dp.auxch) return false; - dp.core = core; + dp.func = func; dp.dcb = nv_encoder->dcb; dp.crtc = nv_crtc->index; dp.dpcd = nv_encoder->dp.dpcd; @@ -262,8 +318,11 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, */ gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false); - /* enable down-spreading and execute pre-train script from vbios */ - dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1); + /* enable down-spreading, if possible */ + dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1); + + /* execute pre-train script from vbios */ + dp_link_train_init(dev, &dp); /* start off at highest link rate supported by encoder and display */ while (*link_bw > nv_encoder->dp.link_bw) @@ -306,7 +365,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, void nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate, - struct nouveau_object *core) + struct dp_train_func *func) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_drm *drm = nouveau_drm(encoder->dev); @@ -326,7 +385,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate, nv_wraux(auxch, DP_SET_POWER, &status, 1); if (mode == DRM_MODE_DPMS_ON) - nouveau_dp_link_train(encoder, datarate, core); + nouveau_dp_link_train(encoder, datarate, func); } static void diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c index 01c403ddb99b..8503b2ea570a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -49,6 +49,8 @@ #include "nouveau_fbcon.h" #include "nouveau_fence.h" +#include "nouveau_ttm.h" + MODULE_PARM_DESC(config, "option string to pass to driver core"); static char *nouveau_config; module_param_named(config, nouveau_config, charp, 0400); @@ -147,7 +149,7 @@ nouveau_accel_init(struct nouveau_drm *drm) NV_ERROR(drm, "failed to create ce channel, %d\n", ret); arg0 = NVE0_CHANNEL_IND_ENGINE_GR; - arg1 = 1; + arg1 = 0; } else { arg0 = NvDmaFB; arg1 = NvDmaTT; @@ -222,7 +224,6 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent) boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif remove_conflicting_framebuffers(aper, "nouveaufb", boot); - kfree(aper); ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev), nouveau_config, nouveau_debug, &device); @@ -394,12 +395,17 @@ nouveau_drm_remove(struct pci_dev *pdev) } int -nouveau_do_suspend(struct drm_device *dev) +nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state) { + struct drm_device *dev = pci_get_drvdata(pdev); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; int ret; + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF || + pm_state.event == PM_EVENT_PRETHAW) + return 0; + if (dev->mode_config.num_crtc) { NV_INFO(drm, "suspending fbcon...\n"); nouveau_fbcon_set_suspend(dev, 1); @@ -430,6 +436,13 @@ nouveau_do_suspend(struct drm_device *dev) goto fail_client; nouveau_agp_fini(drm); + + pci_save_state(pdev); + if (pm_state.event == PM_EVENT_SUSPEND) { + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + } + return 0; fail_client: @@ -444,33 +457,24 @@ nouveau_do_suspend(struct drm_device *dev) return ret; } -int nouveau_pmops_suspend(struct device *dev) +int +nouveau_drm_resume(struct pci_dev *pdev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *dev = pci_get_drvdata(pdev); + struct 
nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_cli *cli; int ret; - if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - ret = nouveau_do_suspend(drm_dev); + NV_INFO(drm, "re-enabling device...\n"); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); if (ret) return ret; - - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -int -nouveau_do_resume(struct drm_device *dev) -{ - struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_cli *cli; - - NV_INFO(drm, "re-enabling device...\n"); + pci_set_master(pdev); nouveau_agp_reset(drm); @@ -496,42 +500,6 @@ nouveau_do_resume(struct drm_device *dev) return 0; } -int nouveau_pmops_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - int ret; - - if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) - return 0; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) - return ret; - pci_set_master(pdev); - - return nouveau_do_resume(drm_dev); -} - -static int nouveau_pmops_freeze(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - - return nouveau_do_suspend(drm_dev); -} - -static int nouveau_pmops_thaw(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); - - return nouveau_do_resume(drm_dev); -} - - static int nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) { @@ -684,22 +652,14 @@ nouveau_drm_pci_table[] = { {} }; -static const struct dev_pm_ops nouveau_pm_ops = { - .suspend = nouveau_pmops_suspend, - .resume = nouveau_pmops_resume, - .freeze = nouveau_pmops_freeze, - .thaw = nouveau_pmops_thaw, - .poweroff = nouveau_pmops_freeze, - .restore = nouveau_pmops_resume, -}; - static struct pci_driver nouveau_drm_pci_driver = { .name = "nouveau", .id_table = nouveau_drm_pci_table, .probe = nouveau_drm_probe, .remove = nouveau_drm_remove, - .driver.pm = &nouveau_pm_ops, + .suspend = nouveau_drm_suspend, + .resume = nouveau_drm_resume, }; static int __init diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h index aa89eb938b47..a10169927086 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev) return nv_device(nouveau_drm(dev)->device); } -int nouveau_pmops_suspend(struct device *); -int nouveau_pmops_resume(struct device *); +int nouveau_drm_suspend(struct pci_dev *, pm_message_t); +int nouveau_drm_resume(struct pci_dev *); #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) #define NV_ERROR(cli, fmt, args...) 
nv_error((cli), fmt, ##args) diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_encoder.h b/trunk/drivers/gpu/drm/nouveau/nouveau_encoder.h index d0d95bd511ab..6a17bf2ba9a4 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -93,9 +93,14 @@ get_slave_funcs(struct drm_encoder *enc) /* nouveau_dp.c */ bool nouveau_dp_detect(struct drm_encoder *); void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate, - struct nouveau_object *); + struct dp_train_func *); +u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **); struct nouveau_connector * nouveau_encoder_connector_get(struct nouveau_encoder *encoder); +int nv50_sor_create(struct drm_connector *, struct dcb_output *); +void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32); +int nv50_dac_create(struct drm_connector *, struct dcb_output *); + #endif /* __NOUVEAU_ENCODER_H__ */ diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c b/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c index 8bf695c52f95..5e2f52158f19 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, return ret; } - ret = nouveau_bo_validate(nvbo, true, false); + ret = nouveau_bo_validate(nvbo, true, false, false); if (unlikely(ret)) { if (ret != -ERESTARTSYS) NV_ERROR(drm, "fail ttm_validate\n"); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c new file mode 100644 index 000000000000..2c672cebc889 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_hdmi.c @@ -0,0 +1,261 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include +#include "nouveau_drm.h" +#include "nouveau_connector.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" + +static bool +hdmi_sor(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + if (nv_device(drm->device)->chipset < 0xa3 || + nv_device(drm->device)->chipset == 0xaa || + nv_device(drm->device)->chipset == 0xac) + return false; + return true; +} + +static inline u32 +hdmi_base(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); + if (!hdmi_sor(encoder)) + return 0x616500 + (nv_crtc->index * 0x800); + return 0x61c500 + (nv_encoder->or * 0x800); +} + +static void +hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + nv_wr32(device, hdmi_base(encoder) + reg, val); +} + +static u32 +hdmi_rd32(struct drm_encoder *encoder, u32 reg) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + return nv_rd32(device, hdmi_base(encoder) + reg); +} + +static u32 +hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val) +{ + u32 tmp = hdmi_rd32(encoder, reg); + hdmi_wr32(encoder, reg, (tmp & ~mask) | val); + return tmp; +} + +static void +nouveau_audio_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_device *device = nouveau_dev(encoder->dev); + u32 or = nv_encoder->or * 0x800; + + if (hdmi_sor(encoder)) + nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000); +} + +static void +nouveau_audio_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_connector *nv_connector; + u32 or = nv_encoder->or * 0x800; + int i; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!drm_detect_monitor_audio(nv_connector->edid)) { + nouveau_audio_disconnect(encoder); + return; + } + + if (hdmi_sor(encoder)) { + nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001); + + drm_edid_to_eld(&nv_connector->base, nv_connector->edid); + if (nv_connector->base.eld[0]) { + u8 *eld = nv_connector->base.eld; + for (i = 0; i < eld[2] * 4; i++) + nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]); + for (i = eld[2] * 4; i < 0x60; i++) + nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00); + nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002); + } + } +} + +static void +nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame) +{ + /* calculate checksum for the infoframe */ + u8 sum = 0, i; + for (i = 0; i < frame[2]; i++) + sum += frame[i]; + frame[3] = 256 - sum; + + /* disable infoframe, and write header */ + hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000); + hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff); + + /* register scans tell me the audio infoframe has only one set of + * subpack regs, according to tegra (gee nvidia, it'd be nice if we + * could get those docs too!), the hdmi block pads out the rest of + * the packet on its own. 
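The subpack write just below packs seven payload bytes into each pair of 32-bit registers (four bytes in the low word, three in the high word), starting at the checksum byte frame[3]; the checksum itself is written so the bytes that were summed wrap to zero mod 256. A sketch of that packing, with a made-up helper name:

/* Sketch (assumes little-endian, as the (u32 *) cast below does): one
 * subpack slot = 7 infoframe bytes split 4 + 3 across two registers.
 */
static void
example_pack_subpack(const u8 *frame, int slot, u32 *lo, u32 *hi)
{
	const u8 *p = &frame[3 + slot * 7];

	*lo = p[0] | p[1] << 8 | p[2] << 16 | ((u32)p[3] << 24);	/* 4 bytes */
	*hi = p[4] | p[5] << 8 | p[6] << 16;				/* 3 bytes, top byte unused */
}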
+ */ + if (ctrl == 0x020) + frame[2] = 6; + + /* write out checksum and data, weird weird 7 byte register pairs */ + for (i = 0; i < frame[2] + 1; i += 7) { + u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8); + u32 *subpack = (u32 *)&frame[3 + i]; + hdmi_wr32(encoder, rsubpack + 0, subpack[0]); + hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff); + } + + /* enable the infoframe */ + hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001); +} + +static void +nouveau_hdmi_video_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0; + const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0; + const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0; + u8 frame[20]; + + frame[0x00] = 0x82; /* AVI infoframe */ + frame[0x01] = 0x02; /* version */ + frame[0x02] = 0x0d; /* length */ + frame[0x03] = 0x00; + frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S; + frame[0x05] = (C << 6) | (M << 4) | R; + frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC; + frame[0x07] = VIC; + frame[0x08] = PR; + frame[0x09] = bar_top & 0xff; + frame[0x0a] = bar_top >> 8; + frame[0x0b] = bar_bottom & 0xff; + frame[0x0c] = bar_bottom >> 8; + frame[0x0d] = bar_left & 0xff; + frame[0x0e] = bar_left >> 8; + frame[0x0f] = bar_right & 0xff; + frame[0x10] = bar_right >> 8; + frame[0x11] = 0x00; + frame[0x12] = 0x00; + frame[0x13] = 0x00; + + nouveau_hdmi_infoframe(encoder, 0x020, frame); +} + +static void +nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00; + const u8 CA = 0x00, DM_INH = 0, LSV = 0x00; + u8 frame[12]; + + frame[0x00] = 0x84; /* Audio infoframe */ + frame[0x01] = 0x01; /* version */ + frame[0x02] = 0x0a; /* length */ + frame[0x03] = 0x00; + frame[0x04] = (CT << 4) | CC; + frame[0x05] = (SF << 2) | ceaSS; + frame[0x06] = FMT; + frame[0x07] = CA; + frame[0x08] = (DM_INH << 7) | (LSV << 3); + frame[0x09] = 0x00; + frame[0x0a] = 0x00; + frame[0x0b] = 0x00; + + nouveau_hdmi_infoframe(encoder, 0x000, frame); +} + +static void +nouveau_hdmi_disconnect(struct drm_encoder *encoder) +{ + nouveau_audio_disconnect(encoder); + + /* disable audio and avi infoframes */ + hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000); + hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000); + + /* disable hdmi */ + hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000); +} + +void +nouveau_hdmi_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + u32 max_ac_packet, rekey; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!mode || !nv_connector || !nv_connector->edid || + !drm_detect_hdmi_monitor(nv_connector->edid)) { + nouveau_hdmi_disconnect(encoder); + return; + } + + nouveau_hdmi_video_infoframe(encoder, mode); + nouveau_hdmi_audio_infoframe(encoder, mode); + + hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ + hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? 
*/ + hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ + + nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ + nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ + nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ + + /* value matches nvidia binary driver, and tegra constant */ + rekey = 56; + + max_ac_packet = mode->htotal - mode->hdisplay; + max_ac_packet -= rekey; + max_ac_packet -= 18; /* constant from tegra */ + max_ac_packet /= 32; + + /* enable hdmi */ + hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */ + 0x1f000000 | /* unknown */ + max_ac_packet << 16 | + rekey); + + nouveau_audio_mode_set(encoder, mode); +} diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_irq.c b/trunk/drivers/gpu/drm/nouveau/nouveau_irq.c index 1303680affd3..1d8cb506a28a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -60,6 +60,18 @@ nouveau_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; nv_subdev(pmc)->intr(nv_subdev(pmc)); + + if (dev->mode_config.num_crtc) { + if (device->card_type >= NV_D0) { + if (nv_rd32(device, 0x000100) & 0x04000000) + nvd0_display_intr(dev); + } else + if (device->card_type >= NV_50) { + if (nv_rd32(device, 0x000100) & 0x04000000) + nv50_display_intr(dev); + } + } + return IRQ_HANDLED; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_prime.c b/trunk/drivers/gpu/drm/nouveau/nouveau_prime.c index 3543fec2355e..366462cf8a2c 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -155,6 +155,10 @@ nouveau_prime_new(struct drm_device *dev, return ret; nvbo = *pnvbo; + /* we restrict allowed domains on nv50+ to only the types + * that were requested at creation time. not possibly on + * earlier chips without busting the ABI. 
+ */ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); if (!nvbo->gem) { diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_vga.c b/trunk/drivers/gpu/drm/nouveau/nouveau_vga.c index 25d3495725eb..6f0ac64873df 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -31,11 +31,12 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - nouveau_pmops_resume(&pdev->dev); + nouveau_drm_resume(pdev); drm_kms_helper_poll_enable(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { @@ -43,7 +44,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev, dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(dev); nouveau_switcheroo_optimus_dsm(); - nouveau_pmops_suspend(&pdev->dev); + nouveau_drm_suspend(pdev, pmm); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } diff --git a/trunk/drivers/gpu/drm/nouveau/nv04_crtc.c b/trunk/drivers/gpu/drm/nouveau/nv04_crtc.c index 6578cd28c556..82a0d9c6cda3 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/trunk/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -730,7 +730,6 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); nouveau_bo_unmap(nv_crtc->cursor.nvbo); - nouveau_bo_unpin(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); kfree(nv_crtc); } @@ -1057,11 +1056,8 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); - if (!ret) { + if (!ret) ret = nouveau_bo_map(nv_crtc->cursor.nvbo); - if (ret) - nouveau_bo_unpin(nv_crtc->cursor.nvbo); - } if (ret) nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); } diff --git a/trunk/drivers/gpu/drm/nouveau/nv04_display.c b/trunk/drivers/gpu/drm/nouveau/nv04_display.c index 2cd6fb8c548e..846050f04c23 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv04_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nv04_display.c @@ -60,6 +60,8 @@ nv04_display_create(struct drm_device *dev) struct nv04_display *disp; int i, ret; + NV_DEBUG(drm, "\n"); + disp = kzalloc(sizeof(*disp), GFP_KERNEL); if (!disp) return -ENOMEM; @@ -130,10 +132,13 @@ nv04_display_create(struct drm_device *dev) void nv04_display_destroy(struct drm_device *dev) { + struct nouveau_drm *drm = nouveau_drm(dev); struct nv04_display *disp = nv04_display(dev); struct drm_encoder *encoder; struct drm_crtc *crtc; + NV_DEBUG(drm, "\n"); + /* Turn every CRTC off. 
*/ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct drm_mode_set modeset = { diff --git a/trunk/drivers/gpu/drm/nouveau/nv10_fence.c b/trunk/drivers/gpu/drm/nouveau/nv10_fence.c index 7ae7f97a6d4d..ce752bf5cc4e 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/trunk/drivers/gpu/drm/nouveau/nv10_fence.c @@ -155,8 +155,6 @@ nv10_fence_destroy(struct nouveau_drm *drm) { struct nv10_fence_priv *priv = drm->fence; nouveau_bo_unmap(priv->bo); - if (priv->bo) - nouveau_bo_unpin(priv->bo); nouveau_bo_ref(NULL, &priv->bo); drm->fence = NULL; kfree(priv); @@ -185,11 +183,8 @@ nv10_fence_create(struct nouveau_drm *drm) 0, 0x0000, NULL, &priv->bo); if (!ret) { ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); - if (!ret) { + if (!ret) ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } if (ret) nouveau_bo_ref(NULL, &priv->bo); } diff --git a/trunk/drivers/gpu/drm/nouveau/nv17_tv.c b/trunk/drivers/gpu/drm/nouveau/nv17_tv.c index 2ca276ada507..897b63621e2d 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/trunk/drivers/gpu/drm/nouveau/nv17_tv.c @@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) break; } - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, conf->tv_subconnector_property, tv_enc->subconnector); @@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder, drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_select_subconnector_property, tv_enc->select_subconnector); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_subconnector_property, tv_enc->subconnector); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_mode_property, tv_enc->tv_norm); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, tv_enc->flicker); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_saturation_property, tv_enc->saturation); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_hue_property, tv_enc->hue); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, conf->tv_overscan_property, tv_enc->overscan); diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c b/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c new file mode 100644 index 000000000000..222de77d6269 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -0,0 +1,764 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include + +#include "nouveau_reg.h" +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_gem.h" +#include "nouveau_hw.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "nouveau_connector.h" +#include "nv50_display.h" + +#include + +static void +nv50_crtc_lut_load(struct drm_crtc *crtc) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); + int i; + + NV_DEBUG(drm, "\n"); + + for (i = 0; i < 256; i++) { + writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0); + writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2); + writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4); + } + + if (nv_crtc->lut.depth == 30) { + writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0); + writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2); + writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4); + } +} + +int +nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int index = nv_crtc->index, ret; + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked"); + + if (blanked) { + nv_crtc->cursor.hide(nv_crtc, false); + + ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5); + if (ret) { + NV_ERROR(drm, "no space while blanking crtc\n"); + return ret; + } + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); + OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK); + OUT_RING(evo, 0); + if (nv_device(drm->device)->chipset != 0x50) { + BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); + OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE); + } + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); + OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE); + } else { + if (nv_crtc->cursor.visible) + nv_crtc->cursor.show(nv_crtc, false); + else + nv_crtc->cursor.hide(nv_crtc, false); + + ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8); + if (ret) { + NV_ERROR(drm, "no space while unblanking crtc\n"); + return ret; + } + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); + OUT_RING(evo, nv_crtc->lut.depth == 8 ? 
+ NV50_EVO_CRTC_CLUT_MODE_OFF : + NV50_EVO_CRTC_CLUT_MODE_ON); + OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8); + if (nv_device(drm->device)->chipset != 0x50) { + BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); + OUT_RING(evo, NvEvoVRAM); + } + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2); + OUT_RING(evo, nv_crtc->fb.offset >> 8); + OUT_RING(evo, 0); + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); + if (nv_device(drm->device)->chipset != 0x50) + if (nv_crtc->fb.tile_flags == 0x7a00 || + nv_crtc->fb.tile_flags == 0xfe00) + OUT_RING(evo, NvEvoFB32); + else + if (nv_crtc->fb.tile_flags == 0x7000) + OUT_RING(evo, NvEvoFB16); + else + OUT_RING(evo, NvEvoVRAM_LP); + else + OUT_RING(evo, NvEvoVRAM_LP); + } + + nv_crtc->fb.blanked = blanked; + return 0; +} + +static int +nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master; + struct nouveau_connector *nv_connector; + struct drm_connector *connector; + int head = nv_crtc->index, ret; + u32 mode = 0x00; + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + connector = &nv_connector->base; + if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { + if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) + mode = DITHERING_MODE_DYNAMIC2X2; + } else { + mode = nv_connector->dithering_mode; + } + + if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { + if (connector->display_info.bpc >= 8) + mode |= DITHERING_DEPTH_8BPC; + } else { + mode |= nv_connector->dithering_depth; + } + + ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); + if (ret == 0) { + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1); + OUT_RING (evo, mode); + if (update) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + FIRE_RING (evo); + } + } + + return ret; +} + +static int +nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + int adj; + u32 hue, vib; + + NV_DEBUG(drm, "vibrance = %i, hue = %i\n", + nv_crtc->color_vibrance, nv_crtc->vibrant_hue); + + ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); + if (ret) { + NV_ERROR(drm, "no space while setting color vibrance\n"); + return ret; + } + + adj = (nv_crtc->color_vibrance > 0) ? 50 : 0; + vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff; + + hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff; + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1); + OUT_RING (evo, (hue << 20) | (vib << 8)); + + if (update) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + FIRE_RING (evo); + } + + return 0; +} + +struct nouveau_connector * +nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct drm_connector *connector; + struct drm_crtc *crtc = to_drm_crtc(nv_crtc); + + /* The safest approach is to find an encoder with the right crtc, that + * is also linked to a connector. 
*/ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder) + if (connector->encoder->crtc == crtc) + return nouveau_connector(connector); + } + + return NULL; +} + +static int +nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nouveau_connector *nv_connector; + struct drm_crtc *crtc = &nv_crtc->base; + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + struct drm_display_mode *umode = &crtc->mode; + struct drm_display_mode *omode; + int scaling_mode, ret; + u32 ctrl = 0, oX, oY; + + NV_DEBUG(drm, "\n"); + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + if (!nv_connector || !nv_connector->native_mode) { + NV_ERROR(drm, "no native mode, forcing panel scaling\n"); + scaling_mode = DRM_MODE_SCALE_NONE; + } else { + scaling_mode = nv_connector->scaling_mode; + } + + /* start off at the resolution we programmed the crtc for, this + * effectively handles NONE/FULL scaling + */ + if (scaling_mode != DRM_MODE_SCALE_NONE) + omode = nv_connector->native_mode; + else + omode = umode; + + oX = omode->hdisplay; + oY = omode->vdisplay; + if (omode->flags & DRM_MODE_FLAG_DBLSCAN) + oY *= 2; + + /* add overscan compensation if necessary, will keep the aspect + * ratio the same as the backend mode unless overridden by the + * user setting both hborder and vborder properties. + */ + if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || + (nv_connector->underscan == UNDERSCAN_AUTO && + nv_connector->edid && + drm_detect_hdmi_monitor(nv_connector->edid)))) { + u32 bX = nv_connector->underscan_hborder; + u32 bY = nv_connector->underscan_vborder; + u32 aspect = (oY << 19) / oX; + + if (bX) { + oX -= (bX * 2); + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } else { + oX -= (oX >> 4) + 32; + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + } + + /* handle CENTER/ASPECT scaling, taking into account the areas + * removed already for overscan compensation + */ + switch (scaling_mode) { + case DRM_MODE_SCALE_CENTER: + oX = min((u32)umode->hdisplay, oX); + oY = min((u32)umode->vdisplay, oY); + /* fall-through */ + case DRM_MODE_SCALE_ASPECT: + if (oY < oX) { + u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; + oX = ((oY * aspect) + (aspect / 2)) >> 19; + } else { + u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; + oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + break; + default: + break; + } + + if (umode->hdisplay != oX || umode->vdisplay != oY || + umode->flags & DRM_MODE_FLAG_INTERLACE || + umode->flags & DRM_MODE_FLAG_DBLSCAN) + ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE; + + ret = RING_SPACE(evo, 5); + if (ret) + return ret; + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1); + OUT_RING (evo, ctrl); + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2); + OUT_RING (evo, oY << 16 | oX); + OUT_RING (evo, oY << 16 | oX); + + if (update) { + nv50_display_flip_stop(crtc); + nv50_display_sync(dev); + nv50_display_flip_next(crtc, crtc->fb, NULL); + } + + return 0; +} + +int +nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_clock *clk = nouveau_clock(device); + + return clk->pll_set(clk, PLL_VPLL0 + head, pclk); +} + +static void +nv50_crtc_destroy(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_drm *drm = 
nouveau_drm(crtc->dev); + + NV_DEBUG(drm, "\n"); + + nouveau_bo_unmap(nv_crtc->lut.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + drm_crtc_cleanup(&nv_crtc->base); + kfree(nv_crtc); +} + +int +nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t buffer_handle, uint32_t width, uint32_t height) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_bo *cursor = NULL; + struct drm_gem_object *gem; + int ret = 0, i; + + if (!buffer_handle) { + nv_crtc->cursor.hide(nv_crtc, true); + return 0; + } + + if (width != 64 || height != 64) + return -EINVAL; + + gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); + if (!gem) + return -ENOENT; + cursor = nouveau_gem_object(gem); + + ret = nouveau_bo_map(cursor); + if (ret) + goto out; + + /* The simple will do for now. */ + for (i = 0; i < 64 * 64; i++) + nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i)); + + nouveau_bo_unmap(cursor); + + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset); + nv_crtc->cursor.show(nv_crtc, true); + +out: + drm_gem_object_unreference_unlocked(gem); + return ret; +} + +int +nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->cursor.set_pos(nv_crtc, x, y); + return 0; +} + +static void +nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t start, uint32_t size) +{ + int end = (start + size > 256) ? 256 : start + size, i; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + for (i = start; i < end; i++) { + nv_crtc->lut.r[i] = r[i]; + nv_crtc->lut.g[i] = g[i]; + nv_crtc->lut.b[i] = b[i]; + } + + /* We need to know the depth before we upload, but it's possible to + * get called before a framebuffer is bound. 
If this is the case, + * mark the lut values as dirty by setting depth==0, and it'll be + * uploaded on the first mode_set_base() + */ + if (!nv_crtc->base.fb) { + nv_crtc->lut.depth = 0; + return; + } + + nv50_crtc_lut_load(crtc); +} + +static void +nv50_crtc_save(struct drm_crtc *crtc) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + NV_ERROR(drm, "!!\n"); +} + +static void +nv50_crtc_restore(struct drm_crtc *crtc) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + NV_ERROR(drm, "!!\n"); +} + +static const struct drm_crtc_funcs nv50_crtc_funcs = { + .save = nv50_crtc_save, + .restore = nv50_crtc_restore, + .cursor_set = nv50_crtc_cursor_set, + .cursor_move = nv50_crtc_cursor_move, + .gamma_set = nv50_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .page_flip = nouveau_crtc_page_flip, + .destroy = nv50_crtc_destroy, +}; + +static void +nv50_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static void +nv50_crtc_prepare(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + + nv50_display_flip_stop(crtc); + drm_vblank_pre_modeset(dev, nv_crtc->index); + nv50_crtc_blank(nv_crtc, true); +} + +static void +nv50_crtc_commit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + + nv50_crtc_blank(nv_crtc, false); + drm_vblank_post_modeset(dev, nv_crtc->index); + nv50_display_sync(dev); + nv50_display_flip_next(crtc, crtc->fb, NULL); +} + +static bool +nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int +nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *passed_fb, + int x, int y, bool atomic) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + struct drm_framebuffer *drm_fb; + struct nouveau_framebuffer *fb; + int ret; + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + + /* no fb bound */ + if (!atomic && !crtc->fb) { + NV_DEBUG(drm, "No FB bound\n"); + return 0; + } + + /* If atomic, we want to switch to the fb we were passed, so + * now we update pointers to do that. (We don't pin; just + * assume we're already pinned and update the base address.) + */ + if (atomic) { + drm_fb = passed_fb; + fb = nouveau_framebuffer(passed_fb); + } else { + drm_fb = crtc->fb; + fb = nouveau_framebuffer(crtc->fb); + /* If not atomic, we can go ahead and pin, and unpin the + * old fb we were passed. 
+ */ + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (passed_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); + nouveau_bo_unpin(ofb->nvbo); + } + } + + nv_crtc->fb.offset = fb->nvbo->bo.offset; + nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); + nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; + if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) { + ret = RING_SPACE(evo, 2); + if (ret) + return ret; + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1); + OUT_RING (evo, fb->r_dma); + } + + ret = RING_SPACE(evo, 12); + if (ret) + return ret; + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5); + OUT_RING (evo, nv_crtc->fb.offset >> 8); + OUT_RING (evo, 0); + OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width); + OUT_RING (evo, fb->r_pitch); + OUT_RING (evo, fb->r_format); + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1); + OUT_RING (evo, fb->base.depth == 8 ? + NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); + + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1); + OUT_RING (evo, (y << 16) | x); + + if (nv_crtc->lut.depth != fb->base.depth) { + nv_crtc->lut.depth = fb->base.depth; + nv50_crtc_lut_load(crtc); + } + + return 0; +} + +static int +nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, + struct drm_display_mode *mode, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_channel *evo = nv50_display(dev)->master; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 head = nv_crtc->index * 0x400; + u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; + u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1; + u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; + u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; + u32 vblan2e = 0, vblan2s = 1; + int ret; + + /* hw timing description looks like this: + * + * <---------display---------> + * ______ + * |____________|---------------------------|____________| + * + * ^ synce ^ blanke ^ blanks ^ active + * + * interlaced modes also have 2 additional values pointing at the end + * and start of the next field's blanking period. + */ + + hactive = mode->htotal; + hsynce = mode->hsync_end - mode->hsync_start - 1; + hbackp = mode->htotal - mode->hsync_end; + hblanke = hsynce + hbackp; + hfrontp = mode->hsync_start - mode->hdisplay; + hblanks = mode->htotal - hfrontp - 1; + + vactive = mode->vtotal * vscan / ilace; + vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; + vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; + vblanke = vsynce + vbackp; + vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; + vblanks = vactive - vfrontp - 1; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + vblan2e = vactive + vsynce + vbackp; + vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); + vactive = (vactive * 2) + 1; + } + + ret = RING_SPACE(evo, 18); + if (ret == 0) { + BEGIN_NV04(evo, 0, 0x0804 + head, 2); + OUT_RING (evo, 0x00800000 | mode->clock); + OUT_RING (evo, (ilace == 2) ? 
2 : 0); + BEGIN_NV04(evo, 0, 0x0810 + head, 6); + OUT_RING (evo, 0x00000000); /* border colour */ + OUT_RING (evo, (vactive << 16) | hactive); + OUT_RING (evo, ( vsynce << 16) | hsynce); + OUT_RING (evo, (vblanke << 16) | hblanke); + OUT_RING (evo, (vblanks << 16) | hblanks); + OUT_RING (evo, (vblan2e << 16) | vblan2s); + BEGIN_NV04(evo, 0, 0x082c + head, 1); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x0900 + head, 1); + OUT_RING (evo, 0x00000311); /* makes sync channel work */ + BEGIN_NV04(evo, 0, 0x08c8 + head, 1); + OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay); + BEGIN_NV04(evo, 0, 0x08d4 + head, 1); + OUT_RING (evo, 0x00000000); /* screen position */ + } + + nv_crtc->set_dither(nv_crtc, false); + nv_crtc->set_scale(nv_crtc, false); + nv_crtc->set_color_vibrance(nv_crtc, false); + + return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); +} + +static int +nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + int ret; + + nv50_display_flip_stop(crtc); + ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false); + if (ret) + return ret; + + ret = nv50_display_sync(crtc->dev); + if (ret) + return ret; + + return nv50_display_flip_next(crtc, crtc->fb, NULL); +} + +static int +nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + int ret; + + nv50_display_flip_stop(crtc); + ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true); + if (ret) + return ret; + + return nv50_display_sync(crtc->dev); +} + +static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { + .dpms = nv50_crtc_dpms, + .prepare = nv50_crtc_prepare, + .commit = nv50_crtc_commit, + .mode_fixup = nv50_crtc_mode_fixup, + .mode_set = nv50_crtc_mode_set, + .mode_set_base = nv50_crtc_mode_set_base, + .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, + .load_lut = nv50_crtc_lut_load, +}; + +int +nv50_crtc_create(struct drm_device *dev, int index) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = NULL; + int ret, i; + + NV_DEBUG(drm, "\n"); + + nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); + if (!nv_crtc) + return -ENOMEM; + + nv_crtc->index = index; + nv_crtc->set_dither = nv50_crtc_set_dither; + nv_crtc->set_scale = nv50_crtc_set_scale; + nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance; + nv_crtc->color_vibrance = 50; + nv_crtc->vibrant_hue = 0; + nv_crtc->lut.depth = 0; + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = i << 8; + nv_crtc->lut.g[i] = i << 8; + nv_crtc->lut.b[i] = i << 8; + } + + drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs); + drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs); + drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); + + ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->lut.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->lut.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + } + + if (ret) + goto out; + + + ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + } + + if (ret) + goto out; + + nv50_cursor_init(nv_crtc); +out: + if (ret) + nv50_crtc_destroy(&nv_crtc->base); + return ret; +} diff --git 
a/trunk/drivers/gpu/drm/nouveau/nv50_cursor.c b/trunk/drivers/gpu/drm/nouveau/nv50_cursor.c new file mode 100644 index 000000000000..223da113ceee --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_cursor.c @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +static void +nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + + NV_DEBUG(drm, "\n"); + + if (update && nv_crtc->cursor.visible) + return; + + ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2); + if (ret) { + NV_ERROR(drm, "no space while unhiding cursor\n"); + return; + } + + if (nv_device(drm->device)->chipset != 0x50) { + BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); + OUT_RING(evo, NvEvoVRAM); + } + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2); + OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW); + OUT_RING(evo, nv_crtc->cursor.offset >> 8); + + if (update) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + nv_crtc->cursor.visible = true; + } +} + +static void +nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + + NV_DEBUG(drm, "\n"); + + if (update && !nv_crtc->cursor.visible) + return; + + ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 
5 : 3) + update * 2); + if (ret) { + NV_ERROR(drm, "no space while hiding cursor\n"); + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2); + OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE); + OUT_RING(evo, 0); + if (nv_device(drm->device)->chipset != 0x50) { + BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); + OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE); + } + + if (update) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + FIRE_RING(evo); + nv_crtc->cursor.visible = false; + } +} + +static void +nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) +{ + struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev); + + nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; + nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), + ((y & 0xFFFF) << 16) | (x & 0xFFFF)); + /* Needed to make the cursor move. */ + nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0); +} + +static void +nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) +{ + if (offset == nv_crtc->cursor.offset) + return; + + nv_crtc->cursor.offset = offset; + if (nv_crtc->cursor.visible) { + nv_crtc->cursor.visible = false; + nv_crtc->cursor.show(nv_crtc, true); + } +} + +int +nv50_cursor_init(struct nouveau_crtc *nv_crtc) +{ + nv_crtc->cursor.set_offset = nv50_cursor_set_offset; + nv_crtc->cursor.set_pos = nv50_cursor_set_pos; + nv_crtc->cursor.hide = nv50_cursor_hide; + nv_crtc->cursor.show = nv50_cursor_show; + return 0; +} diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_dac.c b/trunk/drivers/gpu/drm/nouveau/nv50_dac.c new file mode 100644 index 000000000000..6a30a1748573 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_dac.c @@ -0,0 +1,321 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include + +#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) +#include "nouveau_reg.h" +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +#include + +static void +nv50_dac_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + + if (!nv_encoder->crtc) + return; + nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); + + NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or); + + ret = RING_SPACE(evo, 4); + if (ret) { + NV_ERROR(drm, "no space while disconnecting DAC\n"); + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); + OUT_RING (evo, 0); + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + + nv_encoder->crtc = NULL; +} + +static enum drm_connector_status +nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + enum drm_connector_status status = connector_status_disconnected; + uint32_t dpms_state, load_pattern, load_state; + int or = nv_encoder->or; + + nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001); + dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)); + + nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), + 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); + return status; + } + + /* Use bios provided value if possible. 
*/ + if (drm->vbios.dactestval) { + load_pattern = drm->vbios.dactestval; + NV_DEBUG(drm, "Using bios provided load_pattern of %d\n", + load_pattern); + } else { + load_pattern = 340; + NV_DEBUG(drm, "Using default load_pattern of %d\n", + load_pattern); + } + + nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), + NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern); + mdelay(45); /* give it some time to process */ + load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or)); + + nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0); + nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + + if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) == + NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) + status = connector_status_connected; + + if (status == connector_status_connected) + NV_DEBUG(drm, "Load was detected on output with or %d\n", or); + else + NV_DEBUG(drm, "Load was not detected on output with or %d\n", or); + + return status; +} + +static void +nv50_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + uint32_t val; + int or = nv_encoder->or; + + NV_DEBUG(drm, "or %d mode %d\n", or, mode); + + /* wait for it to be done */ + if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); + return; + } + + val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F; + + if (mode != DRM_MODE_DPMS_ON) + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED; + + switch (mode) { + case DRM_MODE_DPMS_STANDBY: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF; + break; + case DRM_MODE_DPMS_SUSPEND: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF; + break; + case DRM_MODE_DPMS_OFF: + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF; + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF; + val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF; + break; + default: + break; + } + + nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); +} + +static void +nv50_dac_save(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + NV_ERROR(drm, "!!\n"); +} + +static void +nv50_dac_restore(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + NV_ERROR(drm, "!!\n"); +} + +static bool +nv50_dac_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *connector; + + NV_DEBUG(drm, "or %d\n", nv_encoder->or); + + connector = nouveau_encoder_connector_get(nv_encoder); + if (!connector) { + NV_ERROR(drm, "Encoder has no connector\n"); + return false; + } + + if (connector->scaling_mode != DRM_MODE_SCALE_NONE && + connector->native_mode) + drm_mode_copy(adjusted_mode, connector->native_mode); + + return true; +} + +static void +nv50_dac_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = 
nouveau_drm(encoder->dev); + struct drm_device *dev = encoder->dev; + struct nouveau_channel *evo = nv50_display(dev)->master; + struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); + uint32_t mode_ctl = 0, mode_ctl2 = 0; + int ret; + + NV_DEBUG(drm, "or %d type %d crtc %d\n", + nv_encoder->or, nv_encoder->dcb->type, crtc->index); + + nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); + + if (crtc->index == 1) + mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1; + else + mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0; + + /* Lacking a working tv-out, this is not a 100% sure. */ + if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG) + mode_ctl |= 0x40; + else + if (nv_encoder->dcb->type == DCB_OUTPUT_TV) + mode_ctl |= 0x100; + + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) + mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC; + + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) + mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC; + + ret = RING_SPACE(evo, 3); + if (ret) { + NV_ERROR(drm, "no space while connecting DAC\n"); + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); + OUT_RING(evo, mode_ctl); + OUT_RING(evo, mode_ctl2); + + nv_encoder->crtc = encoder->crtc; +} + +static struct drm_crtc * +nv50_dac_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; +} + +static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { + .dpms = nv50_dac_dpms, + .save = nv50_dac_save, + .restore = nv50_dac_restore, + .mode_fixup = nv50_dac_mode_fixup, + .prepare = nv50_dac_disconnect, + .commit = nv50_dac_commit, + .mode_set = nv50_dac_mode_set, + .get_crtc = nv50_dac_crtc_get, + .detect = nv50_dac_detect, + .disable = nv50_dac_disconnect +}; + +static void +nv50_dac_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + + if (!encoder) + return; + + NV_DEBUG(drm, "\n"); + + drm_encoder_cleanup(encoder); + kfree(nv_encoder); +} + +static const struct drm_encoder_funcs nv50_dac_encoder_funcs = { + .destroy = nv50_dac_destroy, +}; + +int +nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry) +{ + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs, + DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.c b/trunk/drivers/gpu/drm/nouveau/nv50_display.c index 35874085a61e..f97b42cbb6bb 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.c @@ -1,2058 +1,969 @@ - /* - * Copyright 2011 Red Hat Inc. +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. 
* - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* - * Authors: Ben Skeggs */ -#include - -#include -#include - #include "nouveau_drm.h" #include "nouveau_dma.h" -#include "nouveau_gem.h" -#include "nouveau_connector.h" -#include "nouveau_encoder.h" + +#include "nv50_display.h" #include "nouveau_crtc.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_fbcon.h" +#include #include "nouveau_fence.h" -#include "nv50_display.h" -#include #include -#include - #include -#include -#include - -#define EVO_DMA_NR 9 - -#define EVO_MASTER (0x00) -#define EVO_FLIP(c) (0x01 + (c)) -#define EVO_OVLY(c) (0x05 + (c)) -#define EVO_OIMM(c) (0x09 + (c)) -#define EVO_CURS(c) (0x0d + (c)) - -/* offsets in shared sync bo of various structures */ -#define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) -#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) -#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00) -#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10) - -#define EVO_CORE_HANDLE (0xd1500000) -#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i)) -#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff)) -#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \ - (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8)) - -/****************************************************************************** - * EVO channel - *****************************************************************************/ - -struct nv50_chan { - struct nouveau_object *user; - u32 handle; -}; - -static int -nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head, - void *data, u32 size, struct nv50_chan *chan) + +static void nv50_display_bh(unsigned long); + +static inline int +nv50_sor_nr(struct drm_device *dev) { - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - const u32 oclass = EVO_CHAN_OCLASS(bclass, core); - const u32 handle = EVO_CHAN_HANDLE(bclass, head); - int ret; + struct nouveau_device *device = nouveau_dev(dev); - ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle, - oclass, data, size, &chan->user); - if (ret) - return ret; + if (device->chipset < 0x90 || + device->chipset == 0x92 || + device->chipset == 0xa0) + return 2; - chan->handle = handle; - return 0; + return 4; } -static void -nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan) +u32 +nv50_display_active_crtcs(struct drm_device *dev) { - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - if (chan->handle) - nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle); -} + struct nouveau_device *device = nouveau_dev(dev); + u32 mask = 0; + int i; -/****************************************************************************** - * PIO EVO channel - *****************************************************************************/ + if (device->chipset < 0x90 || + device->chipset == 0x92 || + device->chipset == 0xa0) { + for (i = 0; i < 2; i++) + mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); + } else { + for (i = 0; i < 4; i++) + mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); + } -struct nv50_pioc { - struct nv50_chan base; -}; + for (i = 0; i < 3; i++) + mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); -static void -nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc) + return mask & 3; +} + +int +nv50_display_early_init(struct drm_device *dev) { - nv50_chan_destroy(core, &pioc->base); + return 0; } -static int -nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head, - void *data, u32 size, struct nv50_pioc *pioc) +void +nv50_display_late_takedown(struct drm_device *dev) { - 
return nv50_chan_create(core, bclass, head, data, size, &pioc->base); } -/****************************************************************************** - * DMA EVO channel - *****************************************************************************/ +int +nv50_display_sync(struct drm_device *dev) +{ + struct nv50_display *disp = nv50_display(dev); + struct nouveau_channel *evo = disp->master; + int ret; -struct nv50_dmac { - struct nv50_chan base; - dma_addr_t handle; - u32 *ptr; -}; + ret = RING_SPACE(evo, 6); + if (ret == 0) { + BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x80000000); + BEGIN_NV04(evo, 0, 0x0080, 1); + OUT_RING (evo, 0); + BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x00000000); -static void -nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac) -{ - if (dmac->ptr) { - struct pci_dev *pdev = nv_device(core)->pdev; - pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); + nv_wo32(disp->ramin, 0x2000, 0x00000000); + FIRE_RING (evo); + + if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000)) + return 0; } - nv50_chan_destroy(core, &dmac->base); + return 0; } -static int -nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent) +int +nv50_display_init(struct drm_device *dev) { - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_channel *evo; + int ret, i; + u32 val; - ret = nouveau_object_new(client, parent, NvEvoFB16, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + NV_DEBUG(drm, "\n"); - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - return ret; -} + nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004)); -static int -nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) -{ - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + /* + * I think the 0x006101XX range is some kind of main control area + * that enables things. + */ + /* CRTC? 
*/ + for (i = 0; i < 2; i++) { + val = nv_rd32(device, 0x00616100 + (i * 0x800)); + nv_wr32(device, 0x00610190 + (i * 0x10), val); + val = nv_rd32(device, 0x00616104 + (i * 0x800)); + nv_wr32(device, 0x00610194 + (i * 0x10), val); + val = nv_rd32(device, 0x00616108 + (i * 0x800)); + nv_wr32(device, 0x00610198 + (i * 0x10), val); + val = nv_rd32(device, 0x0061610c + (i * 0x800)); + nv_wr32(device, 0x0061019c + (i * 0x10), val); + } + + /* DAC */ + for (i = 0; i < 3; i++) { + val = nv_rd32(device, 0x0061a000 + (i * 0x800)); + nv_wr32(device, 0x006101d0 + (i * 0x04), val); + } + + /* SOR */ + for (i = 0; i < nv50_sor_nr(dev); i++) { + val = nv_rd32(device, 0x0061c000 + (i * 0x800)); + nv_wr32(device, 0x006101e0 + (i * 0x04), val); + } + + /* EXT */ + for (i = 0; i < 3; i++) { + val = nv_rd32(device, 0x0061e000 + (i * 0x800)); + nv_wr32(device, 0x006101f0 + (i * 0x04), val); + } + + for (i = 0; i < 3; i++) { + nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 | + NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); + nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); + } - ret = nouveau_object_new(client, parent, NvEvoFB16, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, - }, sizeof(struct nv_dma_class), &object); + /* The precise purpose is unknown, i suspect it has something to do + * with text mode. + */ + if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) { + nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100); + nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1); + if (!nv_wait(device, 0x006194e8, 2, 0)) { + NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n"); + NV_ERROR(drm, "0x6194e8 = 0x%08x\n", + nv_rd32(device, 0x6194e8)); + return -EBUSY; + } + } + + for (i = 0; i < 2; i++) { + nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); + if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { + NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n"); + NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n", + nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + return -EBUSY; + } + + nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); + if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { + NV_ERROR(drm, "timeout: " + "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i); + NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i, + nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + return -EBUSY; + } + } + + nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000); + nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); + nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000); + nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); + nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, + NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 | + NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | + NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); + + ret = nv50_evo_init(dev); if (ret) return ret; + evo = nv50_display(dev)->master; - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, - }, sizeof(struct nv_dma_class), &object); - return ret; -} + nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9); 
-static int -nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) -{ - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVD0_DMA_CONF0_ENABLE | - NVD0_DMA_CONF0_PAGE_LP, - }, sizeof(struct nv_dma_class), &object); + ret = RING_SPACE(evo, 3); if (ret) return ret; + BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2); + OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED); + OUT_RING (evo, NvEvoSync); - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe | - NVD0_DMA_CONF0_PAGE_LP, - }, sizeof(struct nv_dma_class), &object); - return ret; + return nv50_display_sync(dev); } -static int -nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head, - void *data, u32 size, u64 syncbuf, - struct nv50_dmac *dmac) +void +nv50_display_fini(struct drm_device *dev) { - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - u32 pushbuf = *(u32 *)data; - int ret; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nv50_display *disp = nv50_display(dev); + struct nouveau_channel *evo = disp->master; + struct drm_crtc *drm_crtc; + int ret, i; - dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE, - &dmac->handle); - if (!dmac->ptr) - return -ENOMEM; + NV_DEBUG(drm, "\n"); - ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf, - NV_DMA_FROM_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_PCI_US | - NV_DMA_ACCESS_RD, - .start = dmac->handle + 0x0000, - .limit = dmac->handle + 0x0fff, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); - ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base); - if (ret) - return ret; + nv50_crtc_blank(crtc, true); + } - ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = syncbuf + 0x0000, - .limit = syncbuf + 0x0fff, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + ret = RING_SPACE(evo, 2); + if (ret == 0) { + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING(evo, 0); + } + FIRE_RING(evo); - ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram.size - 1, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; + /* Almost like ack'ing a vblank interrupt, maybe in the spirit of + * cleaning up? 
+ */ + list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); + uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index); - if (nv_device(core)->card_type < NV_C0) - ret = nv50_dmac_create_fbdma(core, dmac->base.handle); - else - if (nv_device(core)->card_type < NV_D0) - ret = nvc0_dmac_create_fbdma(core, dmac->base.handle); - else - ret = nvd0_dmac_create_fbdma(core, dmac->base.handle); - return ret; -} + if (!crtc->base.enabled) + continue; -struct nv50_mast { - struct nv50_dmac base; -}; - -struct nv50_curs { - struct nv50_pioc base; -}; - -struct nv50_sync { - struct nv50_dmac base; - struct { - u32 offset; - u16 value; - } sem; -}; - -struct nv50_ovly { - struct nv50_dmac base; -}; - -struct nv50_oimm { - struct nv50_pioc base; -}; - -struct nv50_head { - struct nouveau_crtc base; - struct nv50_curs curs; - struct nv50_sync sync; - struct nv50_ovly ovly; - struct nv50_oimm oimm; -}; - -#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c)) -#define nv50_curs(c) (&nv50_head(c)->curs) -#define nv50_sync(c) (&nv50_head(c)->sync) -#define nv50_ovly(c) (&nv50_head(c)->ovly) -#define nv50_oimm(c) (&nv50_head(c)->oimm) -#define nv50_chan(c) (&(c)->base.base) -#define nv50_vers(c) nv_mclass(nv50_chan(c)->user) - -struct nv50_disp { - struct nouveau_object *core; - struct nv50_mast mast; - - u32 modeset; - - struct nouveau_bo *sync; -}; - -static struct nv50_disp * -nv50_disp(struct drm_device *dev) -{ - return nouveau_display(dev)->priv; -} + nv_wr32(device, NV50_PDISPLAY_INTR_1, mask); + if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) { + NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == " + "0x%08x\n", mask, mask); + NV_ERROR(drm, "0x610024 = 0x%08x\n", + nv_rd32(device, NV50_PDISPLAY_INTR_1)); + } + } + + for (i = 0; i < 2; i++) { + nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0); + if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { + NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n"); + NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n", + nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + } + } -#define nv50_mast(d) (&nv50_disp(d)->mast) + nv50_evo_fini(dev); -static struct drm_crtc * -nv50_display_crtc_get(struct drm_encoder *encoder) -{ - return nouveau_encoder(encoder)->crtc; + for (i = 0; i < 3; i++) { + if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i), + NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { + NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i); + NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i, + nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i))); + } + } + + /* disable interrupts. 
*/ + nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000); } -/****************************************************************************** - * EVO channel helpers - *****************************************************************************/ -static u32 * -evo_wait(void *evoc, int nr) +int +nv50_display_create(struct drm_device *dev) { - struct nv50_dmac *dmac = evoc; - u32 put = nv_ro32(dmac->base.user, 0x0000) / 4; + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_table *dcb = &drm->vbios.dcb; + struct drm_connector *connector, *ct; + struct nv50_display *priv; + int ret, i; + + NV_DEBUG(drm, "\n"); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + nouveau_display(dev)->priv = priv; + nouveau_display(dev)->dtor = nv50_display_destroy; + nouveau_display(dev)->init = nv50_display_init; + nouveau_display(dev)->fini = nv50_display_fini; + + /* Create CRTC objects */ + for (i = 0; i < 2; i++) { + ret = nv50_crtc_create(dev, i); + if (ret) + return ret; + } - if (put + nr >= (PAGE_SIZE / 4) - 8) { - dmac->ptr[put] = 0x20000000; + /* We setup the encoders from the BIOS table */ + for (i = 0 ; i < dcb->entries; i++) { + struct dcb_output *entry = &dcb->entry[i]; - nv_wo32(dmac->base.user, 0x0000, 0x00000000); - if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) { - NV_ERROR(dmac->base.user, "channel stalled\n"); - return NULL; + if (entry->location != DCB_LOC_ON_CHIP) { + NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n", + entry->type, ffs(entry->or) - 1); + continue; } - put = 0; + connector = nouveau_connector_create(dev, entry->connector); + if (IS_ERR(connector)) + continue; + + switch (entry->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + nv50_sor_create(connector, entry); + break; + case DCB_OUTPUT_ANALOG: + nv50_dac_create(connector, entry); + break; + default: + NV_WARN(drm, "DCB encoder %d unknown\n", entry->type); + continue; + } } - return dmac->ptr + put; -} + list_for_each_entry_safe(connector, ct, + &dev->mode_config.connector_list, head) { + if (!connector->encoder_ids[0]) { + NV_WARN(drm, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + connector->funcs->destroy(connector); + } + } -static void -evo_kick(u32 *push, void *evoc) -{ - struct nv50_dmac *dmac = evoc; - nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2); -} + tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev); -#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) -#define evo_data(p,d) *((p)++) = (d) + ret = nv50_evo_create(dev); + if (ret) { + nv50_display_destroy(dev); + return ret; + } -static bool -evo_sync_wait(void *data) -{ - return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000; + return 0; } -static int -evo_sync(struct drm_device *dev) +void +nv50_display_destroy(struct drm_device *dev) { - struct nouveau_device *device = nouveau_dev(dev); - struct nv50_disp *disp = nv50_disp(dev); - struct nv50_mast *mast = nv50_mast(dev); - u32 *push = evo_wait(mast, 8); - if (push) { - nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000); - evo_mthd(push, 0x0084, 1); - evo_data(push, 0x80000000 | EVO_MAST_NTFY); - evo_mthd(push, 0x0080, 2); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - evo_kick(push, mast); - if (nv_wait_cb(device, evo_sync_wait, disp->sync)) - return 0; - } + struct nv50_display *disp = nv50_display(dev); - return -EBUSY; + nv50_evo_destroy(dev); + kfree(disp); } -/****************************************************************************** - * Page 
flipping channel - *****************************************************************************/ struct nouveau_bo * nv50_display_crtc_sema(struct drm_device *dev, int crtc) { - return nv50_disp(dev)->sync; + return nv50_display(dev)->crtc[crtc].sem.bo; } void nv50_display_flip_stop(struct drm_crtc *crtc) { - struct nv50_sync *sync = nv50_sync(crtc); - u32 *push; - - push = evo_wait(sync, 8); - if (push) { - evo_mthd(push, 0x0084, 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0094, 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x00c0, 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, sync); + struct nv50_display *disp = nv50_display(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index]; + struct nouveau_channel *evo = dispc->sync; + int ret; + + ret = RING_SPACE(evo, 8); + if (ret) { + WARN_ON(1); + return; } + + BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x0094, 1); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x00c0, 1); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x0080, 1); + OUT_RING (evo, 0x00000000); + FIRE_RING (evo); } int nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct nouveau_channel *chan, u32 swap_interval) + struct nouveau_channel *chan) { + struct nouveau_drm *drm = nouveau_drm(crtc->dev); struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); - struct nv50_disp *disp = nv50_disp(crtc->dev); + struct nv50_display *disp = nv50_display(crtc->dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nv50_sync *sync = nv50_sync(crtc); - u32 *push; + struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index]; + struct nouveau_channel *evo = dispc->sync; int ret; - swap_interval <<= 4; - if (swap_interval == 0) - swap_interval |= 0x100; - - push = evo_wait(sync, 128); - if (unlikely(push == NULL)) - return -EBUSY; + ret = RING_SPACE(evo, chan ? 
25 : 27); + if (unlikely(ret)) + return ret; /* synchronise with the rendering channel, if necessary */ if (likely(chan)) { ret = RING_SPACE(chan, 10); - if (ret) + if (ret) { + WIND_RING(evo); return ret; + } - if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { - BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); + if (nv_device(drm->device)->chipset < 0xc0) { + BEGIN_NV04(chan, 0, 0x0060, 2); OUT_RING (chan, NvEvoSema0 + nv_crtc->index); - OUT_RING (chan, sync->sem.offset); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); - OUT_RING (chan, 0xf00d0000 | sync->sem.value); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2); - OUT_RING (chan, sync->sem.offset ^ 0x10); + OUT_RING (chan, dispc->sem.offset); + BEGIN_NV04(chan, 0, 0x006c, 1); + OUT_RING (chan, 0xf00d0000 | dispc->sem.value); + BEGIN_NV04(chan, 0, 0x0064, 2); + OUT_RING (chan, dispc->sem.offset ^ 0x10); OUT_RING (chan, 0x74b1e000); - BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); - if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS) + BEGIN_NV04(chan, 0, 0x0060, 1); + if (nv_device(drm->device)->chipset < 0x84) OUT_RING (chan, NvSema); else OUT_RING (chan, chan->vram); } else { u64 offset = nvc0_fence_crtc(chan, nv_crtc->index); - offset += sync->sem.offset; - - BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + offset += dispc->sem.offset; + BEGIN_NVC0(chan, 0, 0x0010, 4); OUT_RING (chan, upper_32_bits(offset)); OUT_RING (chan, lower_32_bits(offset)); - OUT_RING (chan, 0xf00d0000 | sync->sem.value); + OUT_RING (chan, 0xf00d0000 | dispc->sem.value); OUT_RING (chan, 0x1002); - BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + BEGIN_NVC0(chan, 0, 0x0010, 4); OUT_RING (chan, upper_32_bits(offset)); OUT_RING (chan, lower_32_bits(offset ^ 0x10)); OUT_RING (chan, 0x74b1e000); OUT_RING (chan, 0x1001); } - FIRE_RING (chan); } else { - nouveau_bo_wr32(disp->sync, sync->sem.offset / 4, - 0xf00d0000 | sync->sem.value); - evo_sync(crtc->dev); + nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4, + 0xf00d0000 | dispc->sem.value); } - /* queue the flip */ - evo_mthd(push, 0x0100, 1); - evo_data(push, 0xfffe0000); - evo_mthd(push, 0x0084, 1); - evo_data(push, swap_interval); - if (!(swap_interval & 0x00000100)) { - evo_mthd(push, 0x00e0, 1); - evo_data(push, 0x40000000); - } - evo_mthd(push, 0x0088, 4); - evo_data(push, sync->sem.offset); - evo_data(push, 0xf00d0000 | sync->sem.value); - evo_data(push, 0x74b1e000); - evo_data(push, NvEvoSync); - evo_mthd(push, 0x00a0, 2); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - evo_mthd(push, 0x00c0, 1); - evo_data(push, nv_fb->r_dma); - evo_mthd(push, 0x0110, 2); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) { - evo_mthd(push, 0x0800, 5); - evo_data(push, nv_fb->nvbo->bo.offset >> 8); - evo_data(push, 0); - evo_data(push, (fb->height << 16) | fb->width); - evo_data(push, nv_fb->r_pitch); - evo_data(push, nv_fb->r_format); + /* queue the flip on the crtc's "display sync" channel */ + BEGIN_NV04(evo, 0, 0x0100, 1); + OUT_RING (evo, 0xfffe0000); + if (chan) { + BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x00000100); } else { - evo_mthd(push, 0x0400, 5); - evo_data(push, nv_fb->nvbo->bo.offset >> 8); - evo_data(push, 0); - evo_data(push, (fb->height << 16) | fb->width); - evo_data(push, nv_fb->r_pitch); - evo_data(push, nv_fb->r_format); - } - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, sync); - - sync->sem.offset ^= 0x10; - sync->sem.value++; + 
BEGIN_NV04(evo, 0, 0x0084, 1); + OUT_RING (evo, 0x00000010); + /* allows gamma somehow, PDISP will bitch at you if + * you don't wait for vblank before changing this.. + */ + BEGIN_NV04(evo, 0, 0x00e0, 1); + OUT_RING (evo, 0x40000000); + } + BEGIN_NV04(evo, 0, 0x0088, 4); + OUT_RING (evo, dispc->sem.offset); + OUT_RING (evo, 0xf00d0000 | dispc->sem.value); + OUT_RING (evo, 0x74b1e000); + OUT_RING (evo, NvEvoSync); + BEGIN_NV04(evo, 0, 0x00a0, 2); + OUT_RING (evo, 0x00000000); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x00c0, 1); + OUT_RING (evo, nv_fb->r_dma); + BEGIN_NV04(evo, 0, 0x0110, 2); + OUT_RING (evo, 0x00000000); + OUT_RING (evo, 0x00000000); + BEGIN_NV04(evo, 0, 0x0800, 5); + OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8); + OUT_RING (evo, 0); + OUT_RING (evo, (fb->height << 16) | fb->width); + OUT_RING (evo, nv_fb->r_pitch); + OUT_RING (evo, nv_fb->r_format); + BEGIN_NV04(evo, 0, 0x0080, 1); + OUT_RING (evo, 0x00000000); + FIRE_RING (evo); + + dispc->sem.offset ^= 0x10; + dispc->sem.value++; return 0; } -/****************************************************************************** - * CRTC - *****************************************************************************/ -static int -nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) +static u16 +nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb, + u32 mc, int pxclk) { - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - struct nouveau_connector *nv_connector; - struct drm_connector *connector; - u32 *push, mode = 0x00; - - nv_connector = nouveau_crtc_connector_get(nv_crtc); - connector = &nv_connector->base; - if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { - if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) - mode = DITHERING_MODE_DYNAMIC2X2; - } else { - mode = nv_connector->dithering_mode; - } + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_connector *nv_connector = NULL; + struct drm_encoder *encoder; + struct nvbios *bios = &drm->vbios; + u32 script = 0, or; - if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { - if (connector->display_info.bpc >= 8) - mode |= DITHERING_DEPTH_8BPC; - } else { - mode |= nv_connector->dithering_depth; - } + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - push = evo_wait(mast, 4); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1); - evo_data(push, mode); - } else - if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1); - evo_data(push, mode); - } else { - evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1); - evo_data(push, mode); - } + if (nv_encoder->dcb != dcb) + continue; - if (update) { - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - } - evo_kick(push, mast); + nv_connector = nouveau_encoder_connector_get(nv_encoder); + break; } - return 0; -} - -static int -nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) -{ - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - struct drm_display_mode *omode, *umode = &nv_crtc->base.mode; - struct drm_crtc *crtc = &nv_crtc->base; - struct nouveau_connector *nv_connector; - int mode = DRM_MODE_SCALE_NONE; - u32 oX, oY, *push; - - /* start off at the resolution we programmed the crtc for, this - * effectively handles NONE/FULL scaling - */ - nv_connector = nouveau_crtc_connector_get(nv_crtc); - if (nv_connector && 
nv_connector->native_mode) - mode = nv_connector->scaling_mode; - - if (mode != DRM_MODE_SCALE_NONE) - omode = nv_connector->native_mode; - else - omode = umode; - - oX = omode->hdisplay; - oY = omode->vdisplay; - if (omode->flags & DRM_MODE_FLAG_DBLSCAN) - oY *= 2; - - /* add overscan compensation if necessary, will keep the aspect - * ratio the same as the backend mode unless overridden by the - * user setting both hborder and vborder properties. - */ - if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || - (nv_connector->underscan == UNDERSCAN_AUTO && - nv_connector->edid && - drm_detect_hdmi_monitor(nv_connector->edid)))) { - u32 bX = nv_connector->underscan_hborder; - u32 bY = nv_connector->underscan_vborder; - u32 aspect = (oY << 19) / oX; - - if (bX) { - oX -= (bX * 2); - if (bY) oY -= (bY * 2); - else oY = ((oX * aspect) + (aspect / 2)) >> 19; + or = ffs(dcb->or) - 1; + switch (dcb->type) { + case DCB_OUTPUT_LVDS: + script = (mc >> 8) & 0xf; + if (bios->fp_no_ddc) { + if (bios->fp.dual_link) + script |= 0x0100; + if (bios->fp.if_is_24bit) + script |= 0x0200; } else { - oX -= (oX >> 4) + 32; - if (bY) oY -= (bY * 2); - else oY = ((oX * aspect) + (aspect / 2)) >> 19; - } - } + /* determine number of lvds links */ + if (nv_connector && nv_connector->edid && + nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { + /* http://www.spwg.org */ + if (((u8 *)nv_connector->edid)[121] == 2) + script |= 0x0100; + } else + if (pxclk >= bios->fp.duallink_transition_clk) { + script |= 0x0100; + } - /* handle CENTER/ASPECT scaling, taking into account the areas - * removed already for overscan compensation - */ - switch (mode) { - case DRM_MODE_SCALE_CENTER: - oX = min((u32)umode->hdisplay, oX); - oY = min((u32)umode->vdisplay, oY); - /* fall-through */ - case DRM_MODE_SCALE_ASPECT: - if (oY < oX) { - u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; - oX = ((oY * aspect) + (aspect / 2)) >> 19; - } else { - u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; - oY = ((oX * aspect) + (aspect / 2)) >> 19; + /* determine panel depth */ + if (script & 0x0100) { + if (bios->fp.strapless_is_24bit & 2) + script |= 0x0200; + } else { + if (bios->fp.strapless_is_24bit & 1) + script |= 0x0200; + } + + if (nv_connector && nv_connector->edid && + (nv_connector->edid->revision >= 4) && + (nv_connector->edid->input & 0x70) >= 0x20) + script |= 0x0200; } break; + case DCB_OUTPUT_TMDS: + script = (mc >> 8) & 0xf; + if (pxclk >= 165000) + script |= 0x0100; + break; + case DCB_OUTPUT_DP: + script = (mc >> 8) & 0xf; + break; + case DCB_OUTPUT_ANALOG: + script = 0xff; + break; default: + NV_ERROR(drm, "modeset on unsupported output type!\n"); break; } - push = evo_wait(mast, 8); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - /*XXX: SCALE_CTRL_ACTIVE??? 
*/ - evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2); - evo_data(push, (oY << 16) | oX); - evo_data(push, (oY << 16) | oX); - evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1); - evo_data(push, umode->vdisplay << 16 | umode->hdisplay); - } else { - evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); - evo_data(push, (oY << 16) | oX); - evo_data(push, (oY << 16) | oX); - evo_data(push, (oY << 16) | oX); - evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); - evo_data(push, umode->vdisplay << 16 | umode->hdisplay); - } + return script; +} - evo_kick(push, mast); +static void +nv50_display_unk10_handler(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_display *disp = nv50_display(dev); + u32 unk30 = nv_rd32(device, 0x610030), mc; + int i, crtc, or = 0, type = DCB_OUTPUT_ANY; - if (update) { - nv50_display_flip_stop(crtc); - nv50_display_flip_next(crtc, crtc->fb, NULL, 1); - } - } + NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30); + disp->irq.dcb = NULL; - return 0; -} + nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8); -static int -nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) -{ - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - u32 *push, hue, vib; - int adj; - - adj = (nv_crtc->color_vibrance > 0) ? 50 : 0; - vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff; - hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff; - - push = evo_wait(mast, 16); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1); - evo_data(push, (hue << 20) | (vib << 8)); - } else { - evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1); - evo_data(push, (hue << 20) | (vib << 8)); - } + /* Determine which CRTC we're dealing with, only 1 ever will be + * signalled at the same time with the current nouveau code. 
+ */ + crtc = ffs((unk30 & 0x00000060) >> 5) - 1; + if (crtc < 0) + goto ack; + + /* Nothing needs to be done for the encoder */ + crtc = ffs((unk30 & 0x00000180) >> 7) - 1; + if (crtc < 0) + goto ack; + + /* Find which encoder was connected to the CRTC */ + for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) { + mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); + NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; - if (update) { - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); + switch ((mc & 0x00000f00) >> 8) { + case 0: type = DCB_OUTPUT_ANALOG; break; + case 1: type = DCB_OUTPUT_TV; break; + default: + NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc); + goto ack; } - evo_kick(push, mast); + + or = i; } - return 0; -} + for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { + if (nv_device(drm->device)->chipset < 0x90 || + nv_device(drm->device)->chipset == 0x92 || + nv_device(drm->device)->chipset == 0xa0) + mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); + else + mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); -static int -nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, - int x, int y, bool update) -{ - struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb); - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - u32 *push; - - push = evo_wait(mast, 16); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1); - evo_data(push, nvfb->nvbo->bo.offset >> 8); - evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3); - evo_data(push, (fb->height << 16) | fb->width); - evo_data(push, nvfb->r_pitch); - evo_data(push, nvfb->r_format); - evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1); - evo_data(push, (y << 16) | x); - if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, nvfb->r_dma); - } - } else { - evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); - evo_data(push, nvfb->nvbo->bo.offset >> 8); - evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4); - evo_data(push, (fb->height << 16) | fb->width); - evo_data(push, nvfb->r_pitch); - evo_data(push, nvfb->r_format); - evo_data(push, nvfb->r_dma); - evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); - evo_data(push, (y << 16) | x); - } + NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; - if (update) { - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); + switch ((mc & 0x00000f00) >> 8) { + case 0: type = DCB_OUTPUT_LVDS; break; + case 1: type = DCB_OUTPUT_TMDS; break; + case 2: type = DCB_OUTPUT_TMDS; break; + case 5: type = DCB_OUTPUT_TMDS; break; + case 8: type = DCB_OUTPUT_DP; break; + case 9: type = DCB_OUTPUT_DP; break; + default: + NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc); + goto ack; } - evo_kick(push, mast); + + or = i; } - nv_crtc->fb.tile_flags = nvfb->r_dma; - return 0; -} + /* There was no encoder to disable */ + if (type == DCB_OUTPUT_ANY) + goto ack; -static void -nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc) -{ - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - u32 *push = evo_wait(mast, 16); - if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { - evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0x85000000); - evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); - } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); - 
evo_data(push, 0x85000000); - evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); - evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM); - } else { - evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); - evo_data(push, 0x85000000); - evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); - evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); - evo_data(push, NvEvoVRAM); - } - evo_kick(push, mast); - } -} + /* Disable the encoder */ + for (i = 0; i < drm->vbios.dcb.entries; i++) { + struct dcb_output *dcb = &drm->vbios.dcb.entry[i]; -static void -nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc) -{ - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - u32 *push = evo_wait(mast, 16); - if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { - evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x05000000); - } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x05000000); - evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x05000000); - evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); + if (dcb->type == type && (dcb->or & (1 << or))) { + nouveau_bios_run_display_table(dev, 0, -1, dcb, -1); + disp->irq.dcb = dcb; + goto ack; } - evo_kick(push, mast); } + + NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc); +ack: + nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); + nv_wr32(device, 0x610030, 0x80000000); } static void -nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update) +nv50_display_unk20_handler(struct drm_device *dev) { - struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); - - if (show) - nv50_crtc_cursor_show(nv_crtc); - else - nv50_crtc_cursor_hide(nv_crtc); - - if (update) { - u32 *push = evo_wait(mast, 2); - if (push) { - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, mast); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_display *disp = nv50_display(dev); + u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0; + struct dcb_output *dcb; + int i, crtc, or = 0, type = DCB_OUTPUT_ANY; + + NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30); + dcb = disp->irq.dcb; + if (dcb) { + nouveau_bios_run_display_table(dev, 0, -2, dcb, -1); + disp->irq.dcb = NULL; + } + + /* CRTC clock change requested? 
*/ + crtc = ffs((unk30 & 0x00000600) >> 9) - 1; + if (crtc >= 0) { + pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)); + pclk &= 0x003fffff; + if (pclk) + nv50_crtc_set_clock(dev, crtc, pclk); + + tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc)); + tmp &= ~0x000000f; + nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp); + } + + /* Nothing needs to be done for the encoder */ + crtc = ffs((unk30 & 0x00000180) >> 7) - 1; + if (crtc < 0) + goto ack; + pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff; + + /* Find which encoder is connected to the CRTC */ + for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) { + mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i)); + NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; + + switch ((mc & 0x00000f00) >> 8) { + case 0: type = DCB_OUTPUT_ANALOG; break; + case 1: type = DCB_OUTPUT_TV; break; + default: + NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc); + goto ack; } + + or = i; } -} -static void -nv50_crtc_dpms(struct drm_crtc *crtc, int mode) -{ -} + for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { + if (nv_device(drm->device)->chipset < 0x90 || + nv_device(drm->device)->chipset == 0x92 || + nv_device(drm->device)->chipset == 0xa0) + mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i)); + else + mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i)); -static void -nv50_crtc_prepare(struct drm_crtc *crtc) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nv50_mast *mast = nv50_mast(crtc->dev); - u32 *push; - - nv50_display_flip_stop(crtc); - - push = evo_wait(mast, 2); - if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x40000000); - } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x40000000); - evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x03000000); - evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); + NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; + + switch ((mc & 0x00000f00) >> 8) { + case 0: type = DCB_OUTPUT_LVDS; break; + case 1: type = DCB_OUTPUT_TMDS; break; + case 2: type = DCB_OUTPUT_TMDS; break; + case 5: type = DCB_OUTPUT_TMDS; break; + case 8: type = DCB_OUTPUT_DP; break; + case 9: type = DCB_OUTPUT_DP; break; + default: + NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc); + goto ack; } - evo_kick(push, mast); + or = i; } - nv50_crtc_cursor_show_hide(nv_crtc, false, false); -} - -static void -nv50_crtc_commit(struct drm_crtc *crtc) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nv50_mast *mast = nv50_mast(crtc->dev); - u32 *push; - - push = evo_wait(mast, 32); - if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM_LP); - evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0xc0000000); - evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); - } else - if (nv50_vers(mast) < 
NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, nv_crtc->fb.tile_flags); - evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0xc0000000); - evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); - evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM); - } else { - evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); - evo_data(push, nv_crtc->fb.tile_flags); - evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); - evo_data(push, 0x83000000); - evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); - evo_data(push, 0x00000000); - evo_data(push, 0x00000000); - evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); - evo_data(push, NvEvoVRAM); - evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); - evo_data(push, 0xffffff00); - } - - evo_kick(push, mast); - } - - nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true); - nv50_display_flip_next(crtc, crtc->fb, NULL, 1); -} - -static bool -nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - -static int -nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) -{ - struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); - int ret; - - ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; - - if (old_fb) { - nvfb = nouveau_framebuffer(old_fb); - nouveau_bo_unpin(nvfb->nvbo); - } - - return 0; -} - -static int -nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, - struct drm_display_mode *mode, int x, int y, - struct drm_framebuffer *old_fb) -{ - struct nv50_mast *mast = nv50_mast(crtc->dev); - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nouveau_connector *nv_connector; - u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; - u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1; - u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; - u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; - u32 vblan2e = 0, vblan2s = 1; - u32 *push; - int ret; - - hactive = mode->htotal; - hsynce = mode->hsync_end - mode->hsync_start - 1; - hbackp = mode->htotal - mode->hsync_end; - hblanke = hsynce + hbackp; - hfrontp = mode->hsync_start - mode->hdisplay; - hblanks = mode->htotal - hfrontp - 1; - - vactive = mode->vtotal * vscan / ilace; - vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; - vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; - vblanke = vsynce + vbackp; - vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; - vblanks = vactive - vfrontp - 1; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) { - vblan2e = vactive + vsynce + vbackp; - vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); - vactive = (vactive * 2) + 1; - } - - ret = nv50_crtc_swap_fbs(crtc, old_fb); - if (ret) - return ret; - - push = evo_wait(mast, 64); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0x00800000 | mode->clock); - evo_data(push, (ilace == 2) ? 
2 : 0); - evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6); - evo_data(push, 0x00000000); - evo_data(push, (vactive << 16) | hactive); - evo_data(push, ( vsynce << 16) | hsynce); - evo_data(push, (vblanke << 16) | hblanke); - evo_data(push, (vblanks << 16) | hblanks); - evo_data(push, (vblan2e << 16) | vblan2s); - evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1); - evo_data(push, 0x00000000); - evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2); - evo_data(push, 0x00000311); - evo_data(push, 0x00000100); - } else { - evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6); - evo_data(push, 0x00000000); - evo_data(push, (vactive << 16) | hactive); - evo_data(push, ( vsynce << 16) | hsynce); - evo_data(push, (vblanke << 16) | hblanke); - evo_data(push, (vblanks << 16) | hblanks); - evo_data(push, (vblan2e << 16) | vblan2s); - evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1); - evo_data(push, 0x00000000); /* ??? */ - evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3); - evo_data(push, mode->clock * 1000); - evo_data(push, 0x00200000); /* ??? */ - evo_data(push, mode->clock * 1000); - evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2); - evo_data(push, 0x00000311); - evo_data(push, 0x00000100); - } - - evo_kick(push, mast); - } - - nv_connector = nouveau_crtc_connector_get(nv_crtc); - nv50_crtc_set_dither(nv_crtc, false); - nv50_crtc_set_scale(nv_crtc, false); - nv50_crtc_set_color_vibrance(nv_crtc, false); - nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false); - return 0; -} - -static int -nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - struct nouveau_drm *drm = nouveau_drm(crtc->dev); - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - int ret; - - if (!crtc->fb) { - NV_DEBUG(drm, "No FB bound\n"); - return 0; - } - - ret = nv50_crtc_swap_fbs(crtc, old_fb); - if (ret) - return ret; - - nv50_display_flip_stop(crtc); - nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true); - nv50_display_flip_next(crtc, crtc->fb, NULL, 1); - return 0; -} - -static int -nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, - struct drm_framebuffer *fb, int x, int y, - enum mode_set_atomic state) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - nv50_display_flip_stop(crtc); - nv50_crtc_set_image(nv_crtc, fb, x, y, true); - return 0; -} - -static void -nv50_crtc_lut_load(struct drm_crtc *crtc) -{ - struct nv50_disp *disp = nv50_disp(crtc->dev); - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); - int i; - - for (i = 0; i < 256; i++) { - u16 r = nv_crtc->lut.r[i] >> 2; - u16 g = nv_crtc->lut.g[i] >> 2; - u16 b = nv_crtc->lut.b[i] >> 2; - - if (nv_mclass(disp->core) < NVD0_DISP_CLASS) { - writew(r + 0x0000, lut + (i * 0x08) + 0); - writew(g + 0x0000, lut + (i * 0x08) + 2); - writew(b + 0x0000, lut + (i * 0x08) + 4); - } else { - writew(r + 0x6000, lut + (i * 0x20) + 0); - writew(g + 0x6000, lut + (i * 0x20) + 2); - writew(b + 0x6000, lut + (i * 0x20) + 4); - } - } -} - -static int -nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, - uint32_t handle, uint32_t width, uint32_t height) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct drm_gem_object *gem; - struct nouveau_bo *nvbo; - bool visible = (handle != 0); - int i, ret = 0; - - if (visible) { - if (width != 64 || height != 64) - return -EINVAL; - - gem = drm_gem_object_lookup(dev, file_priv, handle); - if (unlikely(!gem)) - return -ENOENT; - nvbo 
= nouveau_gem_object(gem); - - ret = nouveau_bo_map(nvbo); - if (ret == 0) { - for (i = 0; i < 64 * 64; i++) { - u32 v = nouveau_bo_rd32(nvbo, i); - nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v); - } - nouveau_bo_unmap(nvbo); - } - - drm_gem_object_unreference_unlocked(gem); - } - - if (visible != nv_crtc->cursor.visible) { - nv50_crtc_cursor_show_hide(nv_crtc, visible, true); - nv_crtc->cursor.visible = visible; - } - - return ret; -} - -static int -nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) -{ - struct nv50_curs *curs = nv50_curs(crtc); - struct nv50_chan *chan = nv50_chan(curs); - nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff)); - nv_wo32(chan->user, 0x0080, 0x00000000); - return 0; -} - -static void -nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - u32 end = max(start + size, (u32)256); - u32 i; - - for (i = start; i < end; i++) { - nv_crtc->lut.r[i] = r[i]; - nv_crtc->lut.g[i] = g[i]; - nv_crtc->lut.b[i] = b[i]; - } - - nv50_crtc_lut_load(crtc); -} - -static void -nv50_crtc_destroy(struct drm_crtc *crtc) -{ - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - struct nv50_disp *disp = nv50_disp(crtc->dev); - struct nv50_head *head = nv50_head(crtc); - nv50_dmac_destroy(disp->core, &head->ovly.base); - nv50_pioc_destroy(disp->core, &head->oimm.base); - nv50_dmac_destroy(disp->core, &head->sync.base); - nv50_pioc_destroy(disp->core, &head->curs.base); - nouveau_bo_unmap(nv_crtc->cursor.nvbo); - if (nv_crtc->cursor.nvbo) - nouveau_bo_unpin(nv_crtc->cursor.nvbo); - nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); - nouveau_bo_unmap(nv_crtc->lut.nvbo); - if (nv_crtc->lut.nvbo) - nouveau_bo_unpin(nv_crtc->lut.nvbo); - nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); - drm_crtc_cleanup(crtc); - kfree(crtc); -} + if (type == DCB_OUTPUT_ANY) + goto ack; -static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { - .dpms = nv50_crtc_dpms, - .prepare = nv50_crtc_prepare, - .commit = nv50_crtc_commit, - .mode_fixup = nv50_crtc_mode_fixup, - .mode_set = nv50_crtc_mode_set, - .mode_set_base = nv50_crtc_mode_set_base, - .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, - .load_lut = nv50_crtc_lut_load, -}; - -static const struct drm_crtc_funcs nv50_crtc_func = { - .cursor_set = nv50_crtc_cursor_set, - .cursor_move = nv50_crtc_cursor_move, - .gamma_set = nv50_crtc_gamma_set, - .set_config = drm_crtc_helper_set_config, - .destroy = nv50_crtc_destroy, - .page_flip = nouveau_crtc_page_flip, -}; - -static void -nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) -{ -} - -static void -nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) -{ -} - -static int -nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) -{ - struct nv50_disp *disp = nv50_disp(dev); - struct nv50_head *head; - struct drm_crtc *crtc; - int ret, i; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (!head) - return -ENOMEM; - - head->base.index = index; - head->base.set_dither = nv50_crtc_set_dither; - head->base.set_scale = nv50_crtc_set_scale; - head->base.set_color_vibrance = nv50_crtc_set_color_vibrance; - head->base.color_vibrance = 50; - head->base.vibrant_hue = 0; - head->base.cursor.set_offset = nv50_cursor_set_offset; - head->base.cursor.set_pos = nv50_cursor_set_pos; - for (i = 0; i < 256; i++) { - head->base.lut.r[i] = i << 8; - head->base.lut.g[i] = i << 8; - head->base.lut.b[i] = i << 8; + /* Enable the encoder */ + for (i = 0; i < 
drm->vbios.dcb.entries; i++) { + dcb = &drm->vbios.dcb.entry[i]; + if (dcb->type == type && (dcb->or & (1 << or))) + break; } - crtc = &head->base.base; - drm_crtc_init(dev, crtc, &nv50_crtc_func); - drm_crtc_helper_add(crtc, &nv50_crtc_hfunc); - drm_mode_crtc_set_gamma_size(crtc, 256); - - ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, &head->base.lut.nvbo); - if (!ret) { - ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM); - if (!ret) { - ret = nouveau_bo_map(head->base.lut.nvbo); - if (ret) - nouveau_bo_unpin(head->base.lut.nvbo); - } - if (ret) - nouveau_bo_ref(NULL, &head->base.lut.nvbo); + if (i == drm->vbios.dcb.entries) { + NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc); + goto ack; } - if (ret) - goto out; - - nv50_crtc_lut_load(crtc); + script = nv50_display_script_select(dev, dcb, mc, pclk); + nouveau_bios_run_display_table(dev, script, pclk, dcb, -1); - /* allocate cursor resources */ - ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index, - &(struct nv50_display_curs_class) { - .head = index, - }, sizeof(struct nv50_display_curs_class), - &head->curs.base); - if (ret) - goto out; - - ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, &head->base.cursor.nvbo); - if (!ret) { - ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM); - if (!ret) { - ret = nouveau_bo_map(head->base.cursor.nvbo); - if (ret) - nouveau_bo_unpin(head->base.lut.nvbo); - } - if (ret) - nouveau_bo_ref(NULL, &head->base.cursor.nvbo); - } - - if (ret) - goto out; - - /* allocate page flip / sync resources */ - ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index, - &(struct nv50_display_sync_class) { - .pushbuf = EVO_PUSH_HANDLE(SYNC, index), - .head = index, - }, sizeof(struct nv50_display_sync_class), - disp->sync->bo.offset, &head->sync.base); - if (ret) - goto out; - - head->sync.sem.offset = EVO_SYNC(1 + index, 0x00); - - /* allocate overlay resources */ - ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, - &(struct nv50_display_oimm_class) { - .head = index, - }, sizeof(struct nv50_display_oimm_class), - &head->oimm.base); - if (ret) - goto out; - - ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index, - &(struct nv50_display_ovly_class) { - .pushbuf = EVO_PUSH_HANDLE(OVLY, index), - .head = index, - }, sizeof(struct nv50_display_ovly_class), - disp->sync->bo.offset, &head->ovly.base); - if (ret) - goto out; - -out: - if (ret) - nv50_crtc_destroy(crtc); - return ret; -} - -/****************************************************************************** - * DAC - *****************************************************************************/ -static void -nv50_dac_dpms(struct drm_encoder *encoder, int mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nv50_disp *disp = nv50_disp(encoder->dev); - int or = nv_encoder->or; - u32 dpms_ctrl; - - dpms_ctrl = 0x00000000; - if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) - dpms_ctrl |= 0x00000001; - if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) - dpms_ctrl |= 0x00000004; - - nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl); -} - -static bool -nv50_dac_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_connector *nv_connector; - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - if (nv_connector && 
nv_connector->native_mode) { - if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { - int id = adjusted_mode->base.id; - *adjusted_mode = *nv_connector->native_mode; - adjusted_mode->base.id = id; - } - } - - return true; -} - -static void -nv50_dac_commit(struct drm_encoder *encoder) -{ -} - -static void -nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct nv50_mast *mast = nv50_mast(encoder->dev); - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); - u32 *push; - - nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); - - push = evo_wait(mast, 8); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - u32 syncs = 0x00000000; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - syncs |= 0x00000001; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - syncs |= 0x00000002; - - evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2); - evo_data(push, 1 << nv_crtc->index); - evo_data(push, syncs); - } else { - u32 magic = 0x31ec6000 | (nv_crtc->index << 25); - u32 syncs = 0x00000001; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - syncs |= 0x00000008; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - syncs |= 0x00000010; - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - magic |= 0x00000001; - - evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); - evo_data(push, syncs); - evo_data(push, magic); - evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1); - evo_data(push, 1 << nv_crtc->index); - } - - evo_kick(push, mast); + if (type == DCB_OUTPUT_DP) { + int link = !(dcb->dpconf.sor.link & 1); + if ((mc & 0x000f0000) == 0x00020000) + nv50_sor_dp_calc_tu(dev, or, link, pclk, 18); + else + nv50_sor_dp_calc_tu(dev, or, link, pclk, 24); } - nv_encoder->crtc = encoder->crtc; -} - -static void -nv50_dac_disconnect(struct drm_encoder *encoder) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nv50_mast *mast = nv50_mast(encoder->dev); - const int or = nv_encoder->or; - u32 *push; - - if (nv_encoder->crtc) { - nv50_crtc_prepare(nv_encoder->crtc); - - push = evo_wait(mast, 4); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0400 + (or * 0x080), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0180 + (or * 0x020), 1); - evo_data(push, 0x00000000); - } - - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, mast); - } + if (dcb->type != DCB_OUTPUT_ANALOG) { + tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); + tmp &= ~0x00000f0f; + if (script & 0x0100) + tmp |= 0x00000101; + nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp); + } else { + nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); } - nv_encoder->crtc = NULL; -} - -static enum drm_connector_status -nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) -{ - struct nv50_disp *disp = nv50_disp(encoder->dev); - int ret, or = nouveau_encoder(encoder)->or; - u32 load = 0; - - ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); - if (ret || load != 7) - return connector_status_disconnected; + disp->irq.dcb = dcb; + disp->irq.pclk = pclk; + disp->irq.script = script; - return connector_status_connected; +ack: + nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20); + nv_wr32(device, 0x610030, 0x80000000); } +/* If programming a TMDS output on a SOR that can also be configured for + * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off. 
+ * + * It looks like the VBIOS TMDS scripts make an attempt at this, however, + * the VBIOS scripts on at least one board I have only switch it off on + * link 0, causing a blank display if the output has previously been + * programmed for DisplayPort. + */ static void -nv50_dac_destroy(struct drm_encoder *encoder) +nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb) { - drm_encoder_cleanup(encoder); - kfree(encoder); -} - -static const struct drm_encoder_helper_funcs nv50_dac_hfunc = { - .dpms = nv50_dac_dpms, - .mode_fixup = nv50_dac_mode_fixup, - .prepare = nv50_dac_disconnect, - .commit = nv50_dac_commit, - .mode_set = nv50_dac_mode_set, - .disable = nv50_dac_disconnect, - .get_crtc = nv50_display_crtc_get, - .detect = nv50_dac_detect -}; - -static const struct drm_encoder_funcs nv50_dac_func = { - .destroy = nv50_dac_destroy, -}; - -static int -nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) -{ - struct drm_device *dev = connector->dev; - struct nouveau_encoder *nv_encoder; + struct nouveau_device *device = nouveau_dev(dev); + int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); struct drm_encoder *encoder; + u32 tmp; - nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); - if (!nv_encoder) - return -ENOMEM; - nv_encoder->dcb = dcbe; - nv_encoder->or = ffs(dcbe->or) - 1; - - encoder = to_drm_encoder(nv_encoder); - encoder->possible_crtcs = dcbe->heads; - encoder->possible_clones = 0; - drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC); - drm_encoder_helper_add(encoder, &nv50_dac_hfunc); - - drm_mode_connector_attach_encoder(connector, encoder); - return 0; -} - -/****************************************************************************** - * Audio - *****************************************************************************/ -static void -nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_connector *nv_connector; - struct nv50_disp *disp = nv50_disp(encoder->dev); - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - if (!drm_detect_monitor_audio(nv_connector->edid)) + if (dcb->type != DCB_OUTPUT_TMDS) return; - drm_edid_to_eld(&nv_connector->base, nv_connector->edid); - - nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, - nv_connector->base.eld, - nv_connector->base.eld[2] * 4); -} - -static void -nv50_audio_disconnect(struct drm_encoder *encoder) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nv50_disp *disp = nv50_disp(encoder->dev); - - nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0); -} - -/****************************************************************************** - * HDMI - *****************************************************************************/ -static void -nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); - struct nouveau_connector *nv_connector; - struct nv50_disp *disp = nv50_disp(encoder->dev); - const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; - u32 rekey = 56; /* binary driver, and tegra constant */ - u32 max_ac_packet; - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - if (!drm_detect_hdmi_monitor(nv_connector->edid)) - return; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct 
nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - max_ac_packet = mode->htotal - mode->hdisplay; - max_ac_packet -= rekey; - max_ac_packet -= 18; /* constant from tegra */ - max_ac_packet /= 32; - - nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, - NV84_DISP_SOR_HDMI_PWR_STATE_ON | - (max_ac_packet << 16) | rekey); - - nv50_audio_mode_set(encoder, mode); -} - -static void -nv50_hdmi_disconnect(struct drm_encoder *encoder) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); - struct nv50_disp *disp = nv50_disp(encoder->dev); - const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; - - nv50_audio_disconnect(encoder); - - nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000); -} - -/****************************************************************************** - * SOR - *****************************************************************************/ -static void -nv50_sor_dpms(struct drm_encoder *encoder, int mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct drm_device *dev = encoder->dev; - struct nv50_disp *disp = nv50_disp(dev); - struct drm_encoder *partner; - int or = nv_encoder->or; - - nv_encoder->last_dpms = mode; - - list_for_each_entry(partner, &dev->mode_config.encoder_list, head) { - struct nouveau_encoder *nv_partner = nouveau_encoder(partner); - - if (partner->encoder_type != DRM_MODE_ENCODER_TMDS) - continue; - - if (nv_partner != nv_encoder && - nv_partner->dcb->or == nv_encoder->dcb->or) { - if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) - return; + if (nv_encoder->dcb->type == DCB_OUTPUT_DP && + nv_encoder->dcb->or & (1 << or)) { + tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)); + tmp &= ~NV50_SOR_DP_CTRL_ENABLED; + nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp); break; } } - - nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON)); - - if (nv_encoder->dcb->type == DCB_OUTPUT_DP) - nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core); -} - -static bool -nv50_sor_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_connector *nv_connector; - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - if (nv_connector && nv_connector->native_mode) { - if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { - int id = adjusted_mode->base.id; - *adjusted_mode = *nv_connector->native_mode; - adjusted_mode->base.id = id; - } - } - - return true; } static void -nv50_sor_disconnect(struct drm_encoder *encoder) +nv50_display_unk40_handler(struct drm_device *dev) { - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nv50_mast *mast = nv50_mast(encoder->dev); - const int or = nv_encoder->or; - u32 *push; - - if (nv_encoder->crtc) { - nv50_crtc_prepare(nv_encoder->crtc); - - push = evo_wait(mast, 4); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { - evo_mthd(push, 0x0600 + (or * 0x40), 1); - evo_data(push, 0x00000000); - } else { - evo_mthd(push, 0x0200 + (or * 0x20), 1); - evo_data(push, 0x00000000); - } - - evo_mthd(push, 0x0080, 1); - evo_data(push, 0x00000000); - evo_kick(push, mast); - } - - nv50_hdmi_disconnect(encoder); - } + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_display *disp = nv50_display(dev); + struct dcb_output *dcb = disp->irq.dcb; + u16 script = 
disp->irq.script; + u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk; - nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; - nv_encoder->crtc = NULL; -} + NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30); + disp->irq.dcb = NULL; + if (!dcb) + goto ack; -static void -nv50_sor_prepare(struct drm_encoder *encoder) -{ - nv50_sor_disconnect(encoder); - if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP) - evo_sync(encoder->dev); -} + nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1); + nv50_display_unk40_dp_set_tmds(dev, dcb); -static void -nv50_sor_commit(struct drm_encoder *encoder) -{ +ack: + nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); + nv_wr32(device, 0x610030, 0x80000000); + nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8); } static void -nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, - struct drm_display_mode *mode) +nv50_display_bh(unsigned long data) { - struct nv50_disp *disp = nv50_disp(encoder->dev); - struct nv50_mast *mast = nv50_mast(encoder->dev); - struct drm_device *dev = encoder->dev; + struct drm_device *dev = (struct drm_device *)data; + struct nouveau_device *device = nouveau_dev(dev); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); - struct nouveau_connector *nv_connector; - struct nvbios *bios = &drm->vbios; - u32 *push, lvds = 0; - u8 owner = 1 << nv_crtc->index; - u8 proto = 0xf; - u8 depth = 0x0; - - nv_connector = nouveau_encoder_connector_get(nv_encoder); - switch (nv_encoder->dcb->type) { - case DCB_OUTPUT_TMDS: - if (nv_encoder->dcb->sorconf.link & 1) { - if (mode->clock < 165000) - proto = 0x1; - else - proto = 0x5; - } else { - proto = 0x2; - } - nv50_hdmi_mode_set(encoder, mode); - break; - case DCB_OUTPUT_LVDS: - proto = 0x0; + for (;;) { + uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0); + uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1); - if (bios->fp_no_ddc) { - if (bios->fp.dual_link) - lvds |= 0x0100; - if (bios->fp.if_is_24bit) - lvds |= 0x0200; - } else { - if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { - if (((u8 *)nv_connector->edid)[121] == 2) - lvds |= 0x0100; - } else - if (mode->clock >= bios->fp.duallink_transition_clk) { - lvds |= 0x0100; - } - - if (lvds & 0x0100) { - if (bios->fp.strapless_is_24bit & 2) - lvds |= 0x0200; - } else { - if (bios->fp.strapless_is_24bit & 1) - lvds |= 0x0200; - } + NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); - if (nv_connector->base.display_info.bpc == 8) - lvds |= 0x0200; - } - - nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds); - break; - case DCB_OUTPUT_DP: - if (nv_connector->base.display_info.bpc == 6) { - nv_encoder->dp.datarate = mode->clock * 18 / 8; - depth = 0x2; - } else - if (nv_connector->base.display_info.bpc == 8) { - nv_encoder->dp.datarate = mode->clock * 24 / 8; - depth = 0x5; - } else { - nv_encoder->dp.datarate = mode->clock * 30 / 8; - depth = 0x6; - } - - if (nv_encoder->dcb->sorconf.link & 1) - proto = 0x8; + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) + nv50_display_unk10_handler(dev); else - proto = 0x9; - break; - default: - BUG_ON(1); - break; - } - - nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); - - push = evo_wait(nv50_mast(dev), 8); - if (push) { - if (nv50_vers(mast) < NVD0_DISP_CLASS) { - evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1); - evo_data(push, (depth << 16) | (proto << 8) | owner); - } else { - u32 magic = 
0x31ec6000 | (nv_crtc->index << 25); - u32 syncs = 0x00000001; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - syncs |= 0x00000008; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - syncs |= 0x00000010; - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - magic |= 0x00000001; - - evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); - evo_data(push, syncs | (depth << 6)); - evo_data(push, magic); - evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1); - evo_data(push, owner | (proto << 8)); - } - - evo_kick(push, mast); + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20) + nv50_display_unk20_handler(dev); + else + if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40) + nv50_display_unk40_handler(dev); + else + break; } - nv_encoder->crtc = encoder->crtc; + nv_wr32(device, NV03_PMC_INTR_EN_0, 1); } static void -nv50_sor_destroy(struct drm_encoder *encoder) +nv50_display_error_handler(struct drm_device *dev) { - drm_encoder_cleanup(encoder); - kfree(encoder); -} - -static const struct drm_encoder_helper_funcs nv50_sor_hfunc = { - .dpms = nv50_sor_dpms, - .mode_fixup = nv50_sor_mode_fixup, - .prepare = nv50_sor_prepare, - .commit = nv50_sor_commit, - .mode_set = nv50_sor_mode_set, - .disable = nv50_sor_disconnect, - .get_crtc = nv50_display_crtc_get, -}; - -static const struct drm_encoder_funcs nv50_sor_func = { - .destroy = nv50_sor_destroy, -}; - -static int -nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) -{ - struct drm_device *dev = connector->dev; - struct nouveau_encoder *nv_encoder; - struct drm_encoder *encoder; - - nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); - if (!nv_encoder) - return -ENOMEM; - nv_encoder->dcb = dcbe; - nv_encoder->or = ffs(dcbe->or) - 1; - nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; - - encoder = to_drm_encoder(nv_encoder); - encoder->possible_crtcs = dcbe->heads; - encoder->possible_clones = 0; - drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &nv50_sor_hfunc); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; + u32 addr, data; + int chid; - drm_mode_connector_attach_encoder(connector, encoder); - return 0; -} + for (chid = 0; chid < 5; chid++) { + if (!(channels & (1 << chid))) + continue; -/****************************************************************************** - * Init - *****************************************************************************/ -void -nv50_display_fini(struct drm_device *dev) -{ -} + nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); + addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid)); + data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid)); + NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x " + "(0x%04x 0x%02x)\n", chid, + addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); -int -nv50_display_init(struct drm_device *dev) -{ - u32 *push = evo_wait(nv50_mast(dev), 32); - if (push) { - evo_mthd(push, 0x0088, 1); - evo_data(push, NvEvoSync); - evo_kick(push, nv50_mast(dev)); - return evo_sync(dev); + nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); } - - return -EBUSY; } void -nv50_display_destroy(struct drm_device *dev) -{ - struct nv50_disp *disp = nv50_disp(dev); - - nv50_dmac_destroy(disp->core, &disp->mast.base); - - nouveau_bo_unmap(disp->sync); - if (disp->sync) - nouveau_bo_unpin(disp->sync); - nouveau_bo_ref(NULL, &disp->sync); - - nouveau_display(dev)->priv = NULL; - kfree(disp); -} - -int 
-nv50_display_create(struct drm_device *dev) +nv50_display_intr(struct drm_device *dev) { - static const u16 oclass[] = { - NVE0_DISP_CLASS, - NVD0_DISP_CLASS, - NVA3_DISP_CLASS, - NV94_DISP_CLASS, - NVA0_DISP_CLASS, - NV84_DISP_CLASS, - NV50_DISP_CLASS, - }; struct nouveau_device *device = nouveau_dev(dev); struct nouveau_drm *drm = nouveau_drm(dev); - struct dcb_table *dcb = &drm->vbios.dcb; - struct drm_connector *connector, *tmp; - struct nv50_disp *disp; - struct dcb_output *dcbe; - int crtcs, ret, i; + struct nv50_display *disp = nv50_display(dev); + uint32_t delayed = 0; - disp = kzalloc(sizeof(*disp), GFP_KERNEL); - if (!disp) - return -ENOMEM; + while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { + uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0); + uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1); + uint32_t clock; - nouveau_display(dev)->priv = disp; - nouveau_display(dev)->dtor = nv50_display_destroy; - nouveau_display(dev)->init = nv50_display_init; - nouveau_display(dev)->fini = nv50_display_fini; + NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); - /* small shared memory area we use for notifiers and semaphores */ - ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, &disp->sync); - if (!ret) { - ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM); - if (!ret) { - ret = nouveau_bo_map(disp->sync); - if (ret) - nouveau_bo_unpin(disp->sync); - } - if (ret) - nouveau_bo_ref(NULL, &disp->sync); - } - - if (ret) - goto out; - - /* attempt to allocate a supported evo display class */ - ret = -ENODEV; - for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) { - ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, - 0xd1500000, oclass[i], NULL, 0, - &disp->core); - } - - if (ret) - goto out; - - /* allocate master evo channel */ - ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0, - &(struct nv50_display_mast_class) { - .pushbuf = EVO_PUSH_HANDLE(MAST, 0), - }, sizeof(struct nv50_display_mast_class), - disp->sync->bo.offset, &disp->mast.base); - if (ret) - goto out; - - /* create crtc objects to represent the hw heads */ - if (nv_mclass(disp->core) >= NVD0_DISP_CLASS) - crtcs = nv_rd32(device, 0x022448); - else - crtcs = 2; - - for (i = 0; i < crtcs; i++) { - ret = nv50_crtc_create(dev, disp->core, i); - if (ret) - goto out; - } + if (!intr0 && !(intr1 & ~delayed)) + break; - /* create encoder/connector objects based on VBIOS DCB table */ - for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) { - connector = nouveau_connector_create(dev, dcbe->connector); - if (IS_ERR(connector)) - continue; + if (intr0 & 0x001f0000) { + nv50_display_error_handler(dev); + intr0 &= ~0x001f0000; + } - if (dcbe->location != DCB_LOC_ON_CHIP) { - NV_WARN(drm, "skipping off-chip encoder %d/%d\n", - dcbe->type, ffs(dcbe->or) - 1); - continue; + if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { + intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC; + delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC; } - switch (dcbe->type) { - case DCB_OUTPUT_TMDS: - case DCB_OUTPUT_LVDS: - case DCB_OUTPUT_DP: - nv50_sor_create(connector, dcbe); - break; - case DCB_OUTPUT_ANALOG: - nv50_dac_create(connector, dcbe); - break; - default: - NV_WARN(drm, "skipping unsupported encoder %d/%d\n", - dcbe->type, ffs(dcbe->or) - 1); - continue; + clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 | + NV50_PDISPLAY_INTR_1_CLK_UNK20 | + NV50_PDISPLAY_INTR_1_CLK_UNK40)); + if (clock) { + nv_wr32(device, NV03_PMC_INTR_EN_0, 0); + tasklet_schedule(&disp->tasklet); + delayed 
|= clock; + intr1 &= ~clock; } - } - /* cull any connectors we created that don't have an encoder */ - list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { - if (connector->encoder_ids[0]) - continue; + if (intr0) { + NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0); + nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0); + } - NV_WARN(drm, "%s has no encoders, removing\n", - drm_get_connector_name(connector)); - connector->funcs->destroy(connector); + if (intr1) { + NV_ERROR(drm, + "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1); + nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1); + } } - -out: - if (ret) - nv50_display_destroy(dev); - return ret; } diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.h b/trunk/drivers/gpu/drm/nouveau/nv50_display.h index 70da347aa8c5..973554d8a7a6 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_display.h +++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.h @@ -30,16 +30,77 @@ #include "nouveau_display.h" #include "nouveau_crtc.h" #include "nouveau_reg.h" +#include "nv50_evo.h" -int nv50_display_create(struct drm_device *); -void nv50_display_destroy(struct drm_device *); -int nv50_display_init(struct drm_device *); -void nv50_display_fini(struct drm_device *); +struct nv50_display_crtc { + struct nouveau_channel *sync; + struct { + struct nouveau_bo *bo; + u32 offset; + u16 value; + } sem; +}; -void nv50_display_flip_stop(struct drm_crtc *); +struct nv50_display { + struct nouveau_channel *master; + + struct nouveau_gpuobj *ramin; + u32 dmao; + u32 hash; + + struct nv50_display_crtc crtc[2]; + + struct tasklet_struct tasklet; + struct { + struct dcb_output *dcb; + u16 script; + u32 pclk; + } irq; +}; + +static inline struct nv50_display * +nv50_display(struct drm_device *dev) +{ + return nouveau_display(dev)->priv; +} + +int nv50_display_early_init(struct drm_device *dev); +void nv50_display_late_takedown(struct drm_device *dev); +int nv50_display_create(struct drm_device *dev); +int nv50_display_init(struct drm_device *dev); +void nv50_display_fini(struct drm_device *dev); +void nv50_display_destroy(struct drm_device *dev); +void nv50_display_intr(struct drm_device *); +int nv50_crtc_blank(struct nouveau_crtc *, bool blank); +int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); + +u32 nv50_display_active_crtcs(struct drm_device *); + +int nv50_display_sync(struct drm_device *); int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, + struct nouveau_channel *chan); +void nv50_display_flip_stop(struct drm_crtc *); + +int nv50_evo_create(struct drm_device *dev); +void nv50_evo_destroy(struct drm_device *dev); +int nv50_evo_init(struct drm_device *dev); +void nv50_evo_fini(struct drm_device *dev); +void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base, + u64 size); +int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype, + u64 base, u64 size, struct nouveau_gpuobj **); + +int nvd0_display_create(struct drm_device *); +void nvd0_display_destroy(struct drm_device *); +int nvd0_display_init(struct drm_device *); +void nvd0_display_fini(struct drm_device *); +void nvd0_display_intr(struct drm_device *); + +void nvd0_display_flip_stop(struct drm_crtc *); +int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, struct nouveau_channel *, u32 swap_interval); struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head); +struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head); #endif /* __NV50_DISPLAY_H__ */ diff --git 
a/trunk/drivers/gpu/drm/nouveau/nv50_evo.c b/trunk/drivers/gpu/drm/nouveau/nv50_evo.c new file mode 100644 index 000000000000..9f6f55cdfa77 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_evo.c @@ -0,0 +1,403 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nv50_display.h" + +#include + +#include +#include + +static u32 +nv50_evo_rd32(struct nouveau_object *object, u32 addr) +{ + void __iomem *iomem = object->oclass->ofuncs->rd08; + return ioread32_native(iomem + addr); +} + +static void +nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data) +{ + void __iomem *iomem = object->oclass->ofuncs->rd08; + iowrite32_native(data, iomem + addr); +} + +static void +nv50_evo_channel_del(struct nouveau_channel **pevo) +{ + struct nouveau_channel *evo = *pevo; + + if (!evo) + return; + *pevo = NULL; + + nouveau_bo_unmap(evo->push.buffer); + nouveau_bo_ref(NULL, &evo->push.buffer); + + if (evo->object) + iounmap(evo->object->oclass->ofuncs); + + kfree(evo); +} + +int +nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype, + u64 base, u64 size, struct nouveau_gpuobj **pobj) +{ + struct drm_device *dev = evo->fence; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_display *disp = nv50_display(dev); + u32 dmao = disp->dmao; + u32 hash = disp->hash; + u32 flags5; + + if (nv_device(drm->device)->chipset < 0xc0) { + /* not supported on 0x50, specified in format mthd */ + if (nv_device(drm->device)->chipset == 0x50) + memtype = 0; + flags5 = 0x00010000; + } else { + if (memtype & 0x80000000) + flags5 = 0x00000000; /* large pages */ + else + flags5 = 0x00020000; + } + + nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22)); + nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1)); + nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base)); + nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 | + upper_32_bits(base)); + nv_wo32(disp->ramin, dmao + 0x10, 0x00000000); + nv_wo32(disp->ramin, dmao + 0x14, flags5); + + nv_wo32(disp->ramin, hash + 0x00, handle); + nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) | + evo->handle); + + disp->dmao += 0x20; + disp->hash += 0x08; + return 0; +} + +static int +nv50_evo_channel_new(struct drm_device *dev, int chid, + struct nouveau_channel **pevo) +{ + struct nouveau_drm *drm = nouveau_drm(dev); 
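/*
 * A sketch, not part of the patch, that lays out the six 32-bit words
 * nv50_evo_dmaobj_new() above writes into the display channel's object
 * RAM at disp->dmao, so the DMA-object format is easier to read.  The
 * helper name, the standalone form and the example values in main() are
 * illustrative only; the word contents mirror the nv_wo32() calls above.
 */
#include <stdint.h>

void pack_evo_dmaobj(uint32_t out[6], uint32_t memtype,
		     uint64_t base, uint64_t size, uint32_t flags5)
{
	uint64_t limit = base + size - 1;

	out[0] = 0x0019003d | (memtype << 22);		/* object class/control word */
	out[1] = (uint32_t)limit;			/* limit, low 32 bits */
	out[2] = (uint32_t)base;			/* base, low 32 bits */
	out[3] = ((uint32_t)(limit >> 32) << 24) |	/* limit, high bits ... */
		 (uint32_t)(base >> 32);		/* ... and base, high bits */
	out[4] = 0x00000000;
	out[5] = flags5;				/* chipset/page-size flags chosen above */
}

int main(void)
{
	uint32_t obj[6];

	/* e.g. a linear object covering the first 256 MiB, pre-Fermi flags */
	pack_evo_dmaobj(obj, 0x0000, 0, 256 << 20, 0x00010000);
	return 0;
}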
+ struct nv50_display *disp = nv50_display(dev); + struct nouveau_channel *evo; + int ret; + + evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); + if (!evo) + return -ENOMEM; + *pevo = evo; + + evo->drm = drm; + evo->handle = chid; + evo->fence = dev; + evo->user_get = 4; + evo->user_put = 0; + + ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL, + &evo->push.buffer); + if (ret == 0) + ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pevo); + return ret; + } + + ret = nouveau_bo_map(evo->push.buffer); + if (ret) { + NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pevo); + return ret; + } + + evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL); +#ifdef NOUVEAU_OBJECT_MAGIC + evo->object->_magic = NOUVEAU_OBJECT_MAGIC; +#endif + evo->object->parent = nv_object(disp->ramin)->parent; + evo->object->engine = nv_object(disp->ramin)->engine; + evo->object->oclass = + kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL); + evo->object->oclass->ofuncs = + kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL); + evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32; + evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32; + evo->object->oclass->ofuncs->rd08 = + ioremap(pci_resource_start(dev->pdev, 0) + + NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE); + return 0; +} + +static int +nv50_evo_channel_init(struct nouveau_channel *evo) +{ + struct nouveau_drm *drm = evo->drm; + struct nouveau_device *device = nv_device(drm->device); + int id = evo->handle, ret, i; + u64 pushbuf = evo->push.buffer->bo.offset; + u32 tmp; + + tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)); + if ((tmp & 0x009f0000) == 0x00020000) + nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); + + tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)); + if ((tmp & 0x003f0000) == 0x00030000) + nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); + + /* initialise fifo */ + nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | + NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | + NV50_PDISPLAY_EVO_DMA_CB_VALID); + nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); + nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id); + nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + + nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000); + nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { + NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id, + nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id))); + return -EBUSY; + } + + /* enable error reporting on the channel */ + nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id); + + evo->dma.max = (4096/4) - 2; + evo->dma.max &= ~7; + evo->dma.put = 0; + evo->dma.cur = evo->dma.put; + evo->dma.free = evo->dma.max - evo->dma.cur; + + ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); + if (ret) + return ret; + + for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + OUT_RING(evo, 0); + + return 0; +} + +static void +nv50_evo_channel_fini(struct nouveau_channel *evo) +{ + struct nouveau_drm *drm = evo->drm; + struct nouveau_device *device = nv_device(drm->device); + int id = evo->handle; + + nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000); + nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); + nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << 
id)); + nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); + if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { + NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id, + nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id))); + } +} + +void +nv50_evo_destroy(struct drm_device *dev) +{ + struct nv50_display *disp = nv50_display(dev); + int i; + + for (i = 0; i < 2; i++) { + if (disp->crtc[i].sem.bo) { + nouveau_bo_unmap(disp->crtc[i].sem.bo); + nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo); + } + nv50_evo_channel_del(&disp->crtc[i].sync); + } + nv50_evo_channel_del(&disp->master); + nouveau_gpuobj_ref(NULL, &disp->ramin); +} + +int +nv50_evo_create(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nv50_display *disp = nv50_display(dev); + struct nouveau_channel *evo; + int ret, i, j; + + /* setup object management on it, any other evo channel will + * use this also as there's no per-channel support on the + * hardware + */ + ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536, + NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin); + if (ret) { + NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret); + goto err; + } + + disp->hash = 0x0000; + disp->dmao = 0x1000; + + /* create primary evo channel, the one we use for modesetting + * purporses + */ + ret = nv50_evo_channel_new(dev, 0, &disp->master); + if (ret) + return ret; + evo = disp->master; + + ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, + disp->ramin->addr + 0x2000, 0x1000, NULL); + if (ret) + goto err; + + /* create some default objects for the scanout memtypes we support */ + ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000, + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000, + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 | + (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe), + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 | + (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe), + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + /* create "display sync" channels and other structures we need + * to implement page flipping + */ + for (i = 0; i < 2; i++) { + struct nv50_display_crtc *dispc = &disp->crtc[i]; + u64 offset; + + ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync); + if (ret) + goto err; + + ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &dispc->sem.bo); + if (!ret) { + ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(dispc->sem.bo); + if (ret) + nouveau_bo_ref(NULL, &dispc->sem.bo); + offset = dispc->sem.bo->bo.offset; + } + + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000, + offset, 4096, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000, + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 | + (nv_device(drm->device)->chipset < 0xc0 ? + 0x7a : 0xfe), + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 | + (nv_device(drm->device)->chipset < 0xc0 ? 
+ 0x70 : 0xfe), + 0, pfb->ram.size, NULL); + if (ret) + goto err; + + for (j = 0; j < 4096; j += 4) + nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000); + dispc->sem.offset = 0; + } + + return 0; + +err: + nv50_evo_destroy(dev); + return ret; +} + +int +nv50_evo_init(struct drm_device *dev) +{ + struct nv50_display *disp = nv50_display(dev); + int ret, i; + + ret = nv50_evo_channel_init(disp->master); + if (ret) + return ret; + + for (i = 0; i < 2; i++) { + ret = nv50_evo_channel_init(disp->crtc[i].sync); + if (ret) + return ret; + } + + return 0; +} + +void +nv50_evo_fini(struct drm_device *dev) +{ + struct nv50_display *disp = nv50_display(dev); + int i; + + for (i = 0; i < 2; i++) { + if (disp->crtc[i].sync) + nv50_evo_channel_fini(disp->crtc[i].sync); + } + + if (disp->master) + nv50_evo_channel_fini(disp->master); +} diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_evo.h b/trunk/drivers/gpu/drm/nouveau/nv50_evo.h new file mode 100644 index 000000000000..771d879bc834 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_evo.h @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __NV50_EVO_H__ +#define __NV50_EVO_H__ + +#define NV50_EVO_UPDATE 0x00000080 +#define NV50_EVO_UNK84 0x00000084 +#define NV50_EVO_UNK84_NOTIFY 0x40000000 +#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000 +#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000 +#define NV50_EVO_DMA_NOTIFY 0x00000088 +#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff +#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000 +#define NV50_EVO_UNK8C 0x0000008C + +#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r) +#define NV50_EVO_DAC_MODE_CTRL 0x00000400 +#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001 +#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002 +#define NV50_EVO_DAC_MODE_CTRL2 0x00000404 +#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001 +#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002 + +#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r) +#define NV50_EVO_SOR_MODE_CTRL 0x00000600 +#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001 +#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002 +#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100 +#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400 +#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000 +#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000 + +#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r) +#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r) +#define NV50_EVO_CRTC_UNK0800 0x00000800 +#define NV50_EVO_CRTC_CLOCK 0x00000804 +#define NV50_EVO_CRTC_INTERLACE 0x00000808 +#define NV50_EVO_CRTC_DISPLAY_START 0x00000810 +#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814 +#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818 +#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c +#define NV50_EVO_CRTC_UNK0820 0x00000820 +#define NV50_EVO_CRTC_UNK0824 0x00000824 +#define NV50_EVO_CRTC_UNK082C 0x0000082c +#define NV50_EVO_CRTC_CLUT_MODE 0x00000840 +/* You can't have a palette in 8 bit mode (=OFF) */ +#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000 +#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000 +#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000 +#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844 +#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C +#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff +#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_FB_OFFSET 0x00000860 +#define NV50_EVO_CRTC_FB_SIZE 0x00000868 +#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c +#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000 +#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000 +#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000 +#define NV50_EVO_CRTC_FB_DEPTH 0x00000870 +#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00 +#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900 +#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800 +#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00 +#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100 +#define NV50_EVO_CRTC_FB_DMA 0x00000874 +#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff +#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880 +#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000 +#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000 +#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884 +#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c +#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff +#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000 +#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0 +#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000 +#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011 +#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4 +#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000 +#define 
NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009 +#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8 +#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00 +#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000 +#define NV50_EVO_CRTC_FB_POS 0x000008c0 +#define NV50_EVO_CRTC_REAL_RES 0x000008c8 +#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4 +#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \ + ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF)) +/* Both of these are needed, otherwise nothing happens. */ +#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 +#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc +#define NV50_EVO_CRTC_UNK900 0x00000900 +#define NV50_EVO_CRTC_UNK904 0x00000904 + +#endif diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_fence.c b/trunk/drivers/gpu/drm/nouveau/nv50_fence.c index c20f2727ea0b..e0763ea88ee2 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_fence.c @@ -110,11 +110,8 @@ nv50_fence_create(struct nouveau_drm *drm) 0, 0x0000, NULL, &priv->bo); if (!ret) { ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); - if (!ret) { + if (!ret) ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } if (ret) nouveau_bo_ref(NULL, &priv->bo); } diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_pm.c b/trunk/drivers/gpu/drm/nouveau/nv50_pm.c index 8bd5d2781baf..c4a65039b1ca 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_pm.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_pm.c @@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl, { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_device *device = nouveau_dev(dev); - u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */ + u32 crtc_mask = nv50_display_active_crtcs(dev); struct nouveau_mem_exec_func exec = { .dev = dev, .precharge = mclk_precharge, diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_sor.c b/trunk/drivers/gpu/drm/nouveau/nv50_sor.c new file mode 100644 index 000000000000..b562b59e1326 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nv50_sor.c @@ -0,0 +1,530 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include + +#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) +#include "nouveau_reg.h" +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "nv50_display.h" + +#include + +static u32 +nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ + static const u8 nv50[] = { 16, 8, 0, 24 }; + if (nv_device(drm->device)->chipset == 0xaf) + return nvaf[lane]; + return nv50[lane]; +} + +static void +nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24); +} + +static void +nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb, + u8 lane, u8 swing, u8 preem) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane); + u32 mask = 0x000000ff << shift; + u8 *table, *entry, *config; + + table = nouveau_dp_bios_data(dev, dcb, &entry); + if (!table || (table[0] != 0x20 && table[0] != 0x21)) { + NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n"); + return; + } + + config = entry + table[4]; + while (config[0] != swing || config[1] != preem) { + config += table[5]; + if (config >= entry + table[4] + entry[4] * table[5]) + return; + } + + nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift); + nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift); + nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8); +} + +static void +nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc, + int link_nr, u32 link_bw, bool enhframe) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000; + u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000; + u8 *table, *entry, mask; + int i; + + table = nouveau_dp_bios_data(dev, dcb, &entry); + if (!table || (table[0] != 0x20 && table[0] != 0x21)) { + NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n"); + return; + } + + entry = ROMPTR(dev, entry[10]); + if (entry) { + while (link_bw < ROM16(entry[0]) * 10) + entry += 4; + + nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc); + } + + dpctrl |= ((1 << link_nr) - 1) << 16; + if (enhframe) + dpctrl |= 0x00004000; + + if (link_bw > 162000) + clksor |= 0x00040000; + + nv_wr32(device, 0x614300 + (or * 0x800), clksor); + nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl); + + mask = 0; + for (i = 0; i < link_nr; i++) + mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3); + nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask); +} + +static void +nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000; + u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)); + if (clksor & 0x000c0000) + *bw = 270000; + else + *bw = 162000; + + if 
(dpctrl > 0x00030000) *nr = 4; + else if (dpctrl > 0x00010000) *nr = 2; + else *nr = 1; +} + +void +nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + const u32 symbol = 100000; + int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; + int TU, VTUi, VTUf, VTUa; + u64 link_data_rate, link_ratio, unk; + u32 best_diff = 64 * symbol; + u32 link_nr, link_bw, r; + + /* calculate packed data rate for each lane */ + nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw); + link_data_rate = (clk * bpp / 8) / link_nr; + + /* calculate ratio of packed data rate to link symbol rate */ + link_ratio = link_data_rate * symbol; + r = do_div(link_ratio, link_bw); + + for (TU = 64; TU >= 32; TU--) { + /* calculate average number of valid symbols in each TU */ + u32 tu_valid = link_ratio * TU; + u32 calc, diff; + + /* find a hw representation for the fraction.. */ + VTUi = tu_valid / symbol; + calc = VTUi * symbol; + diff = tu_valid - calc; + if (diff) { + if (diff >= (symbol / 2)) { + VTUf = symbol / (symbol - diff); + if (symbol - (VTUf * diff)) + VTUf++; + + if (VTUf <= 15) { + VTUa = 1; + calc += symbol - (symbol / VTUf); + } else { + VTUa = 0; + VTUf = 1; + calc += symbol; + } + } else { + VTUa = 0; + VTUf = min((int)(symbol / diff), 15); + calc += symbol / VTUf; + } + + diff = calc - tu_valid; + } else { + /* no remainder, but the hw doesn't like the fractional + * part to be zero. decrement the integer part and + * have the fraction add a whole symbol back + */ + VTUa = 0; + VTUf = 1; + VTUi--; + } + + if (diff < best_diff) { + best_diff = diff; + bestTU = TU; + bestVTUa = VTUa; + bestVTUf = VTUf; + bestVTUi = VTUi; + if (diff == 0) + break; + } + } + + if (!bestTU) { + NV_ERROR(drm, "DP: unable to find suitable config\n"); + return; + } + + /* XXX close to vbios numbers, but not right */ + unk = (symbol - link_ratio) * bestTU; + unk *= link_ratio; + r = do_div(unk, symbol); + r = do_div(unk, symbol); + unk += 6; + + nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2); + nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 | + bestVTUf << 16 | + bestVTUi << 8 | + unk); +} +static void +nv50_sor_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct drm_device *dev = encoder->dev; + struct nouveau_channel *evo = nv50_display(dev)->master; + int ret; + + if (!nv_encoder->crtc) + return; + nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); + + NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or); + + ret = RING_SPACE(evo, 4); + if (ret) { + NV_ERROR(drm, "no space while disconnecting SOR\n"); + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); + OUT_RING (evo, 0); + BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + + nouveau_hdmi_mode_set(encoder, NULL); + + nv_encoder->crtc = NULL; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; +} + +static void +nv50_sor_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_device *device = nouveau_dev(encoder->dev); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct drm_device *dev = encoder->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_encoder *enc; + uint32_t val; + int or = nv_encoder->or; + + NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode); + + 
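/*
 * A worked example, not from the patch, of the fixed-point arithmetic
 * at the top of nv50_sor_dp_calc_tu() above.  The mode is an assumed
 * 148.5 MHz pixel clock at 24 bpp over 4 lanes running at 2.7 Gbps
 * (link_bw = 270000, i.e. a 270 MHz symbol clock in the same kHz units
 * as mode->clock).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t symbol = 100000;		/* fixed-point scale used by the driver */
	uint32_t clk = 148500, bpp = 24, link_nr = 4, link_bw = 270000;

	/* packed data rate per lane, in the same kHz-derived units */
	uint64_t link_data_rate = (clk * bpp / 8) / link_nr;		/* = 111375 */

	/* fraction of the link symbol rate carrying data, scaled by 1e5 */
	uint64_t link_ratio = link_data_rate * symbol / link_bw;	/* = 41250, i.e. 0.4125 */

	/* valid data symbols in a 64-symbol transfer unit */
	uint64_t tu_valid = link_ratio * 64;				/* = 2640000, i.e. 26.4 */

	printf("%llu.%llu symbols of every 64 carry pixel data\n",
	       (unsigned long long)(tu_valid / symbol),
	       (unsigned long long)(tu_valid % symbol / 10000));
	return 0;
}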
nv_encoder->last_dpms = mode; + list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nvenc = nouveau_encoder(enc); + + if (nvenc == nv_encoder || + (nvenc->dcb->type != DCB_OUTPUT_TMDS && + nvenc->dcb->type != DCB_OUTPUT_LVDS && + nvenc->dcb->type != DCB_OUTPUT_DP) || + nvenc->dcb->or != nv_encoder->dcb->or) + continue; + + if (nvenc->last_dpms == DRM_MODE_DPMS_ON) + return; + } + + /* wait for it to be done */ + if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), + NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { + NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or); + NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or, + nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or))); + } + + val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)); + + if (mode == DRM_MODE_DPMS_ON) + val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON; + else + val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON; + + nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val | + NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING); + if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or), + NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { + NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or); + NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or, + nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or))); + } + + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + struct dp_train_func func = { + .link_set = nv50_sor_dp_link_set, + .train_set = nv50_sor_dp_train_set, + .train_adj = nv50_sor_dp_train_adj + }; + + nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func); + } +} + +static void +nv50_sor_save(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + NV_ERROR(drm, "!!\n"); +} + +static void +nv50_sor_restore(struct drm_encoder *encoder) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + NV_ERROR(drm, "!!\n"); +} + +static bool +nv50_sor_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *connector; + + NV_DEBUG(drm, "or %d\n", nv_encoder->or); + + connector = nouveau_encoder_connector_get(nv_encoder); + if (!connector) { + NV_ERROR(drm, "Encoder has no connector\n"); + return false; + } + + if (connector->scaling_mode != DRM_MODE_SCALE_NONE && + connector->native_mode) + drm_mode_copy(adjusted_mode, connector->native_mode); + + return true; +} + +static void +nv50_sor_prepare(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + nv50_sor_disconnect(encoder); + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + /* avoid race between link training and supervisor intr */ + nv50_display_sync(encoder->dev); + } +} + +static void +nv50_sor_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, + struct drm_display_mode *mode) +{ + struct nouveau_channel *evo = nv50_display(encoder->dev)->master; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + uint32_t mode_ctl = 0; + int ret; + + NV_DEBUG(drm, "or %d type %d -> crtc %d\n", + nv_encoder->or, nv_encoder->dcb->type, crtc->index); + nv_encoder->crtc = encoder->crtc; + + switch (nv_encoder->dcb->type) { + case DCB_OUTPUT_TMDS: + if 
(nv_encoder->dcb->sorconf.link & 1) { + if (mode->clock < 165000) + mode_ctl = 0x0100; + else + mode_ctl = 0x0500; + } else + mode_ctl = 0x0200; + + nouveau_hdmi_mode_set(encoder, mode); + break; + case DCB_OUTPUT_DP: + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->base.display_info.bpc == 6) { + nv_encoder->dp.datarate = mode->clock * 18 / 8; + mode_ctl |= 0x00020000; + } else { + nv_encoder->dp.datarate = mode->clock * 24 / 8; + mode_ctl |= 0x00050000; + } + + if (nv_encoder->dcb->sorconf.link & 1) + mode_ctl |= 0x00000800; + else + mode_ctl |= 0x00000900; + break; + default: + break; + } + + if (crtc->index == 1) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1; + else + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC; + + nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); + + ret = RING_SPACE(evo, 2); + if (ret) { + NV_ERROR(drm, "no space while connecting SOR\n"); + nv_encoder->crtc = NULL; + return; + } + BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); + OUT_RING(evo, mode_ctl); +} + +static struct drm_crtc * +nv50_sor_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; +} + +static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = { + .dpms = nv50_sor_dpms, + .save = nv50_sor_save, + .restore = nv50_sor_restore, + .mode_fixup = nv50_sor_mode_fixup, + .prepare = nv50_sor_prepare, + .commit = nv50_sor_commit, + .mode_set = nv50_sor_mode_set, + .get_crtc = nv50_sor_crtc_get, + .detect = NULL, + .disable = nv50_sor_disconnect +}; + +static void +nv50_sor_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + + NV_DEBUG(drm, "\n"); + + drm_encoder_cleanup(encoder); + + kfree(nv_encoder); +} + +static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { + .destroy = nv50_sor_destroy, +}; + +int +nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry) +{ + struct nouveau_encoder *nv_encoder = NULL; + struct drm_device *dev = connector->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_encoder *encoder; + int type; + + NV_DEBUG(drm, "\n"); + + switch (entry->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_DP: + type = DRM_MODE_ENCODER_TMDS; + break; + case DCB_OUTPUT_LVDS: + type = DRM_MODE_ENCODER_LVDS; + break; + default: + return -EINVAL; + } + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type); + drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c b/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c index 2a56b1b551cb..53299eac9676 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c @@ -114,9 +114,17 @@ nvc0_fence_context_del(struct nouveau_channel *chan) struct nvc0_fence_chan *fctx = chan->fence; int i; - for (i = 0; i < dev->mode_config.num_crtc; i++) { - struct nouveau_bo 
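/*
 * A minimal sketch, assuming only what nv50_sor_mode_set() above shows,
 * of the TMDS half of the mode_ctl word: link A selects 0x0100 for
 * single link or 0x0500 for dual link (pixel clocks of 165 MHz and up),
 * while link B always uses 0x0200.  The helper name is illustrative.
 */
#include <stdint.h>
#include <stdbool.h>

uint32_t tmds_mode_ctl(bool link_a, int clock_khz)
{
	if (!link_a)
		return 0x0200;			/* sorconf.link bit 0 clear */
	return clock_khz < 165000 ? 0x0100	/* single-link TMDS */
				  : 0x0500;	/* dual-link TMDS */
}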
*bo = nv50_display_crtc_sema(dev, i); - nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); + if (nv_device(chan->drm->device)->card_type >= NV_D0) { + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i); + nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); + } + } else + if (nv_device(chan->drm->device)->card_type >= NV_50) { + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); + nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); + } } nouveau_bo_vma_del(priv->bo, &fctx->vma); @@ -146,7 +154,12 @@ nvc0_fence_context_new(struct nouveau_channel *chan) /* map display semaphore buffers into channel's vm */ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { - struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i); + struct nouveau_bo *bo; + if (nv_device(chan->drm->device)->card_type >= NV_D0) + bo = nvd0_display_crtc_sema(chan->drm->dev, i); + else + bo = nv50_display_crtc_sema(chan->drm->dev, i); + ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]); } @@ -190,8 +203,6 @@ nvc0_fence_destroy(struct nouveau_drm *drm) { struct nvc0_fence_priv *priv = drm->fence; nouveau_bo_unmap(priv->bo); - if (priv->bo) - nouveau_bo_unpin(priv->bo); nouveau_bo_ref(NULL, &priv->bo); drm->fence = NULL; kfree(priv); @@ -221,11 +232,8 @@ nvc0_fence_create(struct nouveau_drm *drm) TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo); if (ret == 0) { ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); - if (ret == 0) { + if (ret == 0) ret = nouveau_bo_map(priv->bo); - if (ret) - nouveau_bo_unpin(priv->bo); - } if (ret) nouveau_bo_ref(NULL, &priv->bo); } diff --git a/trunk/drivers/gpu/drm/nouveau/nvd0_display.c b/trunk/drivers/gpu/drm/nouveau/nvd0_display.c new file mode 100644 index 000000000000..c402fca2b2b8 --- /dev/null +++ b/trunk/drivers/gpu/drm/nouveau/nvd0_display.c @@ -0,0 +1,2141 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include + +#include +#include + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_gem.h" +#include "nouveau_connector.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "nouveau_fence.h" +#include "nv50_display.h" + +#include + +#include +#include +#include + +#define EVO_DMA_NR 9 + +#define EVO_MASTER (0x00) +#define EVO_FLIP(c) (0x01 + (c)) +#define EVO_OVLY(c) (0x05 + (c)) +#define EVO_OIMM(c) (0x09 + (c)) +#define EVO_CURS(c) (0x0d + (c)) + +/* offsets in shared sync bo of various structures */ +#define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) +#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) +#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00) +#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10) + +struct evo { + int idx; + dma_addr_t handle; + u32 *ptr; + struct { + u32 offset; + u16 value; + } sem; +}; + +struct nvd0_display { + struct nouveau_gpuobj *mem; + struct nouveau_bo *sync; + struct evo evo[9]; + + struct tasklet_struct tasklet; + u32 modeset; +}; + +static struct nvd0_display * +nvd0_display(struct drm_device *dev) +{ + return nouveau_display(dev)->priv; +} + +static struct drm_crtc * +nvd0_display_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; +} + +/****************************************************************************** + * EVO channel helpers + *****************************************************************************/ +static inline int +evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data) +{ + struct nouveau_device *device = nouveau_dev(dev); + int ret = 0; + nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001); + nv_wr32(device, 0x610704 + (id * 0x10), data); + nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd); + if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000)) + ret = -EBUSY; + nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000); + return ret; +} + +static u32 * +evo_wait(struct drm_device *dev, int id, int nr) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvd0_display *disp = nvd0_display(dev); + u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4; + + if (put + nr >= (PAGE_SIZE / 4)) { + disp->evo[id].ptr[put] = 0x20000000; + + nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000); + if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) { + NV_ERROR(drm, "evo %d dma stalled\n", id); + return NULL; + } + + put = 0; + } + + return disp->evo[id].ptr + put; +} + +static void +evo_kick(u32 *push, struct drm_device *dev, int id) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nvd0_display *disp = nvd0_display(dev); + + nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2); +} + +#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) +#define evo_data(p,d) *((p)++) = (d) + +static int +evo_init_dma(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvd0_display *disp = nvd0_display(dev); + u32 flags; + + flags = 0x00000000; + if (ch == EVO_MASTER) + flags |= 0x01000000; + + nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3); + nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000); + nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001); + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010); + nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000); + nv_wr32(device, 
0x610490 + (ch * 0x0010), 0x00000013 | flags); + if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) { + NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch, + nv_rd32(device, 0x610490 + (ch * 0x0010))); + return -EBUSY; + } + + nv_mask(device, 0x610090, (1 << ch), (1 << ch)); + nv_mask(device, 0x6100a0, (1 << ch), (1 << ch)); + return 0; +} + +static void +evo_fini_dma(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + + if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010)) + return; + + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000); + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000); + nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000); + nv_mask(device, 0x610090, (1 << ch), 0x00000000); + nv_mask(device, 0x6100a0, (1 << ch), 0x00000000); +} + +static inline void +evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data) +{ + struct nouveau_device *device = nouveau_dev(dev); + nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data); +} + +static int +evo_init_pio(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + + nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001); + if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) { + NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch, + nv_rd32(device, 0x610490 + (ch * 0x0010))); + return -EBUSY; + } + + nv_mask(device, 0x610090, (1 << ch), (1 << ch)); + nv_mask(device, 0x6100a0, (1 << ch), (1 << ch)); + return 0; +} + +static void +evo_fini_pio(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + + if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001)) + return; + + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010); + nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000); + nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000); + nv_mask(device, 0x610090, (1 << ch), 0x00000000); + nv_mask(device, 0x6100a0, (1 << ch), 0x00000000); +} + +static bool +evo_sync_wait(void *data) +{ + return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000; +} + +static int +evo_sync(struct drm_device *dev, int ch) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nvd0_display *disp = nvd0_display(dev); + u32 *push = evo_wait(dev, ch, 8); + if (push) { + nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000); + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x80000000 | EVO_MAST_NTFY); + evo_mthd(push, 0x0080, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_kick(push, dev, ch); + if (nv_wait_cb(device, evo_sync_wait, disp->sync)) + return 0; + } + + return -EBUSY; +} + +/****************************************************************************** + * Page flipping channel + *****************************************************************************/ +struct nouveau_bo * +nvd0_display_crtc_sema(struct drm_device *dev, int crtc) +{ + return nvd0_display(dev)->sync; +} + +void +nvd0_display_flip_stop(struct drm_crtc *crtc) +{ + struct nvd0_display *disp = nvd0_display(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)]; + u32 *push; + + push = evo_wait(crtc->dev, evo->idx, 8); + if (push) { + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0094, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x00c0, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 
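/*
 * A small illustration, not from the patch, of what the evo_mthd() and
 * evo_data() macros above place in the push buffer: a header word of
 * (count << 18 | method) followed by the payload words.  The "core
 * channel update" used throughout the file, method 0x0080 with a single
 * zero argument, therefore becomes 0x00040080 0x00000000.
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t buf[2], *push = buf;

	*push++ = (1u << 18) | 0x0080;	/* evo_mthd(push, 0x0080, 1) */
	*push++ = 0x00000000;		/* evo_data(push, 0x00000000) */

	assert(buf[0] == 0x00040080 && buf[1] == 0x00000000);
	return 0;
}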
0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, crtc->dev, evo->idx); + } +} + +int +nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct nouveau_channel *chan, u32 swap_interval) +{ + struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); + struct nvd0_display *disp = nvd0_display(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)]; + u64 offset; + u32 *push; + int ret; + + swap_interval <<= 4; + if (swap_interval == 0) + swap_interval |= 0x100; + + push = evo_wait(crtc->dev, evo->idx, 128); + if (unlikely(push == NULL)) + return -EBUSY; + + /* synchronise with the rendering channel, if necessary */ + if (likely(chan)) { + ret = RING_SPACE(chan, 10); + if (ret) + return ret; + + + offset = nvc0_fence_crtc(chan, nv_crtc->index); + offset += evo->sem.offset; + + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(offset)); + OUT_RING (chan, lower_32_bits(offset)); + OUT_RING (chan, 0xf00d0000 | evo->sem.value); + OUT_RING (chan, 0x1002); + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(offset)); + OUT_RING (chan, lower_32_bits(offset ^ 0x10)); + OUT_RING (chan, 0x74b1e000); + OUT_RING (chan, 0x1001); + FIRE_RING (chan); + } else { + nouveau_bo_wr32(disp->sync, evo->sem.offset / 4, + 0xf00d0000 | evo->sem.value); + evo_sync(crtc->dev, EVO_MASTER); + } + + /* queue the flip */ + evo_mthd(push, 0x0100, 1); + evo_data(push, 0xfffe0000); + evo_mthd(push, 0x0084, 1); + evo_data(push, swap_interval); + if (!(swap_interval & 0x00000100)) { + evo_mthd(push, 0x00e0, 1); + evo_data(push, 0x40000000); + } + evo_mthd(push, 0x0088, 4); + evo_data(push, evo->sem.offset); + evo_data(push, 0xf00d0000 | evo->sem.value); + evo_data(push, 0x74b1e000); + evo_data(push, NvEvoSync); + evo_mthd(push, 0x00a0, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x00c0, 1); + evo_data(push, nv_fb->r_dma); + evo_mthd(push, 0x0110, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0400, 5); + evo_data(push, nv_fb->nvbo->bo.offset >> 8); + evo_data(push, 0); + evo_data(push, (fb->height << 16) | fb->width); + evo_data(push, nv_fb->r_pitch); + evo_data(push, nv_fb->r_format); + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, crtc->dev, evo->idx); + + evo->sem.offset ^= 0x10; + evo->sem.value++; + return 0; +} + +/****************************************************************************** + * CRTC + *****************************************************************************/ +static int +nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev); + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_connector *nv_connector; + struct drm_connector *connector; + u32 *push, mode = 0x00; + u32 mthd; + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + connector = &nv_connector->base; + if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { + if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) + mode = DITHERING_MODE_DYNAMIC2X2; + } else { + mode = nv_connector->dithering_mode; + } + + if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { + if (connector->display_info.bpc >= 8) + mode |= DITHERING_DEPTH_8BPC; + } else { + mode |= nv_connector->dithering_depth; + } + + if (nv_device(drm->device)->card_type < NV_E0) + mthd = 0x0490 + 
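/*
 * A minimal sketch, inferring only the bookkeeping visible in
 * nvd0_display_flip_next() above: each CRTC alternates between two
 * 16-byte semaphore slots (offset ^= 0x10), tags every queued flip with
 * 0xf00d0000 | value, and uses 0x74b1e000 as the companion value in the
 * other slot.  The struct and helper name here are illustrative only.
 */
#include <stdint.h>

struct flip_sem {
	uint32_t offset;	/* byte offset of the slot used for this flip */
	uint16_t value;		/* incremented once per queued flip */
};

uint32_t queue_flip_token(struct flip_sem *sem)
{
	uint32_t token = 0xf00d0000 | sem->value;

	sem->offset ^= 0x10;	/* the next flip uses the other slot */
	sem->value++;
	return token;
}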
(nv_crtc->index * 0x0300); + else + mthd = 0x04a0 + (nv_crtc->index * 0x0300); + + push = evo_wait(dev, EVO_MASTER, 4); + if (push) { + evo_mthd(push, mthd, 1); + evo_data(push, mode); + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + evo_kick(push, dev, EVO_MASTER); + } + + return 0; +} + +static int +nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) +{ + struct drm_display_mode *omode, *umode = &nv_crtc->base.mode; + struct drm_device *dev = nv_crtc->base.dev; + struct drm_crtc *crtc = &nv_crtc->base; + struct nouveau_connector *nv_connector; + int mode = DRM_MODE_SCALE_NONE; + u32 oX, oY, *push; + + /* start off at the resolution we programmed the crtc for, this + * effectively handles NONE/FULL scaling + */ + nv_connector = nouveau_crtc_connector_get(nv_crtc); + if (nv_connector && nv_connector->native_mode) + mode = nv_connector->scaling_mode; + + if (mode != DRM_MODE_SCALE_NONE) + omode = nv_connector->native_mode; + else + omode = umode; + + oX = omode->hdisplay; + oY = omode->vdisplay; + if (omode->flags & DRM_MODE_FLAG_DBLSCAN) + oY *= 2; + + /* add overscan compensation if necessary, will keep the aspect + * ratio the same as the backend mode unless overridden by the + * user setting both hborder and vborder properties. + */ + if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || + (nv_connector->underscan == UNDERSCAN_AUTO && + nv_connector->edid && + drm_detect_hdmi_monitor(nv_connector->edid)))) { + u32 bX = nv_connector->underscan_hborder; + u32 bY = nv_connector->underscan_vborder; + u32 aspect = (oY << 19) / oX; + + if (bX) { + oX -= (bX * 2); + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } else { + oX -= (oX >> 4) + 32; + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + } + + /* handle CENTER/ASPECT scaling, taking into account the areas + * removed already for overscan compensation + */ + switch (mode) { + case DRM_MODE_SCALE_CENTER: + oX = min((u32)umode->hdisplay, oX); + oY = min((u32)umode->vdisplay, oY); + /* fall-through */ + case DRM_MODE_SCALE_ASPECT: + if (oY < oX) { + u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; + oX = ((oY * aspect) + (aspect / 2)) >> 19; + } else { + u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; + oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + break; + default: + break; + } + + push = evo_wait(dev, EVO_MASTER, 8); + if (push) { + evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); + evo_data(push, (oY << 16) | oX); + evo_data(push, (oY << 16) | oX); + evo_data(push, (oY << 16) | oX); + evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); + evo_data(push, (umode->vdisplay << 16) | umode->hdisplay); + evo_kick(push, dev, EVO_MASTER); + if (update) { + nvd0_display_flip_stop(crtc); + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); + } + } + + return 0; +} + +static int +nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, + int x, int y, bool update) +{ + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb); + u32 *push; + + push = evo_wait(fb->dev, EVO_MASTER, 16); + if (push) { + evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); + evo_data(push, nvfb->nvbo->bo.offset >> 8); + evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4); + evo_data(push, (fb->height << 16) | fb->width); + evo_data(push, nvfb->r_pitch); + evo_data(push, nvfb->r_format); + evo_data(push, nvfb->r_dma); + 
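/*
 * A worked example, not from the patch, of the 19-bit fixed-point
 * aspect math in nvd0_crtc_set_scale() above.  For a 1920x1080 output,
 * aspect = (1080 << 19) / 1920 = 294912, i.e. 0.5625 scaled by 2^19;
 * the matching height for an assumed 1600-pixel-wide area follows the
 * same rounding the driver uses.
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t oX = 1920, oY = 1080;
	uint32_t aspect = (oY << 19) / oX;			/* 294912 */
	uint32_t width  = 1600;
	uint32_t height = ((width * aspect) + (aspect / 2)) >> 19;

	assert(aspect == 294912);
	assert(height == 900);		/* 1600 * 0.5625, rounded down */
	return 0;
}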
evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); + evo_data(push, (y << 16) | x); + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + evo_kick(push, fb->dev, EVO_MASTER); + } + + nv_crtc->fb.tile_flags = nvfb->r_dma; + return 0; +} + +static void +nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update) +{ + struct drm_device *dev = nv_crtc->base.dev; + u32 *push = evo_wait(dev, EVO_MASTER, 16); + if (push) { + if (show) { + evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); + evo_data(push, 0x85000000); + evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); + evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); + evo_data(push, NvEvoVRAM); + } else { + evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x05000000); + evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + } + + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + + evo_kick(push, dev, EVO_MASTER); + } +} + +static void +nvd0_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static void +nvd0_crtc_prepare(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 *push; + + nvd0_display_flip_stop(crtc); + + push = evo_wait(crtc->dev, EVO_MASTER, 2); + if (push) { + evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x03000000); + evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + evo_kick(push, crtc->dev, EVO_MASTER); + } + + nvd0_crtc_cursor_show(nv_crtc, false, false); +} + +static void +nvd0_crtc_commit(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 *push; + + push = evo_wait(crtc->dev, EVO_MASTER, 32); + if (push) { + evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); + evo_data(push, nv_crtc->fb.tile_flags); + evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); + evo_data(push, 0x83000000); + evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); + evo_data(push, NvEvoVRAM); + evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0xffffff00); + evo_kick(push, crtc->dev, EVO_MASTER); + } + + nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true); + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); +} + +static bool +nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int +nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) +{ + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); + int ret; + + ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (old_fb) { + nvfb = nouveau_framebuffer(old_fb); + nouveau_bo_unpin(nvfb->nvbo); + } + + return 0; +} + +static int +nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, + struct drm_display_mode *mode, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_connector *nv_connector; + u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; + u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 
2 : 1; + u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; + u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; + u32 vblan2e = 0, vblan2s = 1; + u32 *push; + int ret; + + hactive = mode->htotal; + hsynce = mode->hsync_end - mode->hsync_start - 1; + hbackp = mode->htotal - mode->hsync_end; + hblanke = hsynce + hbackp; + hfrontp = mode->hsync_start - mode->hdisplay; + hblanks = mode->htotal - hfrontp - 1; + + vactive = mode->vtotal * vscan / ilace; + vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; + vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; + vblanke = vsynce + vbackp; + vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; + vblanks = vactive - vfrontp - 1; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + vblan2e = vactive + vsynce + vbackp; + vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); + vactive = (vactive * 2) + 1; + } + + ret = nvd0_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; + + push = evo_wait(crtc->dev, EVO_MASTER, 64); + if (push) { + evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6); + evo_data(push, 0x00000000); + evo_data(push, (vactive << 16) | hactive); + evo_data(push, ( vsynce << 16) | hsynce); + evo_data(push, (vblanke << 16) | hblanke); + evo_data(push, (vblanks << 16) | hblanks); + evo_data(push, (vblan2e << 16) | vblan2s); + evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); /* ??? */ + evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3); + evo_data(push, mode->clock * 1000); + evo_data(push, 0x00200000); /* ??? */ + evo_data(push, mode->clock * 1000); + evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2); + evo_data(push, 0x00000311); + evo_data(push, 0x00000100); + evo_kick(push, crtc->dev, EVO_MASTER); + } + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + nvd0_crtc_set_dither(nv_crtc, false); + nvd0_crtc_set_scale(nv_crtc, false); + nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false); + return 0; +} + +static int +nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ret; + + if (!crtc->fb) { + NV_DEBUG(drm, "No FB bound\n"); + return 0; + } + + ret = nvd0_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; + + nvd0_display_flip_stop(crtc); + nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true); + nvd0_display_flip_next(crtc, crtc->fb, NULL, 1); + return 0; +} + +static int +nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y, + enum mode_set_atomic state) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + nvd0_display_flip_stop(crtc); + nvd0_crtc_set_image(nv_crtc, fb, x, y, true); + return 0; +} + +static void +nvd0_crtc_lut_load(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); + int i; + + for (i = 0; i < 256; i++) { + writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0); + writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2); + writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4); + } +} + +static int +nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + bool visible = (handle != 0); + int i, 
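/*
 * A worked example of the horizontal timing values nvd0_crtc_mode_set()
 * above derives from a drm_display_mode.  The numbers are the standard
 * CEA 1920x1080 timings (hdisplay 1920, hsync_start 2008, hsync_end
 * 2052, htotal 2200), which are not part of this patch.
 */
#include <assert.h>

int main(void)
{
	int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;

	int hactive = htotal;				/* 2200 */
	int hsynce  = hsync_end - hsync_start - 1;	/*   43 */
	int hbackp  = htotal - hsync_end;		/*  148 */
	int hblanke = hsynce + hbackp;			/*  191 */
	int hfrontp = hsync_start - hdisplay;		/*   88 */
	int hblanks = htotal - hfrontp - 1;		/* 2111 */

	assert(hactive == 2200 && hsynce == 43 && hbackp == 148);
	assert(hblanke == 191 && hfrontp == 88 && hblanks == 2111);
	return 0;
}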
ret = 0; + + if (visible) { + if (width != 64 || height != 64) + return -EINVAL; + + gem = drm_gem_object_lookup(dev, file_priv, handle); + if (unlikely(!gem)) + return -ENOENT; + nvbo = nouveau_gem_object(gem); + + ret = nouveau_bo_map(nvbo); + if (ret == 0) { + for (i = 0; i < 64 * 64; i++) { + u32 v = nouveau_bo_rd32(nvbo, i); + nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v); + } + nouveau_bo_unmap(nvbo); + } + + drm_gem_object_unreference_unlocked(gem); + } + + if (visible != nv_crtc->cursor.visible) { + nvd0_crtc_cursor_show(nv_crtc, visible, true); + nv_crtc->cursor.visible = visible; + } + + return ret; +} + +static int +nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ch = EVO_CURS(nv_crtc->index); + + evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff)); + evo_piow(crtc->dev, ch, 0x0080, 0x00000000); + return 0; +} + +static void +nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t start, uint32_t size) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 end = max(start + size, (u32)256); + u32 i; + + for (i = start; i < end; i++) { + nv_crtc->lut.r[i] = r[i]; + nv_crtc->lut.g[i] = g[i]; + nv_crtc->lut.b[i] = b[i]; + } + + nvd0_crtc_lut_load(crtc); +} + +static void +nvd0_crtc_destroy(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + nouveau_bo_unmap(nv_crtc->lut.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + drm_crtc_cleanup(crtc); + kfree(crtc); +} + +static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = { + .dpms = nvd0_crtc_dpms, + .prepare = nvd0_crtc_prepare, + .commit = nvd0_crtc_commit, + .mode_fixup = nvd0_crtc_mode_fixup, + .mode_set = nvd0_crtc_mode_set, + .mode_set_base = nvd0_crtc_mode_set_base, + .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic, + .load_lut = nvd0_crtc_lut_load, +}; + +static const struct drm_crtc_funcs nvd0_crtc_func = { + .cursor_set = nvd0_crtc_cursor_set, + .cursor_move = nvd0_crtc_cursor_move, + .gamma_set = nvd0_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = nvd0_crtc_destroy, + .page_flip = nouveau_crtc_page_flip, +}; + +static void +nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) +{ +} + +static void +nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) +{ +} + +static int +nvd0_crtc_create(struct drm_device *dev, int index) +{ + struct nouveau_crtc *nv_crtc; + struct drm_crtc *crtc; + int ret, i; + + nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); + if (!nv_crtc) + return -ENOMEM; + + nv_crtc->index = index; + nv_crtc->set_dither = nvd0_crtc_set_dither; + nv_crtc->set_scale = nvd0_crtc_set_scale; + nv_crtc->cursor.set_offset = nvd0_cursor_set_offset; + nv_crtc->cursor.set_pos = nvd0_cursor_set_pos; + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = i << 8; + nv_crtc->lut.g[i] = i << 8; + nv_crtc->lut.b[i] = i << 8; + } + + crtc = &nv_crtc->base; + drm_crtc_init(dev, crtc, &nvd0_crtc_func); + drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc); + drm_mode_crtc_set_gamma_size(crtc, 256); + + ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + } + + if (ret) + goto out; + + ret = nouveau_bo_new(dev, 
8192, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->lut.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->lut.nvbo); + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + } + + if (ret) + goto out; + + nvd0_crtc_lut_load(crtc); + +out: + if (ret) + nvd0_crtc_destroy(crtc); + return ret; +} + +/****************************************************************************** + * DAC + *****************************************************************************/ +static void +nvd0_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int or = nv_encoder->or; + u32 dpms_ctrl; + + dpms_ctrl = 0x80000000; + if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) + dpms_ctrl |= 0x00000001; + if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) + dpms_ctrl |= 0x00000004; + + nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); + nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl); + nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); +} + +static bool +nvd0_dac_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->native_mode) { + if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { + int id = adjusted_mode->base.id; + *adjusted_mode = *nv_connector->native_mode; + adjusted_mode->base.id = id; + } + } + + return true; +} + +static void +nvd0_dac_commit(struct drm_encoder *encoder) +{ +} + +static void +nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + u32 syncs, magic, *push; + + syncs = 0x00000001; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + syncs |= 0x00000008; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + syncs |= 0x00000010; + + magic = 0x31ec6000 | (nv_crtc->index << 25); + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + magic |= 0x00000001; + + nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON); + + push = evo_wait(encoder->dev, EVO_MASTER, 8); + if (push) { + evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); + evo_data(push, syncs); + evo_data(push, magic); + evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2); + evo_data(push, 1 << nv_crtc->index); + evo_data(push, 0x00ff); + evo_kick(push, encoder->dev, EVO_MASTER); + } + + nv_encoder->crtc = encoder->crtc; +} + +static void +nvd0_dac_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + u32 *push; + + if (nv_encoder->crtc) { + nvd0_crtc_prepare(nv_encoder->crtc); + + push = evo_wait(dev, EVO_MASTER, 4); + if (push) { + evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, dev, EVO_MASTER); + } + + nv_encoder->crtc = NULL; + } +} + +static enum drm_connector_status +nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + enum drm_connector_status status 
= connector_status_disconnected; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int or = nv_encoder->or; + u32 load; + + nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000); + udelay(9500); + nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000); + + load = nv_rd32(device, 0x61a00c + (or * 0x800)); + if ((load & 0x38000000) == 0x38000000) + status = connector_status_connected; + + nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000); + return status; +} + +static void +nvd0_dac_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = { + .dpms = nvd0_dac_dpms, + .mode_fixup = nvd0_dac_mode_fixup, + .prepare = nvd0_dac_disconnect, + .commit = nvd0_dac_commit, + .mode_set = nvd0_dac_mode_set, + .disable = nvd0_dac_disconnect, + .get_crtc = nvd0_display_crtc_get, + .detect = nvd0_dac_detect +}; + +static const struct drm_encoder_funcs nvd0_dac_func = { + .destroy = nvd0_dac_destroy, +}; + +static int +nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) +{ + struct drm_device *dev = connector->dev; + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + nv_encoder->dcb = dcbe; + nv_encoder->or = ffs(dcbe->or) - 1; + + encoder = to_drm_encoder(nv_encoder); + encoder->possible_crtcs = dcbe->heads; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, &nvd0_dac_hfunc); + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + +/****************************************************************************** + * Audio + *****************************************************************************/ +static void +nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int i, or = nv_encoder->or * 0x30; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!drm_detect_monitor_audio(nv_connector->edid)) + return; + + nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001); + + drm_edid_to_eld(&nv_connector->base, nv_connector->edid); + if (nv_connector->base.eld[0]) { + u8 *eld = nv_connector->base.eld; + + for (i = 0; i < eld[2] * 4; i++) + nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]); + for (i = eld[2] * 4; i < 0x60; i++) + nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00); + + nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002); + } +} + +static void +nvd0_audio_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int or = nv_encoder->or * 0x30; + + nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000); +} + +/****************************************************************************** + * HDMI + *****************************************************************************/ +static void +nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = 
nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int head = nv_crtc->index * 0x800; + u32 rekey = 56; /* binary driver, and tegra constant */ + u32 max_ac_packet; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!drm_detect_hdmi_monitor(nv_connector->edid)) + return; + + max_ac_packet = mode->htotal - mode->hdisplay; + max_ac_packet -= rekey; + max_ac_packet -= 18; /* constant from tegra */ + max_ac_packet /= 32; + + /* AVI InfoFrame */ + nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000); + nv_wr32(device, 0x61671c + head, 0x000d0282); + nv_wr32(device, 0x616720 + head, 0x0000006f); + nv_wr32(device, 0x616724 + head, 0x00000000); + nv_wr32(device, 0x616728 + head, 0x00000000); + nv_wr32(device, 0x61672c + head, 0x00000000); + nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001); + + /* ??? InfoFrame? */ + nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000); + nv_wr32(device, 0x6167ac + head, 0x00000010); + nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001); + + /* HDMI_CTRL */ + nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey | + max_ac_packet << 16); + + /* NFI, audio doesn't work without it though.. */ + nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000); + + nvd0_audio_mode_set(encoder, mode); +} + +static void +nvd0_hdmi_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + int head = nv_crtc->index * 0x800; + + nvd0_audio_disconnect(encoder); + + nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000); + nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000); + nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000); +} + +/****************************************************************************** + * SOR + *****************************************************************************/ +static inline u32 +nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane) +{ + static const u8 nvd0[] = { 16, 8, 0, 24 }; + return nvd0[lane]; +} + +static void +nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern) +{ + struct nouveau_device *device = nouveau_dev(dev); + const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + const u32 loff = (or * 0x800) + (link * 0x80); + nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); +} + +static void +nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb, + u8 lane, u8 swing, u8 preem) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + const u32 loff = (or * 0x800) + (link * 0x80); + u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane); + u32 mask = 0x000000ff << shift; + u8 *table, *entry, *config = NULL; + + switch (swing) { + case 0: preem += 0; break; + case 1: preem += 4; break; + case 2: preem += 7; break; + case 3: preem += 9; break; + } + + table = nouveau_dp_bios_data(dev, dcb, &entry); + if (table) { + if (table[0] == 0x30) { + config = entry + table[4]; + config += table[5] * preem; + } else + if (table[0] == 0x40) { + config = table + table[1]; + config += table[2] * table[3]; + config += table[6] * preem; + } + } + + if (!config) { + NV_ERROR(drm, 
"PDISP: unsupported DP table for chipset\n"); + return; + } + + nv_mask(device, 0x61c118 + loff, mask, config[1] << shift); + nv_mask(device, 0x61c120 + loff, mask, config[2] << shift); + nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8); + nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000); +} + +static void +nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc, + int link_nr, u32 link_bw, bool enhframe) +{ + struct nouveau_device *device = nouveau_dev(dev); + const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + const u32 loff = (or * 0x800) + (link * 0x80); + const u32 soff = (or * 0x800); + u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000; + u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000; + u32 script = 0x0000, lane_mask = 0; + u8 *table, *entry; + int i; + + link_bw /= 27000; + + table = nouveau_dp_bios_data(dev, dcb, &entry); + if (table) { + if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]); + else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]); + else entry = NULL; + + while (entry) { + if (entry[0] >= link_bw) + break; + entry += 3; + } + + nouveau_bios_run_init_table(dev, script, dcb, crtc); + } + + clksor |= link_bw << 18; + dpctrl |= ((1 << link_nr) - 1) << 16; + if (enhframe) + dpctrl |= 0x00004000; + + for (i = 0; i < link_nr; i++) + lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3); + + nv_wr32(device, 0x612300 + soff, clksor); + nv_wr32(device, 0x61c10c + loff, dpctrl); + nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask); +} + +static void +nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb, + u32 *link_nr, u32 *link_bw) +{ + struct nouveau_device *device = nouveau_dev(dev); + const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); + const u32 loff = (or * 0x800) + (link * 0x80); + const u32 soff = (or * 0x800); + u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000; + u32 clksor = nv_rd32(device, 0x612300 + soff); + + if (dpctrl > 0x00030000) *link_nr = 4; + else if (dpctrl > 0x00010000) *link_nr = 2; + else *link_nr = 1; + + *link_bw = (clksor & 0x007c0000) >> 18; + *link_bw *= 27000; +} + +static void +nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb, + u32 crtc, u32 datarate) +{ + struct nouveau_device *device = nouveau_dev(dev); + const u32 symbol = 100000; + const u32 TU = 64; + u32 link_nr, link_bw; + u64 ratio, value; + + nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw); + + ratio = datarate; + ratio *= symbol; + do_div(ratio, link_nr * link_bw); + + value = (symbol - ratio) * TU; + value *= ratio; + do_div(value, symbol); + do_div(value, symbol); + + value += 5; + value |= 0x08000000; + + nv_wr32(device, 0x616610 + (crtc * 0x800), value); +} + +static void +nvd0_sor_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + struct drm_encoder *partner; + int or = nv_encoder->or; + u32 dpms_ctrl; + + nv_encoder->last_dpms = mode; + + list_for_each_entry(partner, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_partner = nouveau_encoder(partner); + + if (partner->encoder_type != DRM_MODE_ENCODER_TMDS) + continue; + + if (nv_partner != nv_encoder && + nv_partner->dcb->or == nv_encoder->dcb->or) { + if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) + return; + break; + } + } + + dpms_ctrl = (mode == DRM_MODE_DPMS_ON); + dpms_ctrl |= 0x80000000; + + 
nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); + nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl); + nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); + nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000); + + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + struct dp_train_func func = { + .link_set = nvd0_sor_dp_link_set, + .train_set = nvd0_sor_dp_train_set, + .train_adj = nvd0_sor_dp_train_adj + }; + + nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func); + } +} + +static bool +nvd0_sor_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->native_mode) { + if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { + int id = adjusted_mode->base.id; + *adjusted_mode = *nv_connector->native_mode; + adjusted_mode->base.id = id; + } + } + + return true; +} + +static void +nvd0_sor_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + u32 *push; + + if (nv_encoder->crtc) { + nvd0_crtc_prepare(nv_encoder->crtc); + + push = evo_wait(dev, EVO_MASTER, 4); + if (push) { + evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, dev, EVO_MASTER); + } + + nvd0_hdmi_disconnect(encoder); + + nv_encoder->crtc = NULL; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + } +} + +static void +nvd0_sor_prepare(struct drm_encoder *encoder) +{ + nvd0_sor_disconnect(encoder); + if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP) + evo_sync(encoder->dev, EVO_MASTER); +} + +static void +nvd0_sor_commit(struct drm_encoder *encoder) +{ +} + +static void +nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, + struct drm_display_mode *mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + struct nvbios *bios = &drm->vbios; + u32 mode_ctrl = (1 << nv_crtc->index); + u32 syncs, magic, *push; + u32 or_config; + + syncs = 0x00000001; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + syncs |= 0x00000008; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + syncs |= 0x00000010; + + magic = 0x31ec6000 | (nv_crtc->index << 25); + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + magic |= 0x00000001; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + switch (nv_encoder->dcb->type) { + case DCB_OUTPUT_TMDS: + if (nv_encoder->dcb->sorconf.link & 1) { + if (mode->clock < 165000) + mode_ctrl |= 0x00000100; + else + mode_ctrl |= 0x00000500; + } else { + mode_ctrl |= 0x00000200; + } + + or_config = (mode_ctrl & 0x00000f00) >> 8; + if (mode->clock >= 165000) + or_config |= 0x0100; + + nvd0_hdmi_mode_set(encoder, mode); + break; + case DCB_OUTPUT_LVDS: + or_config = (mode_ctrl & 0x00000f00) >> 8; + if (bios->fp_no_ddc) { + if (bios->fp.dual_link) + or_config |= 0x0100; + if (bios->fp.if_is_24bit) + or_config |= 0x0200; + } else { + if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { + if (((u8 *)nv_connector->edid)[121] == 2) + or_config |= 0x0100; + } else + if 
(mode->clock >= bios->fp.duallink_transition_clk) { + or_config |= 0x0100; + } + + if (or_config & 0x0100) { + if (bios->fp.strapless_is_24bit & 2) + or_config |= 0x0200; + } else { + if (bios->fp.strapless_is_24bit & 1) + or_config |= 0x0200; + } + + if (nv_connector->base.display_info.bpc == 8) + or_config |= 0x0200; + + } + break; + case DCB_OUTPUT_DP: + if (nv_connector->base.display_info.bpc == 6) { + nv_encoder->dp.datarate = mode->clock * 18 / 8; + syncs |= 0x00000002 << 6; + } else { + nv_encoder->dp.datarate = mode->clock * 24 / 8; + syncs |= 0x00000005 << 6; + } + + if (nv_encoder->dcb->sorconf.link & 1) + mode_ctrl |= 0x00000800; + else + mode_ctrl |= 0x00000900; + + or_config = (mode_ctrl & 0x00000f00) >> 8; + break; + default: + BUG_ON(1); + break; + } + + nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON); + + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index, + nv_encoder->dp.datarate); + } + + push = evo_wait(dev, EVO_MASTER, 8); + if (push) { + evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); + evo_data(push, syncs); + evo_data(push, magic); + evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2); + evo_data(push, mode_ctrl); + evo_data(push, or_config); + evo_kick(push, dev, EVO_MASTER); + } + + nv_encoder->crtc = encoder->crtc; +} + +static void +nvd0_sor_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = { + .dpms = nvd0_sor_dpms, + .mode_fixup = nvd0_sor_mode_fixup, + .prepare = nvd0_sor_prepare, + .commit = nvd0_sor_commit, + .mode_set = nvd0_sor_mode_set, + .disable = nvd0_sor_disconnect, + .get_crtc = nvd0_display_crtc_get, +}; + +static const struct drm_encoder_funcs nvd0_sor_func = { + .destroy = nvd0_sor_destroy, +}; + +static int +nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) +{ + struct drm_device *dev = connector->dev; + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + nv_encoder->dcb = dcbe; + nv_encoder->or = ffs(dcbe->or) - 1; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + + encoder = to_drm_encoder(nv_encoder); + encoder->possible_crtcs = dcbe->heads; + encoder->possible_clones = 0; + drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &nvd0_sor_hfunc); + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + +/****************************************************************************** + * IRQ + *****************************************************************************/ +static struct dcb_output * +lookup_dcb(struct drm_device *dev, int id, u32 mc) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + int type, or, i, link = -1; + + if (id < 4) { + type = DCB_OUTPUT_ANALOG; + or = id; + } else { + switch (mc & 0x00000f00) { + case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break; + case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break; + case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break; + case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break; + case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break; + case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break; + default: + NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc); + return NULL; + } + + or = id - 4; + } + + for (i = 0; i < drm->vbios.dcb.entries; i++) { + struct dcb_output *dcb = &drm->vbios.dcb.entry[i]; + if 
(dcb->type == type && (dcb->or & (1 << or)) && + (link < 0 || link == !(dcb->sorconf.link & 1))) + return dcb; + } + + NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc); + return NULL; +} + +static void +nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct dcb_output *dcb; + int i; + + for (i = 0; mask && i < 8; i++) { + u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20)); + if (!(mcc & (1 << crtc))) + continue; + + dcb = lookup_dcb(dev, i, mcc); + if (!dcb) + continue; + + nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc); + } + + nv_wr32(device, 0x6101d4, 0x00000000); + nv_wr32(device, 0x6109d4, 0x00000000); + nv_wr32(device, 0x6101d0, 0x80000000); +} + +static void +nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_output *dcb; + u32 or, tmp, pclk; + int i; + + for (i = 0; mask && i < 8; i++) { + u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20)); + if (!(mcc & (1 << crtc))) + continue; + + dcb = lookup_dcb(dev, i, mcc); + if (!dcb) + continue; + + nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc); + } + + pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000; + NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n", + crtc, pclk, mask); + if (pclk && (mask & 0x00010000)) { + nv50_crtc_set_clock(dev, crtc, pclk); + } + + for (i = 0; mask && i < 8; i++) { + u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20)); + u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20)); + if (!(mcp & (1 << crtc))) + continue; + + dcb = lookup_dcb(dev, i, mcp); + if (!dcb) + continue; + or = ffs(dcb->or) - 1; + + nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc); + + nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000); + switch (dcb->type) { + case DCB_OUTPUT_ANALOG: + nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000); + break; + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + if (cfg & 0x00000100) + tmp = 0x00000101; + else + tmp = 0x00000000; + + nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp); + break; + default: + break; + } + + break; + } + + nv_wr32(device, 0x6101d4, 0x00000000); + nv_wr32(device, 0x6109d4, 0x00000000); + nv_wr32(device, 0x6101d0, 0x80000000); +} + +static void +nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct dcb_output *dcb; + int pclk, i; + + pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000; + + for (i = 0; mask && i < 8; i++) { + u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20)); + u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20)); + if (!(mcp & (1 << crtc))) + continue; + + dcb = lookup_dcb(dev, i, mcp); + if (!dcb) + continue; + + nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc); + } + + nv_wr32(device, 0x6101d4, 0x00000000); + nv_wr32(device, 0x6109d4, 0x00000000); + nv_wr32(device, 0x6101d0, 0x80000000); +} + +static void +nvd0_display_bh(unsigned long data) +{ + struct drm_device *dev = (struct drm_device *)data; + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvd0_display *disp = nvd0_display(dev); + u32 mask = 0, crtc = ~0; + int i; + + if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) { + NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset); + NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n", + nv_rd32(device, 0x6101d0), + nv_rd32(device, 0x6101d4), 
nv_rd32(device, 0x6109d4)); + for (i = 0; i < 8; i++) { + NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n", + i < 4 ? "DAC" : "SOR", i, + nv_rd32(device, 0x640180 + (i * 0x20)), + nv_rd32(device, 0x660180 + (i * 0x20))); + } + } + + while (!mask && ++crtc < dev->mode_config.num_crtc) + mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800)); + + if (disp->modeset & 0x00000001) + nvd0_display_unk1_handler(dev, crtc, mask); + if (disp->modeset & 0x00000002) + nvd0_display_unk2_handler(dev, crtc, mask); + if (disp->modeset & 0x00000004) + nvd0_display_unk4_handler(dev, crtc, mask); +} + +void +nvd0_display_intr(struct drm_device *dev) +{ + struct nvd0_display *disp = nvd0_display(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 intr = nv_rd32(device, 0x610088); + + if (intr & 0x00000001) { + u32 stat = nv_rd32(device, 0x61008c); + nv_wr32(device, 0x61008c, stat); + intr &= ~0x00000001; + } + + if (intr & 0x00000002) { + u32 stat = nv_rd32(device, 0x61009c); + int chid = ffs(stat) - 1; + if (chid >= 0) { + u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12)); + u32 data = nv_rd32(device, 0x6101f4 + (chid * 12)); + u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12)); + + NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x " + "0x%08x 0x%08x\n", + chid, (mthd & 0x0000ffc), data, mthd, unkn); + nv_wr32(device, 0x61009c, (1 << chid)); + nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000); + } + + intr &= ~0x00000002; + } + + if (intr & 0x00100000) { + u32 stat = nv_rd32(device, 0x6100ac); + + if (stat & 0x00000007) { + disp->modeset = stat; + tasklet_schedule(&disp->tasklet); + + nv_wr32(device, 0x6100ac, (stat & 0x00000007)); + stat &= ~0x00000007; + } + + if (stat) { + NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat); + nv_wr32(device, 0x6100ac, stat); + } + + intr &= ~0x00100000; + } + + intr &= ~0x0f000000; /* vblank, handled in core */ + if (intr) + NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr); +} + +/****************************************************************************** + * Init + *****************************************************************************/ +void +nvd0_display_fini(struct drm_device *dev) +{ + int i; + + /* fini cursors + overlays + flips */ + for (i = 1; i >= 0; i--) { + evo_fini_pio(dev, EVO_CURS(i)); + evo_fini_pio(dev, EVO_OIMM(i)); + evo_fini_dma(dev, EVO_OVLY(i)); + evo_fini_dma(dev, EVO_FLIP(i)); + } + + /* fini master */ + evo_fini_dma(dev, EVO_MASTER); +} + +int +nvd0_display_init(struct drm_device *dev) +{ + struct nvd0_display *disp = nvd0_display(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + int ret, i; + u32 *push; + + if (nv_rd32(device, 0x6100ac) & 0x00000100) { + nv_wr32(device, 0x6100ac, 0x00000100); + nv_mask(device, 0x6194e8, 0x00000001, 0x00000000); + if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) { + NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n", + nv_rd32(device, 0x6194e8)); + return -EBUSY; + } + } + + /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't + * work at all unless you do the SOR part below. 
+ */ + for (i = 0; i < 3; i++) { + u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800)); + nv_wr32(device, 0x6101c0 + (i * 0x800), dac); + } + + for (i = 0; i < 4; i++) { + u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800)); + nv_wr32(device, 0x6301c4 + (i * 0x800), sor); + } + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800)); + u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800)); + u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800)); + nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0); + nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1); + nv_wr32(device, 0x6101bc + (i * 0x800), crtc2); + } + + /* point at our hash table / objects, enable interrupts */ + nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9); + nv_mask(device, 0x6100b0, 0x00000307, 0x00000307); + + /* init master */ + ret = evo_init_dma(dev, EVO_MASTER); + if (ret) + goto error; + + /* init flips + overlays + cursors */ + for (i = 0; i < dev->mode_config.num_crtc; i++) { + if ((ret = evo_init_dma(dev, EVO_FLIP(i))) || + (ret = evo_init_dma(dev, EVO_OVLY(i))) || + (ret = evo_init_pio(dev, EVO_OIMM(i))) || + (ret = evo_init_pio(dev, EVO_CURS(i)))) + goto error; + } + + push = evo_wait(dev, EVO_MASTER, 32); + if (!push) { + ret = -EBUSY; + goto error; + } + evo_mthd(push, 0x0088, 1); + evo_data(push, NvEvoSync); + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x80000000); + evo_mthd(push, 0x008c, 1); + evo_data(push, 0x00000000); + evo_kick(push, dev, EVO_MASTER); + +error: + if (ret) + nvd0_display_fini(dev); + return ret; +} + +void +nvd0_display_destroy(struct drm_device *dev) +{ + struct nvd0_display *disp = nvd0_display(dev); + struct pci_dev *pdev = dev->pdev; + int i; + + for (i = 0; i < EVO_DMA_NR; i++) { + struct evo *evo = &disp->evo[i]; + pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle); + } + + nouveau_gpuobj_ref(NULL, &disp->mem); + nouveau_bo_unmap(disp->sync); + nouveau_bo_ref(NULL, &disp->sync); + + nouveau_display(dev)->priv = NULL; + kfree(disp); +} + +int +nvd0_display_create(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_bar *bar = nouveau_bar(device); + struct nouveau_fb *pfb = nouveau_fb(device); + struct dcb_table *dcb = &drm->vbios.dcb; + struct drm_connector *connector, *tmp; + struct pci_dev *pdev = dev->pdev; + struct nvd0_display *disp; + struct dcb_output *dcbe; + int crtcs, ret, i; + + disp = kzalloc(sizeof(*disp), GFP_KERNEL); + if (!disp) + return -ENOMEM; + + nouveau_display(dev)->priv = disp; + nouveau_display(dev)->dtor = nvd0_display_destroy; + nouveau_display(dev)->init = nvd0_display_init; + nouveau_display(dev)->fini = nvd0_display_fini; + + /* create crtc objects to represent the hw heads */ + crtcs = nv_rd32(device, 0x022448); + for (i = 0; i < crtcs; i++) { + ret = nvd0_crtc_create(dev, i); + if (ret) + goto out; + } + + /* create encoder/connector objects based on VBIOS DCB table */ + for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) { + connector = nouveau_connector_create(dev, dcbe->connector); + if (IS_ERR(connector)) + continue; + + if (dcbe->location != DCB_LOC_ON_CHIP) { + NV_WARN(drm, "skipping off-chip encoder %d/%d\n", + dcbe->type, ffs(dcbe->or) - 1); + continue; + } + + switch (dcbe->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + nvd0_sor_create(connector, dcbe); + break; + case DCB_OUTPUT_ANALOG: + 
nvd0_dac_create(connector, dcbe); + break; + default: + NV_WARN(drm, "skipping unsupported encoder %d/%d\n", + dcbe->type, ffs(dcbe->or) - 1); + continue; + } + } + + /* cull any connectors we created that don't have an encoder */ + list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { + if (connector->encoder_ids[0]) + continue; + + NV_WARN(drm, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + connector->funcs->destroy(connector); + } + + /* setup interrupt handling */ + tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev); + + /* small shared memory area we use for notifiers and semaphores */ + ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &disp->sync); + if (!ret) { + ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(disp->sync); + if (ret) + nouveau_bo_ref(NULL, &disp->sync); + } + + if (ret) + goto out; + + /* hash table and dma objects for the memory areas we care about */ + ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000, + NVOBJ_FLAG_ZERO_ALLOC, &disp->mem); + if (ret) + goto out; + + /* create evo dma channels */ + for (i = 0; i < EVO_DMA_NR; i++) { + struct evo *evo = &disp->evo[i]; + u64 offset = disp->sync->bo.offset; + u32 dmao = 0x1000 + (i * 0x100); + u32 hash = 0x0000 + (i * 0x040); + + evo->idx = i; + evo->sem.offset = EVO_SYNC(evo->idx, 0x00); + evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle); + if (!evo->ptr) { + ret = -ENOMEM; + goto out; + } + + nv_wo32(disp->mem, dmao + 0x00, 0x00000049); + nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8); + nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8); + nv_wo32(disp->mem, dmao + 0x0c, 0x00000000); + nv_wo32(disp->mem, dmao + 0x10, 0x00000000); + nv_wo32(disp->mem, dmao + 0x14, 0x00000000); + nv_wo32(disp->mem, hash + 0x00, NvEvoSync); + nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) | + ((dmao + 0x00) << 9)); + + nv_wo32(disp->mem, dmao + 0x20, 0x00000049); + nv_wo32(disp->mem, dmao + 0x24, 0x00000000); + nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8); + nv_wo32(disp->mem, dmao + 0x2c, 0x00000000); + nv_wo32(disp->mem, dmao + 0x30, 0x00000000); + nv_wo32(disp->mem, dmao + 0x34, 0x00000000); + nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM); + nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) | + ((dmao + 0x20) << 9)); + + nv_wo32(disp->mem, dmao + 0x40, 0x00000009); + nv_wo32(disp->mem, dmao + 0x44, 0x00000000); + nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8); + nv_wo32(disp->mem, dmao + 0x4c, 0x00000000); + nv_wo32(disp->mem, dmao + 0x50, 0x00000000); + nv_wo32(disp->mem, dmao + 0x54, 0x00000000); + nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP); + nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) | + ((dmao + 0x40) << 9)); + + nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009); + nv_wo32(disp->mem, dmao + 0x64, 0x00000000); + nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8); + nv_wo32(disp->mem, dmao + 0x6c, 0x00000000); + nv_wo32(disp->mem, dmao + 0x70, 0x00000000); + nv_wo32(disp->mem, dmao + 0x74, 0x00000000); + nv_wo32(disp->mem, hash + 0x18, NvEvoFB32); + nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) | + ((dmao + 0x60) << 9)); + } + + bar->flush(bar); + +out: + if (ret) + nvd0_display_destroy(dev); + return ret; +} diff --git a/trunk/drivers/gpu/drm/radeon/atombios_dp.c b/trunk/drivers/gpu/drm/radeon/atombios_dp.c index 064023bed480..d5699fe4f1e8 100644 --- 
a/trunk/drivers/gpu/drm/radeon/atombios_dp.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_dp.c @@ -34,7 +34,8 @@ /* move these to drm_dp_helper.c/h */ #define DP_LINK_CONFIGURATION_SIZE 9 -#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE +#define DP_LINK_STATUS_SIZE 6 +#define DP_DPCD_SIZE 8 static char *voltage_names[] = { "0.4V", "0.6V", "0.8V", "1.2V" @@ -289,6 +290,78 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, /***** general DP utility functions *****/ +static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_LANE0_1_STATUS + (lane >> 1); + int s = (lane & 1) * 4; + u8 l = dp_link_status(link_status, i); + return (l >> s) & 0xf; +} + +static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + int lane; + u8 lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + u8 lane_align; + u8 lane_status; + int lane; + + lane_align = dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) + return false; + } + return true; +} + +static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) + +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 @@ -301,8 +374,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], int lane; for (lane = 0; lane < lane_count; lane++) { - u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); - u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + u8 this_v = dp_get_adjust_request_voltage(link_status, lane); + u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", lane, @@ -347,6 +420,37 @@ static int dp_get_max_dp_pix_clock(int link_rate, return (link_rate * lane_num * 8) / bpp; } +static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE]) +{ + switch (dpcd[DP_MAX_LINK_RATE]) { + case DP_LINK_BW_1_62: + default: + return 162000; + case DP_LINK_BW_2_7: + return 270000; + case DP_LINK_BW_5_4: + return 540000; + } +} + +static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE]) +{ + return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; +} + +static u8 dp_get_dp_link_rate_coded(int link_rate) +{ + switch (link_rate) { + case 162000: + default: + return DP_LINK_BW_1_62; + case 270000: + return DP_LINK_BW_2_7; + case 540000: + return DP_LINK_BW_5_4; + } +} + /***** radeon specific DP functions *****/ /* First get the min lane# when low rate is used according to pixel clock @@ -358,8 +462,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, int pix_clock) { int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); - int max_link_rate = drm_dp_max_link_rate(dpcd); - int max_lane_num = drm_dp_max_lane_count(dpcd); + int max_link_rate = dp_get_max_link_rate(dpcd); + int max_lane_num = dp_get_max_lane_number(dpcd); int lane_num; int max_dp_pix_clock; @@ -396,7 +500,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, return 540000; } - return drm_dp_max_link_rate(dpcd); + return dp_get_max_link_rate(dpcd); } static u8 radeon_dp_encoder_service(struct radeon_device *rdev, @@ -447,15 +551,14 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; - u8 msg[DP_DPCD_SIZE]; + u8 msg[25]; int ret, i; - ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, - DP_DPCD_SIZE, 0); + ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0); if (ret > 0) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + memcpy(dig_connector->dpcd, msg, 8); DRM_DEBUG_KMS("DPCD: "); - for (i = 0; i < DP_DPCD_SIZE; i++) + for (i = 0; i < 8; i++) DRM_DEBUG_KMS("%02x ", msg[i]); DRM_DEBUG_KMS("\n"); @@ -561,7 +664,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) if (!radeon_dp_get_link_status(radeon_connector, link_status)) return false; - if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) + if (dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; return true; } @@ -574,8 +677,9 @@ struct radeon_dp_link_train_info { int enc_id; int dp_clock; int dp_lane_count; + int rd_interval; bool tp3_supported; - u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 dpcd[8]; u8 train_set[4]; u8 link_status[DP_LINK_STATUS_SIZE]; u8 tries; @@ -661,7 +765,7 
@@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); /* set the link rate on the sink */ - tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock); + tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock); radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); /* start training on the source */ @@ -717,14 +821,17 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) dp_info->tries = 0; voltage = 0xff; while (1) { - drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); + if (dp_info->rd_interval == 0) + udelay(100); + else + mdelay(dp_info->rd_interval * 4); if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { DRM_ERROR("displayport link status failed\n"); break; } - if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { + if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { clock_recovery = true; break; } @@ -779,14 +886,17 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) dp_info->tries = 0; channel_eq = false; while (1) { - drm_dp_link_train_channel_eq_delay(dp_info->dpcd); + if (dp_info->rd_interval == 0) + udelay(400); + else + mdelay(dp_info->rd_interval * 4); if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { DRM_ERROR("displayport link status failed\n"); break; } - if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { + if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { channel_eq = true; break; } @@ -864,13 +974,14 @@ void radeon_dp_link_train(struct drm_encoder *encoder, else dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; + dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL); tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) dp_info.tp3_supported = true; else dp_info.tp3_supported = false; - memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); + memcpy(dp_info.dpcd, dig_connector->dpcd, 8); dp_info.rdev = rdev; dp_info.encoder = encoder; dp_info.connector = connector; diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index 169ecc9628ea..ee06c8781cd4 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -1370,6 +1370,29 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) return radeon_ring_test_lockup(rdev, ring); } +/** + * r600_dma_is_lockup - Check if the DMA engine is locked up + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Check if the async DMA engine is locked up (r6xx-evergreen). + * Returns true if the engine appears to be locked up, false if not. 
+ */ +bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) +{ + u32 dma_status_reg; + + dma_status_reg = RREG32(DMA_STATUS_REG); + if (dma_status_reg & DMA_IDLE) { + radeon_ring_lockup_update(ring); + return false; + } + /* force ring activities */ + radeon_ring_force_activity(rdev, ring); + return radeon_ring_test_lockup(rdev, ring); +} + int r600_asic_reset(struct radeon_device *rdev) { return r600_gpu_soft_reset(rdev); @@ -1424,7 +1447,13 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev, int r600_count_pipe_bits(uint32_t val) { - return hweight32(val); + int i, ret = 0; + + for (i = 0; i < 32; i++) { + ret += val & 1; + val >>= 1; + } + return ret; } static void r600_gpu_init(struct radeon_device *rdev) @@ -1588,6 +1617,7 @@ static void r600_gpu_init(struct radeon_device *rdev) WREG32(GB_TILING_CONFIG, tiling_config); WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); + WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff); tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); @@ -1865,6 +1895,7 @@ void r600_cp_stop(struct radeon_device *rdev) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); WREG32(SCRATCH_UMSK, 0); + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; } int r600_init_microcode(struct radeon_device *rdev) @@ -2190,6 +2221,128 @@ void r600_cp_fini(struct radeon_device *rdev) radeon_scratch_free(rdev, ring->rptr_save_reg); } +/* + * DMA + * Starting with R600, the GPU has an asynchronous + * DMA engine. The programming model is very similar + * to the 3D engine (ring buffer, IBs, etc.), but the + * DMA controller has its own packet format that is + * different from the PM4 format used by the 3D engine. + * It supports copying data, writing embedded data, + * solid fills, and a number of other things. It also + * has support for tiling/detiling of buffers. + */ +/** + * r600_dma_stop - stop the async dma engine + * + * @rdev: radeon_device pointer + * + * Stop the async dma engine (r6xx-evergreen). + */ +void r600_dma_stop(struct radeon_device *rdev) +{ + u32 rb_cntl = RREG32(DMA_RB_CNTL); + + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + + rb_cntl &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL, rb_cntl); + + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; +} + +/** + * r600_dma_resume - setup and start the async dma engine + * + * @rdev: radeon_device pointer + * + * Set up the DMA ring buffer and enable it. (r6xx-evergreen). + * Returns 0 for success, error for failure. 
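A minimal standalone sketch of the ring-size encoding used in the resume path below, assuming drm_order() is the usual "smallest n with (1 << n) >= x" helper and the 64 KiB DMA ring that r600_init() allocates later in this patch; order_of() and the example values here are illustrative stand-ins, not driver code.

#include <stdio.h>

/* stand-in for drm_order(): smallest n such that (1 << n) >= x */
static unsigned int order_of(unsigned long x)
{
	unsigned int n = 0;

	while ((1UL << n) < x)
		n++;
	return n;
}

int main(void)
{
	unsigned long ring_size = 64 * 1024;      /* bytes, as allocated for the DMA ring */
	unsigned long dwords = ring_size / 4;     /* 16384 dwords */
	unsigned int rb_bufsz = order_of(dwords); /* 14 */
	unsigned int rb_cntl = rb_bufsz << 1;     /* DMA_RB_SIZE() is a log2 field starting at bit 1 */

	printf("rb_bufsz=%u rb_cntl=0x%x\n", rb_bufsz, rb_cntl); /* prints rb_bufsz=14 rb_cntl=0x1c */
	return 0;
}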
+ */ +int r600_dma_resume(struct radeon_device *rdev) +{ + struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; + u32 rb_cntl, dma_cntl; + u32 rb_bufsz; + int r; + + /* Reset dma */ + if (rdev->family >= CHIP_RV770) + WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); + else + WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); + RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + + WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); + WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); + + /* Set ring buffer size in dwords */ + rb_bufsz = drm_order(ring->ring_size / 4); + rb_cntl = rb_bufsz << 1; +#ifdef __BIG_ENDIAN + rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; +#endif + WREG32(DMA_RB_CNTL, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(DMA_RB_RPTR, 0); + WREG32(DMA_RB_WPTR, 0); + + /* set the wb address whether it's enabled or not */ + WREG32(DMA_RB_RPTR_ADDR_HI, + upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF); + WREG32(DMA_RB_RPTR_ADDR_LO, + ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC)); + + if (rdev->wb.enabled) + rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; + + WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); + + /* enable DMA IBs */ + WREG32(DMA_IB_CNTL, DMA_IB_ENABLE); + + dma_cntl = RREG32(DMA_CNTL); + dma_cntl &= ~CTXEMPTY_INT_ENABLE; + WREG32(DMA_CNTL, dma_cntl); + + if (rdev->family >= CHIP_RV770) + WREG32(DMA_MODE, 1); + + ring->wptr = 0; + WREG32(DMA_RB_WPTR, ring->wptr << 2); + + ring->rptr = RREG32(DMA_RB_RPTR) >> 2; + + WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); + + ring->ready = true; + + r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring); + if (r) { + ring->ready = false; + return r; + } + + radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); + + return 0; +} + +/** + * r600_dma_fini - tear down the async dma engine + * + * @rdev: radeon_device pointer + * + * Stop the async dma engine and free the ring (r6xx-evergreen). + */ +void r600_dma_fini(struct radeon_device *rdev) +{ + r600_dma_stop(rdev); + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); +} /* * GPU scratch registers helpers function. @@ -2246,6 +2399,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } +/** + * r600_dma_ring_test - simple async dma engine test + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Test the DMA engine by using it to write a + * value to memory (r6xx-SI). + * Returns 0 for success, error for failure. 
+ */ +int r600_dma_ring_test(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + unsigned i; + int r; + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + u32 tmp; + + if (!ptr) { + DRM_ERROR("invalid vram scratch pointer\n"); + return -EINVAL; + } + + tmp = 0xCAFEDEAD; + writel(tmp, ptr); + + r = radeon_ring_lock(rdev, ring, 4); + if (r) { + DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); + return r; + } + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); + radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); + radeon_ring_write(ring, 0xDEADBEEF); + radeon_ring_unlock_commit(rdev, ring); + + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = readl(ptr); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < rdev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + } else { + DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + return r; +} + +/* + * CP fences/semaphores + */ + void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -2309,6 +2520,58 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); } +/* + * DMA fences/semaphores + */ + +/** + * r600_dma_fence_ring_emit - emit a fence on the DMA ring + * + * @rdev: radeon_device pointer + * @fence: radeon fence object + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed (r6xx-r7xx). + */ +void r600_dma_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence) +{ + struct radeon_ring *ring = &rdev->ring[fence->ring]; + u64 addr = rdev->fence_drv[fence->ring].gpu_addr; + /* write the fence */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); + radeon_ring_write(ring, addr & 0xfffffffc); + radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); + radeon_ring_write(ring, fence->seq); + /* generate an interrupt */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); +} + +/** + * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * @semaphore: radeon semaphore object + * @emit_wait: wait or signal semaphore + * + * Add a DMA semaphore packet to the ring wait on or signal + * other rings (r6xx-SI). + */ +void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + u64 addr = semaphore->gpu_addr; + u32 s = emit_wait ? 0 : 1; + + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); + radeon_ring_write(ring, addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(addr) & 0xff); +} + int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, @@ -2328,6 +2591,80 @@ int r600_copy_blit(struct radeon_device *rdev, return 0; } +/** + * r600_copy_dma - copy pages using the DMA engine + * + * @rdev: radeon_device pointer + * @src_offset: src GPU address + * @dst_offset: dst GPU address + * @num_gpu_pages: number of GPU pages to xfer + * @fence: radeon fence object + * + * Copy GPU paging using the DMA engine (r6xx-r7xx). + * Used by the radeon ttm implementation to move pages if + * registered as the asic copy callback. 
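A minimal standalone sketch of the chunking arithmetic used in the copy routine below, assuming the usual 4 KiB radeon GPU page (RADEON_GPU_PAGE_SHIFT of 12); the 100-page transfer and the macro names are example values chosen for illustration.

#include <stdio.h>
#include <stdint.h>

#define GPU_PAGE_SHIFT	12	/* assumed 4 KiB GPU pages */
#define MAX_COPY_DW	0xffff	/* dword limit of a single DMA_PACKET_COPY */

int main(void)
{
	unsigned int num_gpu_pages = 100;	/* example transfer size */
	uint32_t size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;	/* 102400 dwords */
	unsigned int num_loops = (size_in_dw + MAX_COPY_DW - 1) / MAX_COPY_DW;	/* DIV_ROUND_UP -> 2 */
	unsigned int i;

	for (i = 0; i < num_loops; i++) {
		uint32_t cur = size_in_dw > MAX_COPY_DW ? MAX_COPY_DW : size_in_dw;

		size_in_dw -= cur;
		/* each chunk costs five ring dwords: header, dst lo, src lo, dst hi, src hi */
		printf("packet %u copies %u dwords\n", i, cur);
	}
	return 0;
}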
+ */ +int r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence) +{ + struct radeon_semaphore *sem = NULL; + int ring_index = rdev->asic->copy.dma_ring_index; + struct radeon_ring *ring = &rdev->ring[ring_index]; + u32 size_in_dw, cur_size_in_dw; + int i, num_loops; + int r = 0; + + r = radeon_semaphore_create(rdev, &sem); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + + size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; + num_loops = DIV_ROUND_UP(size_in_dw, 0xffff); + r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + radeon_semaphore_free(rdev, &sem, NULL); + return r; + } + + if (radeon_fence_need_sync(*fence, ring->idx)) { + radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, + ring->idx); + radeon_fence_note_sync(*fence, ring->idx); + } else { + radeon_semaphore_free(rdev, &sem, NULL); + } + + for (i = 0; i < num_loops; i++) { + cur_size_in_dw = size_in_dw; + if (cur_size_in_dw > 0xFFFF) + cur_size_in_dw = 0xFFFF; + size_in_dw -= cur_size_in_dw; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); + radeon_ring_write(ring, dst_offset & 0xfffffffc); + radeon_ring_write(ring, src_offset & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); + radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); + src_offset += cur_size_in_dw * 4; + dst_offset += cur_size_in_dw * 4; + } + + r = radeon_fence_emit(rdev, fence, ring->idx); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + radeon_ring_unlock_commit(rdev, ring); + radeon_semaphore_free(rdev, &sem, *fence); + + return r; +} + int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size) @@ -2343,7 +2680,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg) static int r600_startup(struct radeon_device *rdev) { - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + struct radeon_ring *ring; int r; /* enable pcie gen2 link */ @@ -2388,6 +2725,12 @@ static int r600_startup(struct radeon_device *rdev) return r; } + r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); + if (r) { + dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); + return r; + } + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -2397,12 +2740,20 @@ static int r600_startup(struct radeon_device *rdev) } r600_irq_set(rdev); + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, R600_CP_RB_RPTR, R600_CP_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); + if (r) + return r; + ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; + r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, + DMA_RB_RPTR, DMA_RB_WPTR, + 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); if (r) return r; + r = r600_cp_load_microcode(rdev); if (r) return r; @@ -2410,6 +2761,10 @@ static int r600_startup(struct radeon_device *rdev) if (r) return r; + r = r600_dma_resume(rdev); + if (r) + return r; + r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); @@ -2465,7 +2820,7 @@ int r600_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r600_cp_stop(rdev); - rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; + r600_dma_stop(rdev); r600_irq_suspend(rdev); radeon_wb_disable(rdev); 
r600_pcie_gart_disable(rdev); @@ -2538,6 +2893,9 @@ int r600_init(struct radeon_device *rdev) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); + rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; + r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); + rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); @@ -2550,6 +2908,7 @@ int r600_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r600_cp_fini(rdev); + r600_dma_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); @@ -2566,6 +2925,7 @@ void r600_fini(struct radeon_device *rdev) r600_audio_fini(rdev); r600_blit_fini(rdev); r600_cp_fini(rdev); + r600_dma_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); @@ -2668,6 +3028,104 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } +/** + * r600_dma_ib_test - test an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ring: radeon_ring structure holding ring information + * + * Test a simple IB in the DMA ring (r6xx-SI). + * Returns 0 on success, error on failure. + */ +int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) +{ + struct radeon_ib ib; + unsigned i; + int r; + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + u32 tmp = 0; + + if (!ptr) { + DRM_ERROR("invalid vram scratch pointer\n"); + return -EINVAL; + } + + tmp = 0xCAFEDEAD; + writel(tmp, ptr); + + r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); + if (r) { + DRM_ERROR("radeon: failed to get ib (%d).\n", r); + return r; + } + + ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); + ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; + ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; + ib.ptr[3] = 0xDEADBEEF; + ib.length_dw = 4; + + r = radeon_ib_schedule(rdev, &ib, NULL); + if (r) { + radeon_ib_free(rdev, &ib); + DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); + return r; + } + r = radeon_fence_wait(ib.fence, false); + if (r) { + DRM_ERROR("radeon: fence wait failed (%d).\n", r); + return r; + } + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = readl(ptr); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < rdev->usec_timeout) { + DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); + } else { + DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); + r = -EINVAL; + } + radeon_ib_free(rdev, &ib); + return r; +} + +/** + * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine + * + * @rdev: radeon_device pointer + * @ib: IB object to schedule + * + * Schedule an IB in the DMA ring (r6xx-r7xx). + */ +void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + + if (rdev->wb.enabled) { + u32 next_rptr = ring->wptr + 4; + while ((next_rptr & 7) != 5) + next_rptr++; + next_rptr += 3; + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); + radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); + radeon_ring_write(ring, next_rptr); + } + + /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. + * Pad as necessary with NOPs. 
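+ *
+ * The INDIRECT_BUFFER packet emitted below is 3 dwords (header,
+ * IB address low, size | address high), so padding the write pointer
+ * to (wptr & 7) == 5 makes the packet end exactly on the 8 DW
+ * boundary (5 + 3 = 8).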
+ */ + while ((ring->wptr & 7) != 5) + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); + radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); + radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF)); + +} + /* * Interrupts * @@ -2859,6 +3317,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) u32 tmp; WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; + WREG32(DMA_CNTL, tmp); WREG32(GRBM_INT_CNTL, 0); WREG32(DxMODE_INT_MASK, 0); WREG32(D1GRPH_INTERRUPT_CONTROL, 0); @@ -3000,6 +3460,7 @@ int r600_irq_set(struct radeon_device *rdev) u32 grbm_int_cntl = 0; u32 hdmi0, hdmi1; u32 d1grph = 0, d2grph = 0; + u32 dma_cntl; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -3034,12 +3495,19 @@ int r600_irq_set(struct radeon_device *rdev) hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; } + dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("r600_irq_set: sw int\n"); cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= TIME_STAMP_INT_ENABLE; } + + if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { + DRM_DEBUG("r600_irq_set: sw int dma\n"); + dma_cntl |= TRAP_ENABLE; + } + if (rdev->irq.crtc_vblank_int[0] || atomic_read(&rdev->irq.pflip[0])) { DRM_DEBUG("r600_irq_set: vblank 0\n"); @@ -3084,6 +3552,7 @@ int r600_irq_set(struct radeon_device *rdev) } WREG32(CP_INT_CNTL, cp_int_cntl); + WREG32(DMA_CNTL, dma_cntl); WREG32(DxMODE_INT_MASK, mode_int); WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); @@ -3463,6 +3932,10 @@ int r600_irq_process(struct radeon_device *rdev) DRM_DEBUG("IH: CP EOP\n"); radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); break; + case 224: /* DMA trap event */ + DRM_DEBUG("IH: DMA trap\n"); + radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); + break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); break; diff --git a/trunk/drivers/gpu/drm/radeon/r600_cp.c b/trunk/drivers/gpu/drm/radeon/r600_cp.c index be85f75aedda..2514123d2d00 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_cp.c +++ b/trunk/drivers/gpu/drm/radeon/r600_cp.c @@ -721,7 +721,12 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, static int r600_count_pipe_bits(uint32_t val) { - return hweight32(val); + int i, ret = 0; + for (i = 0; i < 32; i++) { + ret += val & 1; + val >>= 1; + } + return ret; } static void r600_gfx_init(struct drm_device *dev, diff --git a/trunk/drivers/gpu/drm/radeon/r600d.h b/trunk/drivers/gpu/drm/radeon/r600d.h index fa6f37099ba9..a596c554a3a0 100644 --- a/trunk/drivers/gpu/drm/radeon/r600d.h +++ b/trunk/drivers/gpu/drm/radeon/r600d.h @@ -590,9 +590,59 @@ #define WAIT_2D_IDLECLEAN_bit (1 << 16) #define WAIT_3D_IDLECLEAN_bit (1 << 17) +/* async DMA */ +#define DMA_TILING_CONFIG 0x3ec4 +#define DMA_CONFIG 0x3e4c + +#define DMA_RB_CNTL 0xd000 +# define DMA_RB_ENABLE (1 << 0) +# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ +# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ +# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) +# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ +# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ +#define DMA_RB_BASE 0xd004 +#define DMA_RB_RPTR 0xd008 +#define DMA_RB_WPTR 
0xd00c + +#define DMA_RB_RPTR_ADDR_HI 0xd01c +#define DMA_RB_RPTR_ADDR_LO 0xd020 + +#define DMA_IB_CNTL 0xd024 +# define DMA_IB_ENABLE (1 << 0) +# define DMA_IB_SWAP_ENABLE (1 << 4) +#define DMA_IB_RPTR 0xd028 +#define DMA_CNTL 0xd02c +# define TRAP_ENABLE (1 << 0) +# define SEM_INCOMPLETE_INT_ENABLE (1 << 1) +# define SEM_WAIT_INT_ENABLE (1 << 2) +# define DATA_SWAP_ENABLE (1 << 3) +# define FENCE_SWAP_ENABLE (1 << 4) +# define CTXEMPTY_INT_ENABLE (1 << 28) +#define DMA_STATUS_REG 0xd034 +# define DMA_IDLE (1 << 0) +#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044 +#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048 +#define DMA_MODE 0xd0bc + +/* async DMA packets */ +#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ + (((t) & 0x1) << 23) | \ + (((s) & 0x1) << 22) | \ + (((n) & 0xFFFF) << 0)) +/* async DMA Packet types */ +#define DMA_PACKET_WRITE 0x2 +#define DMA_PACKET_COPY 0x3 +#define DMA_PACKET_INDIRECT_BUFFER 0x4 +#define DMA_PACKET_SEMAPHORE 0x5 +#define DMA_PACKET_FENCE 0x6 +#define DMA_PACKET_TRAP 0x7 +#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */ +#define DMA_PACKET_NOP 0xf + #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) -# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ +# define IH_RB_SIZE(x) ((x) << 1) /* log2 */ # define IH_RB_FULL_DRAIN_ENABLE (1 << 6) # define IH_WPTR_WRITEBACK_ENABLE (1 << 8) # define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ @@ -637,7 +687,9 @@ #define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20 #define SRBM_SOFT_RESET 0xe60 +# define SOFT_RESET_DMA (1 << 12) # define SOFT_RESET_RLC (1 << 13) +# define RV770_SOFT_RESET_DMA (1 << 20) #define CP_INT_CNTL 0xc124 # define CNTX_BUSY_INT_ENABLE (1 << 19) diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index 8c42d54c2e26..461bf53709f5 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -109,7 +109,7 @@ extern int radeon_lockup_timeout; #define RADEON_BIOS_NUM_SCRATCH 8 /* max number of rings */ -#define RADEON_NUM_RINGS 3 +#define RADEON_NUM_RINGS 4 /* fence seq are set to this number when signaled */ #define RADEON_FENCE_SIGNALED_SEQ 0LL @@ -122,6 +122,9 @@ extern int radeon_lockup_timeout; #define CAYMAN_RING_TYPE_CP1_INDEX 1 #define CAYMAN_RING_TYPE_CP2_INDEX 2 +/* R600+ has an async dma ring */ +#define R600_RING_TYPE_DMA_INDEX 3 + /* hardcode those limit for now */ #define RADEON_VA_IB_OFFSET (1 << 20) #define RADEON_VA_RESERVED_SIZE (8 << 20) @@ -787,6 +790,11 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); +/* r600 async dma */ +void r600_dma_stop(struct radeon_device *rdev); +int r600_dma_resume(struct radeon_device *rdev); +void r600_dma_fini(struct radeon_device *rdev); + /* * CS. 
*/ @@ -883,6 +891,7 @@ struct radeon_wb { #define RADEON_WB_CP_RPTR_OFFSET 1024 #define RADEON_WB_CP1_RPTR_OFFSET 1280 #define RADEON_WB_CP2_RPTR_OFFSET 1536 +#define R600_WB_DMA_RPTR_OFFSET 1792 #define R600_WB_IH_WPTR_OFFSET 2048 #define R600_WB_EVENT_OFFSET 3072 diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.c b/trunk/drivers/gpu/drm/radeon/radeon_asic.c index 654520b95ab7..3cf9b29fb53f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.c @@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &r600_gpu_is_lockup, + }, + [R600_RING_TYPE_DMA_INDEX] = { + .ib_execute = &r600_dma_ring_ib_execute, + .emit_fence = &r600_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &r600_dma_is_lockup, } }, .irq = { @@ -963,8 +972,8 @@ static struct radeon_asic r600_asic = { .copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .dma = NULL, - .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, + .dma = &r600_copy_dma, + .dma_ring_index = R600_RING_TYPE_DMA_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, @@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &r600_gpu_is_lockup, + }, + [R600_RING_TYPE_DMA_INDEX] = { + .ib_execute = &r600_dma_ring_ib_execute, + .emit_fence = &r600_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &r600_dma_is_lockup, } }, .irq = { @@ -1038,8 +1056,8 @@ static struct radeon_asic rs780_asic = { .copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .dma = NULL, - .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, + .dma = &r600_copy_dma, + .dma_ring_index = R600_RING_TYPE_DMA_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, @@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &r600_gpu_is_lockup, + }, + [R600_RING_TYPE_DMA_INDEX] = { + .ib_execute = &r600_dma_ring_ib_execute, + .emit_fence = &r600_dma_fence_ring_emit, + .emit_semaphore = &r600_dma_semaphore_ring_emit, + .cs_parse = NULL, + .ring_test = &r600_dma_ring_test, + .ib_test = &r600_dma_ib_test, + .is_lockup = &r600_dma_is_lockup, } }, .irq = { @@ -1113,8 +1140,8 @@ static struct radeon_asic rv770_asic = { .copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .dma = NULL, - .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, + .dma = &r600_copy_dma, + .dma_ring_index = R600_RING_TYPE_DMA_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index 5e3a0e5c6be1..70a5b1f0e43e 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -309,6 +309,14 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); +void r600_dma_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence); +void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct 
radeon_semaphore *semaphore, + bool emit_wait); +void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); +bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); int r600_asic_reset(struct radeon_device *rdev); int r600_set_surface_reg(struct radeon_device *rdev, int reg, @@ -316,11 +324,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t offset, uint32_t obj_size); void r600_clear_surface_reg(struct radeon_device *rdev, int reg); int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); +int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); +int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence); +int r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, struct radeon_fence **fence); void r600_hpd_init(struct radeon_device *rdev); void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c index 47bf162ab9c6..b884c362a8c2 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1599,7 +1599,7 @@ radeon_add_atom_connector(struct drm_device *dev, connector->interlace_allowed = true; connector->doublescan_allowed = true; radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); break; @@ -1608,13 +1608,13 @@ radeon_add_atom_connector(struct drm_device *dev, case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: case DRM_MODE_CONNECTOR_DisplayPort: - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); subpixel_order = SubPixelHorizontalRGB; @@ -1625,14 +1625,14 @@ radeon_add_atom_connector(struct drm_device *dev, connector->doublescan_allowed = false; if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } break; case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; @@ -1651,7 +1651,7 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_ERROR("VGA: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ @@ -1669,7 +1669,7 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ @@ -1692,23 +1692,23 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } @@ -1732,17 +1732,17 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } @@ -1771,17 +1771,17 @@ radeon_add_atom_connector(struct drm_device *dev, DRM_ERROR("DP: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_OFF); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_vborder_property, 0); } @@ -1806,7 +1806,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; @@ -1819,10 +1819,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_atombios_get_tv_info(rdev)); /* no HPD on analog connectors */ @@ -1843,7 +1843,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; @@ -1922,7 +1922,7 @@ radeon_add_legacy_connector(struct drm_device *dev, DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ @@ -1940,7 +1940,7 @@ radeon_add_legacy_connector(struct drm_device *dev, DRM_ERROR("DVIA: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); /* no HPD on analog connectors */ @@ -1959,7 +1959,7 @@ radeon_add_legacy_connector(struct drm_device *dev, } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); } @@ -1983,10 +1983,10 @@ radeon_add_legacy_connector(struct drm_device *dev, */ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) radeon_connector->dac_load_detect = false; - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, radeon_connector->dac_load_detect); - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); /* no HPD on analog connectors */ @@ -2002,7 +2002,7 @@ radeon_add_legacy_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } - drm_object_attach_property(&radeon_connector->base.base, + drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index 91b64278c4ff..d5ab55de44c0 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -282,15 +282,12 @@ static struct drm_driver driver_old = { static struct drm_driver kms_driver; -static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) +static void radeon_kick_out_firmware_fb(struct pci_dev *pdev) { struct apertures_struct *ap; bool primary = false; ap = alloc_apertures(1); - if (!ap) - return -ENOMEM; - ap->ranges[0].base = pci_resource_start(pdev, 0); ap->ranges[0].size = pci_resource_len(pdev, 0); @@ -299,19 +296,13 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) #endif remove_conflicting_framebuffers(ap, "radeondrmfb", primary); kfree(ap); - - return 0; } static int __devinit radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - int ret; - /* Get rid of things like offb */ - ret = radeon_kick_out_firmware_fb(pdev); - if (ret) - return ret; + radeon_kick_out_firmware_fb(pdev); return drm_get_pci_dev(pdev, ent, &kms_driver); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index 6e24f84755b5..4debd60e5aa6 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -1237,6 +1237,7 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev, { struct radeon_bo_va *bo_va; + BUG_ON(!atomic_read(&bo->tbo.reserved)); list_for_each_entry(bo_va, &bo->va, bo_list) { bo_va->valid = false; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index d818b503b42f..92c5f473cf08 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -427,7 +427,7 @@ struct radeon_connector_atom_dig { uint32_t igp_lane_info; /* displayport */ struct 
radeon_i2c_chan *dp_i2c_bus; - u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 dpcd[8]; u8 dp_sink_type; int dp_clock; int dp_lane_count; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.c b/trunk/drivers/gpu/drm/radeon/radeon_object.c index bfb332e616dc..3f9f3bbc4681 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.c @@ -150,7 +150,7 @@ int radeon_bo_create(struct radeon_device *rdev, /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, - &bo->placement, page_align, !kernel, NULL, + &bo->placement, page_align, 0, !kernel, NULL, acc_size, sg, &radeon_ttm_bo_destroy); up_read(&rdev->pm.mclk_lock); if (unlikely(r != 0)) { @@ -250,7 +250,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, } for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); if (likely(r == 0)) { bo->pin_count = 1; if (gpu_addr != NULL) @@ -279,7 +279,7 @@ int radeon_bo_unpin(struct radeon_bo *bo) return 0; for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); if (unlikely(r != 0)) dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); return r; @@ -365,7 +365,7 @@ int radeon_bo_list_validate(struct list_head *head) retry: radeon_ttm_placement_from_domain(bo, domain); r = ttm_bo_validate(&bo->tbo, &bo->placement, - true, false); + true, false, false); if (unlikely(r)) { if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { domain |= RADEON_GEM_DOMAIN_GTT; @@ -394,7 +394,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) int steal; int i; - BUG_ON(!radeon_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->tbo.reserved)); if (!bo->tiling_flags) return 0; @@ -520,7 +520,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch) { - BUG_ON(!radeon_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->tbo.reserved)); if (tiling_flags) *tiling_flags = bo->tiling_flags; if (pitch) @@ -530,7 +530,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo, int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop) { - BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop); + BUG_ON(!atomic_read(&bo->tbo.reserved)); if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) return 0; @@ -585,7 +585,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* hurrah the memory is not visible ! 
*/ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; - r = ttm_bo_validate(bo, &rbo->placement, false, false); + r = ttm_bo_validate(bo, &rbo->placement, false, true, false); if (unlikely(r != 0)) return r; offset = bo->mem.start << PAGE_SHIFT; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.h b/trunk/drivers/gpu/drm/radeon/radeon_object.h index 5fc86b03043b..93cd491fff2e 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.h @@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo) static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) { - return ttm_bo_is_reserved(&bo->tbo); + return !!atomic_read(&bo->tbo.reserved); } static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c index 1d8ff2f850ba..5ebe1b3e5db2 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c @@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo, } static int radeon_move_blit(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, + bool evict, int no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem, struct ttm_mem_reg *old_mem) { @@ -265,15 +265,15 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ &fence); /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, (void *)fence, - evict, no_wait_gpu, new_mem); + r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, + evict, no_wait_reserve, no_wait_gpu, new_mem); radeon_fence_unref(&fence); return r; } static int radeon_move_vram_ram(struct ttm_buffer_object *bo, bool evict, bool interruptible, - bool no_wait_gpu, + bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; @@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, - interruptible, no_wait_gpu); + interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(r)) { return r; } @@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); + r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -320,7 +320,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, static int radeon_move_ram_vram(struct ttm_buffer_object *bo, bool evict, bool interruptible, - bool no_wait_gpu, + bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; @@ -340,16 +340,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, - interruptible, no_wait_gpu); + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(r)) { return r; 
} - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } - r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); + r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } @@ -360,7 +359,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, - bool no_wait_gpu, + bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct radeon_device *rdev; @@ -389,18 +388,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { r = radeon_move_vram_ram(bo, evict, interruptible, - no_wait_gpu, new_mem); + no_wait_reserve, no_wait_gpu, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { r = radeon_move_ram_vram(bo, evict, interruptible, - no_wait_gpu, new_mem); + no_wait_reserve, no_wait_gpu, new_mem); } else { - r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); + r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); } if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); } return r; } @@ -472,12 +471,13 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re { } -static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) +static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, + bool lazy, bool interruptible) { return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); } -static int radeon_sync_obj_flush(void *sync_obj) +static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg) { return 0; } @@ -492,7 +492,7 @@ static void *radeon_sync_obj_ref(void *sync_obj) return radeon_fence_ref((struct radeon_fence *)sync_obj); } -static bool radeon_sync_obj_signaled(void *sync_obj) +static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg) { return radeon_fence_signaled((struct radeon_fence *)sync_obj); } diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 79814a08c8e5..87c979c4f721 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); WREG32(SCRATCH_UMSK, 0); + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; } static int rv770_cp_load_microcode(struct radeon_device *rdev) @@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(GB_TILING_CONFIG, gb_tiling_config); WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); + WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff)); + WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff)); WREG32(CGTS_SYS_TCC_DISABLE, 0); WREG32(CGTS_TCC_DISABLE, 0); @@ -886,7 +889,7 @@ static int rv770_mc_init(struct radeon_device *rdev) static int rv770_startup(struct radeon_device *rdev) { - struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + struct radeon_ring *ring; int r; /* enable pcie gen2 link */ @@ -932,6 +935,12 @@ static int rv770_startup(struct radeon_device *rdev) 
return r; } + r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); + if (r) { + dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); + return r; + } + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -941,11 +950,20 @@ static int rv770_startup(struct radeon_device *rdev) } r600_irq_set(rdev); + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, R600_CP_RB_RPTR, R600_CP_RB_WPTR, 0, 0xfffff, RADEON_CP_PACKET2); if (r) return r; + + ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; + r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, + DMA_RB_RPTR, DMA_RB_WPTR, + 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); + if (r) + return r; + r = rv770_cp_load_microcode(rdev); if (r) return r; @@ -953,6 +971,10 @@ static int rv770_startup(struct radeon_device *rdev) if (r) return r; + r = r600_dma_resume(rdev); + if (r) + return r; + r = radeon_ib_pool_init(rdev); if (r) { dev_err(rdev->dev, "IB initialization failed (%d).\n", r); @@ -995,7 +1017,7 @@ int rv770_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r700_cp_stop(rdev); - rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; + r600_dma_stop(rdev); r600_irq_suspend(rdev); radeon_wb_disable(rdev); rv770_pcie_gart_disable(rdev); @@ -1066,6 +1088,9 @@ int rv770_init(struct radeon_device *rdev) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); + rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; + r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); + rdev->ih.ring_obj = NULL; r600_ih_ring_init(rdev, 64 * 1024); @@ -1078,6 +1103,7 @@ int rv770_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r700_cp_fini(rdev); + r600_dma_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); @@ -1093,6 +1119,7 @@ void rv770_fini(struct radeon_device *rdev) { r600_blit_fini(rdev); r700_cp_fini(rdev); + r600_dma_fini(rdev); r600_irq_fini(rdev); radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/rv770d.h b/trunk/drivers/gpu/drm/radeon/rv770d.h index e2d9dc8e751e..20e29d23d348 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770d.h +++ b/trunk/drivers/gpu/drm/radeon/rv770d.h @@ -109,6 +109,9 @@ #define PIPE_TILING__SHIFT 1 #define PIPE_TILING__MASK 0x0000000e +#define DMA_TILING_CONFIG 0x3ec8 +#define DMA_TILING_CONFIG2 0xd0b8 + #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES_MASK 0x0000FF00 @@ -358,6 +361,26 @@ #define WAIT_UNTIL 0x8040 +/* async DMA */ +#define DMA_RB_RPTR 0xd008 +#define DMA_RB_WPTR 0xd00c + +/* async DMA packets */ +#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ + (((t) & 0x1) << 23) | \ + (((s) & 0x1) << 22) | \ + (((n) & 0xFFFF) << 0)) +/* async DMA Packet types */ +#define DMA_PACKET_WRITE 0x2 +#define DMA_PACKET_COPY 0x3 +#define DMA_PACKET_INDIRECT_BUFFER 0x4 +#define DMA_PACKET_SEMAPHORE 0x5 +#define DMA_PACKET_FENCE 0x6 +#define DMA_PACKET_TRAP 0x7 +#define DMA_PACKET_CONSTANT_FILL 0xd +#define DMA_PACKET_NOP 0xf + + #define SRBM_STATUS 0x0E50 /* DCE 3.2 HDMI */ diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index d917a411ca85..0e7a9306bd0c 100644 --- a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -748,7 
+748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev, connector->encoder = encoder; drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - drm_object_property_set_value(&connector->base, + drm_connector_property_set_value(connector, sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); return 0; diff --git a/trunk/drivers/gpu/drm/tegra/Kconfig b/trunk/drivers/gpu/drm/tegra/Kconfig deleted file mode 100644 index be1daf7344d3..000000000000 --- a/trunk/drivers/gpu/drm/tegra/Kconfig +++ /dev/null @@ -1,23 +0,0 @@ -config DRM_TEGRA - tristate "NVIDIA Tegra DRM" - depends on DRM && OF && ARCH_TEGRA - select DRM_KMS_HELPER - select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - help - Choose this option if you have an NVIDIA Tegra SoC. - - To compile this driver as a module, choose M here: the module - will be called tegra-drm. - -if DRM_TEGRA - -config DRM_TEGRA_DEBUG - bool "NVIDIA Tegra DRM debug support" - help - Say yes here to enable debugging support. - -endif diff --git a/trunk/drivers/gpu/drm/tegra/Makefile b/trunk/drivers/gpu/drm/tegra/Makefile deleted file mode 100644 index 80f73d1315d0..000000000000 --- a/trunk/drivers/gpu/drm/tegra/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -ccflags-y := -Iinclude/drm -ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG - -tegra-drm-y := drm.o fb.o dc.o host1x.o -tegra-drm-y += output.o rgb.o hdmi.o - -obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o diff --git a/trunk/drivers/gpu/drm/tegra/dc.c b/trunk/drivers/gpu/drm/tegra/dc.c deleted file mode 100644 index 074410371e2a..000000000000 --- a/trunk/drivers/gpu/drm/tegra/dc.c +++ /dev/null @@ -1,834 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include - -#include - -#include "drm.h" -#include "dc.h" - -struct tegra_dc_window { - fixed20_12 x; - fixed20_12 y; - fixed20_12 w; - fixed20_12 h; - unsigned int outx; - unsigned int outy; - unsigned int outw; - unsigned int outh; - unsigned int stride; - unsigned int fmt; -}; - -static const struct drm_crtc_funcs tegra_crtc_funcs = { - .set_config = drm_crtc_helper_set_config, - .destroy = drm_crtc_cleanup, -}; - -static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode) -{ -} - -static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - return true; -} - -static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v, - unsigned int bpp) -{ - fixed20_12 outf = dfixed_init(out); - u32 dda_inc; - int max; - - if (v) - max = 15; - else { - switch (bpp) { - case 2: - max = 8; - break; - - default: - WARN_ON_ONCE(1); - /* fallthrough */ - case 4: - max = 4; - break; - } - } - - outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1)); - inf.full -= dfixed_const(1); - - dda_inc = dfixed_div(inf, outf); - dda_inc = min_t(u32, dda_inc, dfixed_const(max)); - - return dda_inc; -} - -static inline u32 compute_initial_dda(fixed20_12 in) -{ - return dfixed_frac(in); -} - -static int tegra_dc_set_timings(struct tegra_dc *dc, - struct drm_display_mode *mode) -{ - /* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */ - unsigned int h_ref_to_sync = 0; - unsigned int v_ref_to_sync = 0; - unsigned long value; - - tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS); - - value = (v_ref_to_sync << 16) | h_ref_to_sync; - tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC); - - value = ((mode->vsync_end - mode->vsync_start) << 16) | - ((mode->hsync_end - mode->hsync_start) << 0); - tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH); - - value = ((mode->vsync_start - mode->vdisplay) << 16) | - ((mode->hsync_start - mode->hdisplay) << 0); - tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH); - - value = ((mode->vtotal - mode->vsync_end) << 16) | - ((mode->htotal - mode->hsync_end) << 0); - tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH); - - value = (mode->vdisplay << 16) | mode->hdisplay; - tegra_dc_writel(dc, value, DC_DISP_ACTIVE); - - return 0; -} - -static int tegra_crtc_setup_clk(struct drm_crtc *crtc, - struct drm_display_mode *mode, - unsigned long *div) -{ - unsigned long pclk = mode->clock * 1000, rate; - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_output *output = NULL; - struct drm_encoder *encoder; - long err; - - list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) - if (encoder->crtc == crtc) { - output = encoder_to_output(encoder); - break; - } - - if (!output) - return -ENODEV; - - /* - * This assumes that the display controller will divide its parent - * clock by 2 to generate the pixel clock. 
- */ - err = tegra_output_setup_clock(output, dc->clk, pclk * 2); - if (err < 0) { - dev_err(dc->dev, "failed to setup clock: %ld\n", err); - return err; - } - - rate = clk_get_rate(dc->clk); - *div = (rate * 2 / pclk) - 2; - - DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div); - - return 0; -} - -static int tegra_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted, - int x, int y, struct drm_framebuffer *old_fb) -{ - struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb); - struct tegra_dc *dc = to_tegra_dc(crtc); - unsigned int h_dda, v_dda, bpp; - struct tegra_dc_window win; - unsigned long div, value; - int err; - - err = tegra_crtc_setup_clk(crtc, mode, &div); - if (err) { - dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err); - return err; - } - - /* program display mode */ - tegra_dc_set_timings(dc, mode); - - value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; - tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS); - - value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1)); - value &= ~LVS_OUTPUT_POLARITY_LOW; - value &= ~LHS_OUTPUT_POLARITY_LOW; - tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1)); - - value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB | - DISP_ORDER_RED_BLUE; - tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL); - - tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS); - - value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1; - tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); - - /* setup window parameters */ - memset(&win, 0, sizeof(win)); - win.x.full = dfixed_const(0); - win.y.full = dfixed_const(0); - win.w.full = dfixed_const(mode->hdisplay); - win.h.full = dfixed_const(mode->vdisplay); - win.outx = 0; - win.outy = 0; - win.outw = mode->hdisplay; - win.outh = mode->vdisplay; - - switch (crtc->fb->pixel_format) { - case DRM_FORMAT_XRGB8888: - win.fmt = WIN_COLOR_DEPTH_B8G8R8A8; - break; - - case DRM_FORMAT_RGB565: - win.fmt = WIN_COLOR_DEPTH_B5G6R5; - break; - - default: - win.fmt = WIN_COLOR_DEPTH_B8G8R8A8; - WARN_ON(1); - break; - } - - bpp = crtc->fb->bits_per_pixel / 8; - win.stride = crtc->fb->pitches[0]; - - /* program window registers */ - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER); - value |= WINDOW_A_SELECT; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); - - tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH); - tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP); - - value = V_POSITION(win.outy) | H_POSITION(win.outx); - tegra_dc_writel(dc, value, DC_WIN_POSITION); - - value = V_SIZE(win.outh) | H_SIZE(win.outw); - tegra_dc_writel(dc, value, DC_WIN_SIZE); - - value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) | - H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp); - tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE); - - h_dda = compute_dda_inc(win.w, win.outw, false, bpp); - v_dda = compute_dda_inc(win.h, win.outh, true, bpp); - - value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda); - tegra_dc_writel(dc, value, DC_WIN_DDA_INC); - - h_dda = compute_initial_dda(win.x); - v_dda = compute_initial_dda(win.y); - - tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA); - tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA); - - tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE); - tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE); - - tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR); - tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE); - tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp, - DC_WINBUF_ADDR_H_OFFSET); - tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET); 
- - value = WIN_ENABLE; - - if (bpp < 24) - value |= COLOR_EXPAND; - - tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); - - tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY); - tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN); - - return 0; -} - -static void tegra_crtc_prepare(struct drm_crtc *crtc) -{ - struct tegra_dc *dc = to_tegra_dc(crtc); - unsigned int syncpt; - unsigned long value; - - /* hardware initialization */ - tegra_periph_reset_deassert(dc->clk); - usleep_range(10000, 20000); - - if (dc->pipe) - syncpt = SYNCPT_VBLANK1; - else - syncpt = SYNCPT_VBLANK0; - - /* initialize display controller */ - tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); - tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | - WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); - - value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value |= DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - /* initialize timer */ - value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | - WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); - tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); - - value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | - WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); - tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); - - value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_MASK); - - value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); -} - -static void tegra_crtc_commit(struct drm_crtc *crtc) -{ - struct tegra_dc *dc = to_tegra_dc(crtc); - unsigned long update_mask; - unsigned long value; - - update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ; - - tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL); - - value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE); - value |= FRAME_END_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); - - value = tegra_dc_readl(dc, DC_CMD_INT_MASK); - value |= FRAME_END_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_MASK); - - tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL); -} - -static void tegra_crtc_load_lut(struct drm_crtc *crtc) -{ -} - -static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { - .dpms = tegra_crtc_dpms, - .mode_fixup = tegra_crtc_mode_fixup, - .mode_set = tegra_crtc_mode_set, - .prepare = tegra_crtc_prepare, - .commit = tegra_crtc_commit, - .load_lut = tegra_crtc_load_lut, -}; - -static irqreturn_t tegra_drm_irq(int irq, void *data) -{ - struct tegra_dc *dc = data; - unsigned long status; - - status = tegra_dc_readl(dc, DC_CMD_INT_STATUS); - tegra_dc_writel(dc, status, DC_CMD_INT_STATUS); - - if (status & FRAME_END_INT) { - /* - dev_dbg(dc->dev, "%s(): frame end\n", __func__); - */ - } - - if (status & VBLANK_INT) { - /* - dev_dbg(dc->dev, "%s(): vertical blank\n", __func__); - */ - drm_handle_vblank(dc->base.dev, dc->pipe); - } - - if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) { - /* - dev_dbg(dc->dev, "%s(): underflow\n", __func__); - */ - } - - return IRQ_HANDLED; -} - -static int tegra_dc_show_regs(struct seq_file *s, void *data) -{ - 
struct drm_info_node *node = s->private; - struct tegra_dc *dc = node->info_ent->data; - -#define DUMP_REG(name) \ - seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \ - tegra_dc_readl(dc, name)) - - DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT); - DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); - DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR); - DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT); - DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL); - DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR); - DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT); - DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL); - DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR); - DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT); - DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL); - DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR); - DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC); - DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0); - DUMP_REG(DC_CMD_DISPLAY_COMMAND); - DUMP_REG(DC_CMD_SIGNAL_RAISE); - DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL); - DUMP_REG(DC_CMD_INT_STATUS); - DUMP_REG(DC_CMD_INT_MASK); - DUMP_REG(DC_CMD_INT_ENABLE); - DUMP_REG(DC_CMD_INT_TYPE); - DUMP_REG(DC_CMD_INT_POLARITY); - DUMP_REG(DC_CMD_SIGNAL_RAISE1); - DUMP_REG(DC_CMD_SIGNAL_RAISE2); - DUMP_REG(DC_CMD_SIGNAL_RAISE3); - DUMP_REG(DC_CMD_STATE_ACCESS); - DUMP_REG(DC_CMD_STATE_CONTROL); - DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER); - DUMP_REG(DC_CMD_REG_ACT_CONTROL); - DUMP_REG(DC_COM_CRC_CONTROL); - DUMP_REG(DC_COM_CRC_CHECKSUM); - DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0)); - DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2)); - DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3)); - DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0)); - DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2)); - DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3)); - DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0)); - DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2)); - DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3)); - DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0)); - DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1)); - DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2)); - DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3)); - DUMP_REG(DC_COM_PIN_INPUT_DATA(0)); - DUMP_REG(DC_COM_PIN_INPUT_DATA(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5)); - DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6)); - DUMP_REG(DC_COM_PIN_MISC_CONTROL); - DUMP_REG(DC_COM_PIN_PM0_CONTROL); - DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE); - DUMP_REG(DC_COM_PIN_PM1_CONTROL); - DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE); - DUMP_REG(DC_COM_SPI_CONTROL); - DUMP_REG(DC_COM_SPI_START_BYTE); - DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB); - DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD); - DUMP_REG(DC_COM_HSPI_CS_DC); - DUMP_REG(DC_COM_SCRATCH_REGISTER_A); - DUMP_REG(DC_COM_SCRATCH_REGISTER_B); - DUMP_REG(DC_COM_GPIO_CTRL); - DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER); - DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED); - DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0); - DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1); - DUMP_REG(DC_DISP_DISP_WIN_OPTIONS); - DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY); - DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); - DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS); - DUMP_REG(DC_DISP_REF_TO_SYNC); - DUMP_REG(DC_DISP_SYNC_WIDTH); - DUMP_REG(DC_DISP_BACK_PORCH); - DUMP_REG(DC_DISP_ACTIVE); - DUMP_REG(DC_DISP_FRONT_PORCH); - DUMP_REG(DC_DISP_H_PULSE0_CONTROL); - DUMP_REG(DC_DISP_H_PULSE0_POSITION_A); - DUMP_REG(DC_DISP_H_PULSE0_POSITION_B); - DUMP_REG(DC_DISP_H_PULSE0_POSITION_C); - DUMP_REG(DC_DISP_H_PULSE0_POSITION_D); - 
DUMP_REG(DC_DISP_H_PULSE1_CONTROL); - DUMP_REG(DC_DISP_H_PULSE1_POSITION_A); - DUMP_REG(DC_DISP_H_PULSE1_POSITION_B); - DUMP_REG(DC_DISP_H_PULSE1_POSITION_C); - DUMP_REG(DC_DISP_H_PULSE1_POSITION_D); - DUMP_REG(DC_DISP_H_PULSE2_CONTROL); - DUMP_REG(DC_DISP_H_PULSE2_POSITION_A); - DUMP_REG(DC_DISP_H_PULSE2_POSITION_B); - DUMP_REG(DC_DISP_H_PULSE2_POSITION_C); - DUMP_REG(DC_DISP_H_PULSE2_POSITION_D); - DUMP_REG(DC_DISP_V_PULSE0_CONTROL); - DUMP_REG(DC_DISP_V_PULSE0_POSITION_A); - DUMP_REG(DC_DISP_V_PULSE0_POSITION_B); - DUMP_REG(DC_DISP_V_PULSE0_POSITION_C); - DUMP_REG(DC_DISP_V_PULSE1_CONTROL); - DUMP_REG(DC_DISP_V_PULSE1_POSITION_A); - DUMP_REG(DC_DISP_V_PULSE1_POSITION_B); - DUMP_REG(DC_DISP_V_PULSE1_POSITION_C); - DUMP_REG(DC_DISP_V_PULSE2_CONTROL); - DUMP_REG(DC_DISP_V_PULSE2_POSITION_A); - DUMP_REG(DC_DISP_V_PULSE3_CONTROL); - DUMP_REG(DC_DISP_V_PULSE3_POSITION_A); - DUMP_REG(DC_DISP_M0_CONTROL); - DUMP_REG(DC_DISP_M1_CONTROL); - DUMP_REG(DC_DISP_DI_CONTROL); - DUMP_REG(DC_DISP_PP_CONTROL); - DUMP_REG(DC_DISP_PP_SELECT_A); - DUMP_REG(DC_DISP_PP_SELECT_B); - DUMP_REG(DC_DISP_PP_SELECT_C); - DUMP_REG(DC_DISP_PP_SELECT_D); - DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL); - DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL); - DUMP_REG(DC_DISP_DISP_COLOR_CONTROL); - DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS); - DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS); - DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS); - DUMP_REG(DC_DISP_LCD_SPI_OPTIONS); - DUMP_REG(DC_DISP_BORDER_COLOR); - DUMP_REG(DC_DISP_COLOR_KEY0_LOWER); - DUMP_REG(DC_DISP_COLOR_KEY0_UPPER); - DUMP_REG(DC_DISP_COLOR_KEY1_LOWER); - DUMP_REG(DC_DISP_COLOR_KEY1_UPPER); - DUMP_REG(DC_DISP_CURSOR_FOREGROUND); - DUMP_REG(DC_DISP_CURSOR_BACKGROUND); - DUMP_REG(DC_DISP_CURSOR_START_ADDR); - DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS); - DUMP_REG(DC_DISP_CURSOR_POSITION); - DUMP_REG(DC_DISP_CURSOR_POSITION_NS); - DUMP_REG(DC_DISP_INIT_SEQ_CONTROL); - DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A); - DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B); - DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C); - DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D); - DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL); - DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST); - DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST); - DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST); - DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST); - DUMP_REG(DC_DISP_DAC_CRT_CTRL); - DUMP_REG(DC_DISP_DISP_MISC_CONTROL); - DUMP_REG(DC_DISP_SD_CONTROL); - DUMP_REG(DC_DISP_SD_CSC_COEFF); - DUMP_REG(DC_DISP_SD_LUT(0)); - DUMP_REG(DC_DISP_SD_LUT(1)); - DUMP_REG(DC_DISP_SD_LUT(2)); - DUMP_REG(DC_DISP_SD_LUT(3)); - DUMP_REG(DC_DISP_SD_LUT(4)); - DUMP_REG(DC_DISP_SD_LUT(5)); - DUMP_REG(DC_DISP_SD_LUT(6)); - DUMP_REG(DC_DISP_SD_LUT(7)); - DUMP_REG(DC_DISP_SD_LUT(8)); - DUMP_REG(DC_DISP_SD_FLICKER_CONTROL); - DUMP_REG(DC_DISP_DC_PIXEL_COUNT); - DUMP_REG(DC_DISP_SD_HISTOGRAM(0)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(1)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(2)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(3)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(4)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(5)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(6)); - DUMP_REG(DC_DISP_SD_HISTOGRAM(7)); - DUMP_REG(DC_DISP_SD_BL_TF(0)); - DUMP_REG(DC_DISP_SD_BL_TF(1)); - DUMP_REG(DC_DISP_SD_BL_TF(2)); - DUMP_REG(DC_DISP_SD_BL_TF(3)); - DUMP_REG(DC_DISP_SD_BL_CONTROL); - DUMP_REG(DC_DISP_SD_HW_K_VALUES); - DUMP_REG(DC_DISP_SD_MAN_K_VALUES); - DUMP_REG(DC_WIN_WIN_OPTIONS); - DUMP_REG(DC_WIN_BYTE_SWAP); - DUMP_REG(DC_WIN_BUFFER_CONTROL); - DUMP_REG(DC_WIN_COLOR_DEPTH); - DUMP_REG(DC_WIN_POSITION); - DUMP_REG(DC_WIN_SIZE); - DUMP_REG(DC_WIN_PRESCALED_SIZE); - DUMP_REG(DC_WIN_H_INITIAL_DDA); - 
DUMP_REG(DC_WIN_V_INITIAL_DDA); - DUMP_REG(DC_WIN_DDA_INC); - DUMP_REG(DC_WIN_LINE_STRIDE); - DUMP_REG(DC_WIN_BUF_STRIDE); - DUMP_REG(DC_WIN_UV_BUF_STRIDE); - DUMP_REG(DC_WIN_BUFFER_ADDR_MODE); - DUMP_REG(DC_WIN_DV_CONTROL); - DUMP_REG(DC_WIN_BLEND_NOKEY); - DUMP_REG(DC_WIN_BLEND_1WIN); - DUMP_REG(DC_WIN_BLEND_2WIN_X); - DUMP_REG(DC_WIN_BLEND_2WIN_Y); - DUMP_REG(DC_WIN_BLEND32WIN_XY); - DUMP_REG(DC_WIN_HP_FETCH_CONTROL); - DUMP_REG(DC_WINBUF_START_ADDR); - DUMP_REG(DC_WINBUF_START_ADDR_NS); - DUMP_REG(DC_WINBUF_START_ADDR_U); - DUMP_REG(DC_WINBUF_START_ADDR_U_NS); - DUMP_REG(DC_WINBUF_START_ADDR_V); - DUMP_REG(DC_WINBUF_START_ADDR_V_NS); - DUMP_REG(DC_WINBUF_ADDR_H_OFFSET); - DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS); - DUMP_REG(DC_WINBUF_ADDR_V_OFFSET); - DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS); - DUMP_REG(DC_WINBUF_UFLOW_STATUS); - DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS); - DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS); - DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS); - -#undef DUMP_REG - - return 0; -} - -static struct drm_info_list debugfs_files[] = { - { "regs", tegra_dc_show_regs, 0, NULL }, -}; - -static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor) -{ - unsigned int i; - char *name; - int err; - - name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe); - dc->debugfs = debugfs_create_dir(name, minor->debugfs_root); - kfree(name); - - if (!dc->debugfs) - return -ENOMEM; - - dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), - GFP_KERNEL); - if (!dc->debugfs_files) { - err = -ENOMEM; - goto remove; - } - - for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) - dc->debugfs_files[i].data = dc; - - err = drm_debugfs_create_files(dc->debugfs_files, - ARRAY_SIZE(debugfs_files), - dc->debugfs, minor); - if (err < 0) - goto free; - - dc->minor = minor; - - return 0; - -free: - kfree(dc->debugfs_files); - dc->debugfs_files = NULL; -remove: - debugfs_remove(dc->debugfs); - dc->debugfs = NULL; - - return err; -} - -static int tegra_dc_debugfs_exit(struct tegra_dc *dc) -{ - drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files), - dc->minor); - dc->minor = NULL; - - kfree(dc->debugfs_files); - dc->debugfs_files = NULL; - - debugfs_remove(dc->debugfs); - dc->debugfs = NULL; - - return 0; -} - -static int tegra_dc_drm_init(struct host1x_client *client, - struct drm_device *drm) -{ - struct tegra_dc *dc = host1x_client_to_dc(client); - int err; - - dc->pipe = drm->mode_config.num_crtc; - - drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs); - drm_mode_crtc_set_gamma_size(&dc->base, 256); - drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); - - err = tegra_dc_rgb_init(drm, dc); - if (err < 0 && err != -ENODEV) { - dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); - return err; - } - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_dc_debugfs_init(dc, drm->primary); - if (err < 0) - dev_err(dc->dev, "debugfs setup failed: %d\n", err); - } - - err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0, - dev_name(dc->dev), dc); - if (err < 0) { - dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq, - err); - return err; - } - - return 0; -} - -static int tegra_dc_drm_exit(struct host1x_client *client) -{ - struct tegra_dc *dc = host1x_client_to_dc(client); - int err; - - devm_free_irq(dc->dev, dc->irq, dc); - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_dc_debugfs_exit(dc); - if (err < 0) - dev_err(dc->dev, "debugfs cleanup failed: %d\n", err); - } - - err = tegra_dc_rgb_exit(dc); - if (err) { - dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err); 
- return err; - } - - return 0; -} - -static const struct host1x_client_ops dc_client_ops = { - .drm_init = tegra_dc_drm_init, - .drm_exit = tegra_dc_drm_exit, -}; - -static int tegra_dc_probe(struct platform_device *pdev) -{ - struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); - struct resource *regs; - struct tegra_dc *dc; - int err; - - dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); - if (!dc) - return -ENOMEM; - - INIT_LIST_HEAD(&dc->list); - dc->dev = &pdev->dev; - - dc->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(dc->clk)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return PTR_ERR(dc->clk); - } - - err = clk_prepare_enable(dc->clk); - if (err < 0) - return err; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(&pdev->dev, "failed to get registers\n"); - return -ENXIO; - } - - dc->regs = devm_request_and_ioremap(&pdev->dev, regs); - if (!dc->regs) { - dev_err(&pdev->dev, "failed to remap registers\n"); - return -ENXIO; - } - - dc->irq = platform_get_irq(pdev, 0); - if (dc->irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ\n"); - return -ENXIO; - } - - INIT_LIST_HEAD(&dc->client.list); - dc->client.ops = &dc_client_ops; - dc->client.dev = &pdev->dev; - - err = tegra_dc_rgb_probe(dc); - if (err < 0 && err != -ENODEV) { - dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err); - return err; - } - - err = host1x_register_client(host1x, &dc->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to register host1x client: %d\n", - err); - return err; - } - - platform_set_drvdata(pdev, dc); - - return 0; -} - -static int tegra_dc_remove(struct platform_device *pdev) -{ - struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); - struct tegra_dc *dc = platform_get_drvdata(pdev); - int err; - - err = host1x_unregister_client(host1x, &dc->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", - err); - return err; - } - - clk_disable_unprepare(dc->clk); - - return 0; -} - -static struct of_device_id tegra_dc_of_match[] = { - { .compatible = "nvidia,tegra30-dc", }, - { .compatible = "nvidia,tegra20-dc", }, - { }, -}; - -struct platform_driver tegra_dc_driver = { - .driver = { - .name = "tegra-dc", - .owner = THIS_MODULE, - .of_match_table = tegra_dc_of_match, - }, - .probe = tegra_dc_probe, - .remove = tegra_dc_remove, -}; diff --git a/trunk/drivers/gpu/drm/tegra/dc.h b/trunk/drivers/gpu/drm/tegra/dc.h deleted file mode 100644 index 99977b5d5c36..000000000000 --- a/trunk/drivers/gpu/drm/tegra/dc.h +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef TEGRA_DC_H -#define TEGRA_DC_H 1 - -#define DC_CMD_GENERAL_INCR_SYNCPT 0x000 -#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001 -#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002 -#define DC_CMD_WIN_A_INCR_SYNCPT 0x008 -#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009 -#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a -#define DC_CMD_WIN_B_INCR_SYNCPT 0x010 -#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011 -#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012 -#define DC_CMD_WIN_C_INCR_SYNCPT 0x018 -#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019 -#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a -#define DC_CMD_CONT_SYNCPT_VSYNC 0x028 -#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031 -#define DC_CMD_DISPLAY_COMMAND 0x032 -#define DISP_CTRL_MODE_STOP (0 << 5) -#define DISP_CTRL_MODE_C_DISPLAY (1 << 5) -#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5) -#define DC_CMD_SIGNAL_RAISE 0x033 -#define DC_CMD_DISPLAY_POWER_CONTROL 0x036 -#define PW0_ENABLE (1 << 0) -#define PW1_ENABLE (1 << 2) -#define PW2_ENABLE (1 << 4) -#define PW3_ENABLE (1 << 6) -#define PW4_ENABLE (1 << 8) -#define PM0_ENABLE (1 << 16) -#define PM1_ENABLE (1 << 18) - -#define DC_CMD_INT_STATUS 0x037 -#define DC_CMD_INT_MASK 0x038 -#define DC_CMD_INT_ENABLE 0x039 -#define DC_CMD_INT_TYPE 0x03a -#define DC_CMD_INT_POLARITY 0x03b -#define CTXSW_INT (1 << 0) -#define FRAME_END_INT (1 << 1) -#define VBLANK_INT (1 << 2) -#define WIN_A_UF_INT (1 << 8) -#define WIN_B_UF_INT (1 << 9) -#define WIN_C_UF_INT (1 << 10) -#define WIN_A_OF_INT (1 << 14) -#define WIN_B_OF_INT (1 << 15) -#define WIN_C_OF_INT (1 << 16) - -#define DC_CMD_SIGNAL_RAISE1 0x03c -#define DC_CMD_SIGNAL_RAISE2 0x03d -#define DC_CMD_SIGNAL_RAISE3 0x03e - -#define DC_CMD_STATE_ACCESS 0x040 - -#define DC_CMD_STATE_CONTROL 0x041 -#define GENERAL_ACT_REQ (1 << 0) -#define WIN_A_ACT_REQ (1 << 1) -#define WIN_B_ACT_REQ (1 << 2) -#define WIN_C_ACT_REQ (1 << 3) -#define GENERAL_UPDATE (1 << 8) -#define WIN_A_UPDATE (1 << 9) -#define WIN_B_UPDATE (1 << 10) -#define WIN_C_UPDATE (1 << 11) -#define NC_HOST_TRIG (1 << 24) - -#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042 -#define WINDOW_A_SELECT (1 << 4) -#define WINDOW_B_SELECT (1 << 5) -#define WINDOW_C_SELECT (1 << 6) - -#define DC_CMD_REG_ACT_CONTROL 0x043 - -#define DC_COM_CRC_CONTROL 0x300 -#define DC_COM_CRC_CHECKSUM 0x301 -#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x)) -#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x)) -#define LVS_OUTPUT_POLARITY_LOW (1 << 28) -#define LHS_OUTPUT_POLARITY_LOW (1 << 30) -#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x)) -#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x)) -#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x)) -#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x)) - -#define DC_COM_PIN_MISC_CONTROL 0x31b -#define DC_COM_PIN_PM0_CONTROL 0x31c -#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d -#define DC_COM_PIN_PM1_CONTROL 0x31e -#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f - -#define DC_COM_SPI_CONTROL 0x320 -#define DC_COM_SPI_START_BYTE 0x321 -#define DC_COM_HSPI_WRITE_DATA_AB 0x322 -#define DC_COM_HSPI_WRITE_DATA_CD 0x323 -#define DC_COM_HSPI_CS_DC 0x324 -#define DC_COM_SCRATCH_REGISTER_A 0x325 -#define DC_COM_SCRATCH_REGISTER_B 0x326 -#define DC_COM_GPIO_CTRL 0x327 -#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328 -#define DC_COM_CRC_CHECKSUM_LATCHED 0x329 - -#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400 -#define H_PULSE_0_ENABLE (1 << 8) -#define H_PULSE_1_ENABLE (1 << 10) -#define H_PULSE_2_ENABLE (1 << 12) - -#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401 - -#define DC_DISP_DISP_WIN_OPTIONS 0x402 -#define HDMI_ENABLE (1 << 30) - -#define 
DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403 -#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24) -#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16) -#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8) -#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0) - -#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404 -#define CURSOR_DELAY(x) (((x) & 0x3f) << 24) -#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16) -#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8) -#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0) - -#define DC_DISP_DISP_TIMING_OPTIONS 0x405 -#define VSYNC_H_POSITION(x) ((x) & 0xfff) - -#define DC_DISP_REF_TO_SYNC 0x406 -#define DC_DISP_SYNC_WIDTH 0x407 -#define DC_DISP_BACK_PORCH 0x408 -#define DC_DISP_ACTIVE 0x409 -#define DC_DISP_FRONT_PORCH 0x40a -#define DC_DISP_H_PULSE0_CONTROL 0x40b -#define DC_DISP_H_PULSE0_POSITION_A 0x40c -#define DC_DISP_H_PULSE0_POSITION_B 0x40d -#define DC_DISP_H_PULSE0_POSITION_C 0x40e -#define DC_DISP_H_PULSE0_POSITION_D 0x40f -#define DC_DISP_H_PULSE1_CONTROL 0x410 -#define DC_DISP_H_PULSE1_POSITION_A 0x411 -#define DC_DISP_H_PULSE1_POSITION_B 0x412 -#define DC_DISP_H_PULSE1_POSITION_C 0x413 -#define DC_DISP_H_PULSE1_POSITION_D 0x414 -#define DC_DISP_H_PULSE2_CONTROL 0x415 -#define DC_DISP_H_PULSE2_POSITION_A 0x416 -#define DC_DISP_H_PULSE2_POSITION_B 0x417 -#define DC_DISP_H_PULSE2_POSITION_C 0x418 -#define DC_DISP_H_PULSE2_POSITION_D 0x419 -#define DC_DISP_V_PULSE0_CONTROL 0x41a -#define DC_DISP_V_PULSE0_POSITION_A 0x41b -#define DC_DISP_V_PULSE0_POSITION_B 0x41c -#define DC_DISP_V_PULSE0_POSITION_C 0x41d -#define DC_DISP_V_PULSE1_CONTROL 0x41e -#define DC_DISP_V_PULSE1_POSITION_A 0x41f -#define DC_DISP_V_PULSE1_POSITION_B 0x420 -#define DC_DISP_V_PULSE1_POSITION_C 0x421 -#define DC_DISP_V_PULSE2_CONTROL 0x422 -#define DC_DISP_V_PULSE2_POSITION_A 0x423 -#define DC_DISP_V_PULSE3_CONTROL 0x424 -#define DC_DISP_V_PULSE3_POSITION_A 0x425 -#define DC_DISP_M0_CONTROL 0x426 -#define DC_DISP_M1_CONTROL 0x427 -#define DC_DISP_DI_CONTROL 0x428 -#define DC_DISP_PP_CONTROL 0x429 -#define DC_DISP_PP_SELECT_A 0x42a -#define DC_DISP_PP_SELECT_B 0x42b -#define DC_DISP_PP_SELECT_C 0x42c -#define DC_DISP_PP_SELECT_D 0x42d - -#define PULSE_MODE_NORMAL (0 << 3) -#define PULSE_MODE_ONE_CLOCK (1 << 3) -#define PULSE_POLARITY_HIGH (0 << 4) -#define PULSE_POLARITY_LOW (1 << 4) -#define PULSE_QUAL_ALWAYS (0 << 6) -#define PULSE_QUAL_VACTIVE (2 << 6) -#define PULSE_QUAL_VACTIVE1 (3 << 6) -#define PULSE_LAST_START_A (0 << 8) -#define PULSE_LAST_END_A (1 << 8) -#define PULSE_LAST_START_B (2 << 8) -#define PULSE_LAST_END_B (3 << 8) -#define PULSE_LAST_START_C (4 << 8) -#define PULSE_LAST_END_C (5 << 8) -#define PULSE_LAST_START_D (6 << 8) -#define PULSE_LAST_END_D (7 << 8) - -#define PULSE_START(x) (((x) & 0xfff) << 0) -#define PULSE_END(x) (((x) & 0xfff) << 16) - -#define DC_DISP_DISP_CLOCK_CONTROL 0x42e -#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8) -#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8) -#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8) -#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8) -#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8) -#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8) -#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8) -#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8) -#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8) -#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8) -#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8) -#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8) -#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8) -#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff) - -#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f -#define DISP_DATA_FORMAT_DF1P1C (0 << 0) -#define DISP_DATA_FORMAT_DF1P2C24B 
(1 << 0) -#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0) -#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0) -#define DISP_DATA_FORMAT_DF2S (4 << 0) -#define DISP_DATA_FORMAT_DF3S (5 << 0) -#define DISP_DATA_FORMAT_DFSPI (6 << 0) -#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0) -#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0) -#define DISP_ALIGNMENT_MSB (0 << 8) -#define DISP_ALIGNMENT_LSB (1 << 8) -#define DISP_ORDER_RED_BLUE (0 << 9) -#define DISP_ORDER_BLUE_RED (1 << 9) - -#define DC_DISP_DISP_COLOR_CONTROL 0x430 -#define BASE_COLOR_SIZE666 (0 << 0) -#define BASE_COLOR_SIZE111 (1 << 0) -#define BASE_COLOR_SIZE222 (2 << 0) -#define BASE_COLOR_SIZE333 (3 << 0) -#define BASE_COLOR_SIZE444 (4 << 0) -#define BASE_COLOR_SIZE555 (5 << 0) -#define BASE_COLOR_SIZE565 (6 << 0) -#define BASE_COLOR_SIZE332 (7 << 0) -#define BASE_COLOR_SIZE888 (8 << 0) -#define DITHER_CONTROL_DISABLE (0 << 8) -#define DITHER_CONTROL_ORDERED (2 << 8) -#define DITHER_CONTROL_ERRDIFF (3 << 8) - -#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431 - -#define DC_DISP_DATA_ENABLE_OPTIONS 0x432 -#define DE_SELECT_ACTIVE_BLANK (0 << 0) -#define DE_SELECT_ACTIVE (1 << 0) -#define DE_SELECT_ACTIVE_IS (2 << 0) -#define DE_CONTROL_ONECLK (0 << 2) -#define DE_CONTROL_NORMAL (1 << 2) -#define DE_CONTROL_EARLY_EXT (2 << 2) -#define DE_CONTROL_EARLY (3 << 2) -#define DE_CONTROL_ACTIVE_BLANK (4 << 2) - -#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433 -#define DC_DISP_LCD_SPI_OPTIONS 0x434 -#define DC_DISP_BORDER_COLOR 0x435 -#define DC_DISP_COLOR_KEY0_LOWER 0x436 -#define DC_DISP_COLOR_KEY0_UPPER 0x437 -#define DC_DISP_COLOR_KEY1_LOWER 0x438 -#define DC_DISP_COLOR_KEY1_UPPER 0x439 - -#define DC_DISP_CURSOR_FOREGROUND 0x43c -#define DC_DISP_CURSOR_BACKGROUND 0x43d - -#define DC_DISP_CURSOR_START_ADDR 0x43e -#define DC_DISP_CURSOR_START_ADDR_NS 0x43f - -#define DC_DISP_CURSOR_POSITION 0x440 -#define DC_DISP_CURSOR_POSITION_NS 0x441 - -#define DC_DISP_INIT_SEQ_CONTROL 0x442 -#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443 -#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444 -#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445 -#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446 - -#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480 -#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481 -#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482 -#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483 -#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484 - -#define DC_DISP_DAC_CRT_CTRL 0x4c0 -#define DC_DISP_DISP_MISC_CONTROL 0x4c1 -#define DC_DISP_SD_CONTROL 0x4c2 -#define DC_DISP_SD_CSC_COEFF 0x4c3 -#define DC_DISP_SD_LUT(x) (0x4c4 + (x)) -#define DC_DISP_SD_FLICKER_CONTROL 0x4cd -#define DC_DISP_DC_PIXEL_COUNT 0x4ce -#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x)) -#define DC_DISP_SD_BL_PARAMETERS 0x4d7 -#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x)) -#define DC_DISP_SD_BL_CONTROL 0x4dc -#define DC_DISP_SD_HW_K_VALUES 0x4dd -#define DC_DISP_SD_MAN_K_VALUES 0x4de - -#define DC_WIN_WIN_OPTIONS 0x700 -#define COLOR_EXPAND (1 << 6) -#define WIN_ENABLE (1 << 30) - -#define DC_WIN_BYTE_SWAP 0x701 -#define BYTE_SWAP_NOSWAP (0 << 0) -#define BYTE_SWAP_SWAP2 (1 << 0) -#define BYTE_SWAP_SWAP4 (2 << 0) -#define BYTE_SWAP_SWAP4HW (3 << 0) - -#define DC_WIN_BUFFER_CONTROL 0x702 -#define BUFFER_CONTROL_HOST (0 << 0) -#define BUFFER_CONTROL_VI (1 << 0) -#define BUFFER_CONTROL_EPP (2 << 0) -#define BUFFER_CONTROL_MPEGE (3 << 0) -#define BUFFER_CONTROL_SB2D (4 << 0) - -#define DC_WIN_COLOR_DEPTH 0x703 -#define WIN_COLOR_DEPTH_P1 0 -#define WIN_COLOR_DEPTH_P2 1 -#define WIN_COLOR_DEPTH_P4 2 -#define WIN_COLOR_DEPTH_P8 3 -#define WIN_COLOR_DEPTH_B4G4R4A4 4 -#define 
WIN_COLOR_DEPTH_B5G5R5A 5 -#define WIN_COLOR_DEPTH_B5G6R5 6 -#define WIN_COLOR_DEPTH_AB5G5R5 7 -#define WIN_COLOR_DEPTH_B8G8R8A8 12 -#define WIN_COLOR_DEPTH_R8G8B8A8 13 -#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14 -#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15 -#define WIN_COLOR_DEPTH_YCbCr422 16 -#define WIN_COLOR_DEPTH_YUV422 17 -#define WIN_COLOR_DEPTH_YCbCr420P 18 -#define WIN_COLOR_DEPTH_YUV420P 19 -#define WIN_COLOR_DEPTH_YCbCr422P 20 -#define WIN_COLOR_DEPTH_YUV422P 21 -#define WIN_COLOR_DEPTH_YCbCr422R 22 -#define WIN_COLOR_DEPTH_YUV422R 23 -#define WIN_COLOR_DEPTH_YCbCr422RA 24 -#define WIN_COLOR_DEPTH_YUV422RA 25 - -#define DC_WIN_POSITION 0x704 -#define H_POSITION(x) (((x) & 0x1fff) << 0) -#define V_POSITION(x) (((x) & 0x1fff) << 16) - -#define DC_WIN_SIZE 0x705 -#define H_SIZE(x) (((x) & 0x1fff) << 0) -#define V_SIZE(x) (((x) & 0x1fff) << 16) - -#define DC_WIN_PRESCALED_SIZE 0x706 -#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0) -#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16) - -#define DC_WIN_H_INITIAL_DDA 0x707 -#define DC_WIN_V_INITIAL_DDA 0x708 -#define DC_WIN_DDA_INC 0x709 -#define H_DDA_INC(x) (((x) & 0xffff) << 0) -#define V_DDA_INC(x) (((x) & 0xffff) << 16) - -#define DC_WIN_LINE_STRIDE 0x70a -#define DC_WIN_BUF_STRIDE 0x70b -#define DC_WIN_UV_BUF_STRIDE 0x70c -#define DC_WIN_BUFFER_ADDR_MODE 0x70d -#define DC_WIN_DV_CONTROL 0x70e - -#define DC_WIN_BLEND_NOKEY 0x70f -#define DC_WIN_BLEND_1WIN 0x710 -#define DC_WIN_BLEND_2WIN_X 0x711 -#define DC_WIN_BLEND_2WIN_Y 0x712 -#define DC_WIN_BLEND32WIN_XY 0x713 - -#define DC_WIN_HP_FETCH_CONTROL 0x714 - -#define DC_WINBUF_START_ADDR 0x800 -#define DC_WINBUF_START_ADDR_NS 0x801 -#define DC_WINBUF_START_ADDR_U 0x802 -#define DC_WINBUF_START_ADDR_U_NS 0x803 -#define DC_WINBUF_START_ADDR_V 0x804 -#define DC_WINBUF_START_ADDR_V_NS 0x805 - -#define DC_WINBUF_ADDR_H_OFFSET 0x806 -#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807 -#define DC_WINBUF_ADDR_V_OFFSET 0x808 -#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809 - -#define DC_WINBUF_UFLOW_STATUS 0x80a - -#define DC_WINBUF_AD_UFLOW_STATUS 0xbca -#define DC_WINBUF_BD_UFLOW_STATUS 0xdca -#define DC_WINBUF_CD_UFLOW_STATUS 0xfca - -/* synchronization points */ -#define SYNCPT_VBLANK0 26 -#define SYNCPT_VBLANK1 27 - -#endif /* TEGRA_DC_H */ diff --git a/trunk/drivers/gpu/drm/tegra/drm.c b/trunk/drivers/gpu/drm/tegra/drm.c deleted file mode 100644 index 3a503c9e4686..000000000000 --- a/trunk/drivers/gpu/drm/tegra/drm.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include - -#include -#include -#include - -#include "drm.h" - -#define DRIVER_NAME "tegra" -#define DRIVER_DESC "NVIDIA Tegra graphics" -#define DRIVER_DATE "20120330" -#define DRIVER_MAJOR 0 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -static int tegra_drm_load(struct drm_device *drm, unsigned long flags) -{ - struct device *dev = drm->dev; - struct host1x *host1x; - int err; - - host1x = dev_get_drvdata(dev); - drm->dev_private = host1x; - host1x->drm = drm; - - drm_mode_config_init(drm); - - err = host1x_drm_init(host1x, drm); - if (err < 0) - return err; - - err = tegra_drm_fb_init(drm); - if (err < 0) - return err; - - drm_kms_helper_poll_init(drm); - - return 0; -} - -static int tegra_drm_unload(struct drm_device *drm) -{ - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_exit(drm); - - drm_mode_config_cleanup(drm); - - return 0; -} - -static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) -{ - return 0; -} - -static void tegra_drm_lastclose(struct drm_device *drm) -{ - struct host1x *host1x = drm->dev_private; - - drm_fbdev_cma_restore_mode(host1x->fbdev); -} - -static struct drm_ioctl_desc tegra_drm_ioctls[] = { -}; - -static const struct file_operations tegra_drm_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .mmap = drm_gem_cma_mmap, - .poll = drm_poll, - .fasync = drm_fasync, - .read = drm_read, -#ifdef CONFIG_COMPAT - .compat_ioctl = drm_compat_ioctl, -#endif - .llseek = noop_llseek, -}; - -struct drm_driver tegra_drm_driver = { - .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM, - .load = tegra_drm_load, - .unload = tegra_drm_unload, - .open = tegra_drm_open, - .lastclose = tegra_drm_lastclose, - - .gem_free_object = drm_gem_cma_free_object, - .gem_vm_ops = &drm_gem_cma_vm_ops, - .dumb_create = drm_gem_cma_dumb_create, - .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .dumb_destroy = drm_gem_cma_dumb_destroy, - - .ioctls = tegra_drm_ioctls, - .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), - .fops = &tegra_drm_fops, - - .name = DRIVER_NAME, - .desc = DRIVER_DESC, - .date = DRIVER_DATE, - .major = DRIVER_MAJOR, - .minor = DRIVER_MINOR, - .patchlevel = DRIVER_PATCHLEVEL, -}; diff --git a/trunk/drivers/gpu/drm/tegra/drm.h b/trunk/drivers/gpu/drm/tegra/drm.h deleted file mode 100644 index 3a843a77ddc7..000000000000 --- a/trunk/drivers/gpu/drm/tegra/drm.h +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef TEGRA_DRM_H -#define TEGRA_DRM_H 1 - -#include -#include -#include -#include -#include -#include -#include - -struct tegra_framebuffer { - struct drm_framebuffer base; - struct drm_gem_cma_object *obj; -}; - -static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb) -{ - return container_of(fb, struct tegra_framebuffer, base); -} - -struct host1x { - struct drm_device *drm; - struct device *dev; - void __iomem *regs; - struct clk *clk; - int syncpt; - int irq; - - struct mutex drm_clients_lock; - struct list_head drm_clients; - struct list_head drm_active; - - struct mutex clients_lock; - struct list_head clients; - - struct drm_fbdev_cma *fbdev; - struct tegra_framebuffer fb; -}; - -struct host1x_client; - -struct host1x_client_ops { - int (*drm_init)(struct host1x_client *client, struct drm_device *drm); - int (*drm_exit)(struct host1x_client *client); -}; - -struct host1x_client { - struct host1x *host1x; - struct device *dev; - - const struct host1x_client_ops *ops; - - struct list_head list; -}; - -extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm); -extern int host1x_drm_exit(struct host1x *host1x); - -extern int host1x_register_client(struct host1x *host1x, - struct host1x_client *client); -extern int host1x_unregister_client(struct host1x *host1x, - struct host1x_client *client); - -struct tegra_output; - -struct tegra_dc { - struct host1x_client client; - - struct host1x *host1x; - struct device *dev; - - struct drm_crtc base; - int pipe; - - struct clk *clk; - - void __iomem *regs; - int irq; - - struct tegra_output *rgb; - - struct list_head list; - - struct drm_info_list *debugfs_files; - struct drm_minor *minor; - struct dentry *debugfs; -}; - -static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client) -{ - return container_of(client, struct tegra_dc, client); -} - -static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) -{ - return container_of(crtc, struct tegra_dc, base); -} - -static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value, - unsigned long reg) -{ - writel(value, dc->regs + (reg << 2)); -} - -static inline unsigned long tegra_dc_readl(struct tegra_dc *dc, - unsigned long reg) -{ - return readl(dc->regs + (reg << 2)); -} - -struct tegra_output_ops { - int (*enable)(struct tegra_output *output); - int (*disable)(struct tegra_output *output); - int (*setup_clock)(struct tegra_output *output, struct clk *clk, - unsigned long pclk); - int (*check_mode)(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status); -}; - -enum tegra_output_type { - TEGRA_OUTPUT_RGB, - TEGRA_OUTPUT_HDMI, -}; - -struct tegra_output { - struct device_node *of_node; - struct device *dev; - - const struct tegra_output_ops *ops; - enum tegra_output_type type; - - struct i2c_adapter *ddc; - const struct edid *edid; - unsigned int hpd_irq; - int hpd_gpio; - - struct drm_encoder encoder; - struct drm_connector connector; -}; - -static inline struct tegra_output *encoder_to_output(struct drm_encoder *e) -{ - return container_of(e, struct tegra_output, encoder); -} - -static inline struct tegra_output *connector_to_output(struct drm_connector *c) -{ - return container_of(c, struct tegra_output, connector); -} - -static inline int tegra_output_enable(struct tegra_output *output) -{ - if (output && output->ops && output->ops->enable) - return output->ops->enable(output); - - return output ? 
-ENOSYS : -EINVAL; -} - -static inline int tegra_output_disable(struct tegra_output *output) -{ - if (output && output->ops && output->ops->disable) - return output->ops->disable(output); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk) -{ - if (output && output->ops && output->ops->setup_clock) - return output->ops->setup_clock(output, clk, pclk); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - if (output && output->ops && output->ops->check_mode) - return output->ops->check_mode(output, mode, status); - - return output ? -ENOSYS : -EINVAL; -} - -/* from rgb.c */ -extern int tegra_dc_rgb_probe(struct tegra_dc *dc); -extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc); -extern int tegra_dc_rgb_exit(struct tegra_dc *dc); - -/* from output.c */ -extern int tegra_output_parse_dt(struct tegra_output *output); -extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output); -extern int tegra_output_exit(struct tegra_output *output); - -/* from gem.c */ -extern struct tegra_gem_object *tegra_gem_alloc(struct drm_device *drm, - size_t size); -extern int tegra_gem_handle_create(struct drm_device *drm, - struct drm_file *file, size_t size, - unsigned long flags, uint32_t *handle); -extern int tegra_gem_dumb_create(struct drm_file *file, struct drm_device *drm, - struct drm_mode_create_dumb *args); -extern int tegra_gem_dumb_map_offset(struct drm_file *file, - struct drm_device *drm, uint32_t handle, - uint64_t *offset); -extern int tegra_gem_dumb_destroy(struct drm_file *file, - struct drm_device *drm, uint32_t handle); -extern int tegra_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); -extern int tegra_gem_init_object(struct drm_gem_object *obj); -extern void tegra_gem_free_object(struct drm_gem_object *obj); -extern struct vm_operations_struct tegra_gem_vm_ops; - -/* from fb.c */ -extern int tegra_drm_fb_init(struct drm_device *drm); -extern void tegra_drm_fb_exit(struct drm_device *drm); - -extern struct platform_driver tegra_host1x_driver; -extern struct platform_driver tegra_hdmi_driver; -extern struct platform_driver tegra_dc_driver; -extern struct drm_driver tegra_drm_driver; - -#endif /* TEGRA_DRM_H */ diff --git a/trunk/drivers/gpu/drm/tegra/fb.c b/trunk/drivers/gpu/drm/tegra/fb.c deleted file mode 100644 index 97993c6835fd..000000000000 --- a/trunk/drivers/gpu/drm/tegra/fb.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include "drm.h" - -static void tegra_drm_fb_output_poll_changed(struct drm_device *drm) -{ - struct host1x *host1x = drm->dev_private; - - drm_fbdev_cma_hotplug_event(host1x->fbdev); -} - -static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { - .fb_create = drm_fb_cma_create, - .output_poll_changed = tegra_drm_fb_output_poll_changed, -}; - -int tegra_drm_fb_init(struct drm_device *drm) -{ - struct host1x *host1x = drm->dev_private; - struct drm_fbdev_cma *fbdev; - - drm->mode_config.min_width = 0; - drm->mode_config.min_height = 0; - - drm->mode_config.max_width = 4096; - drm->mode_config.max_height = 4096; - - drm->mode_config.funcs = &tegra_drm_mode_funcs; - - fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, - drm->mode_config.num_connector); - if (IS_ERR(fbdev)) - return PTR_ERR(fbdev); - -#ifndef CONFIG_FRAMEBUFFER_CONSOLE - drm_fbdev_cma_restore_mode(fbdev); -#endif - - host1x->fbdev = fbdev; - - return 0; -} - -void tegra_drm_fb_exit(struct drm_device *drm) -{ - struct host1x *host1x = drm->dev_private; - - drm_fbdev_cma_fini(host1x->fbdev); -} diff --git a/trunk/drivers/gpu/drm/tegra/hdmi.c b/trunk/drivers/gpu/drm/tegra/hdmi.c deleted file mode 100644 index ab4016412bbf..000000000000 --- a/trunk/drivers/gpu/drm/tegra/hdmi.c +++ /dev/null @@ -1,1334 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "hdmi.h" -#include "drm.h" -#include "dc.h" - -struct tegra_hdmi { - struct host1x_client client; - struct tegra_output output; - struct device *dev; - - struct regulator *vdd; - struct regulator *pll; - - void __iomem *regs; - unsigned int irq; - - struct clk *clk_parent; - struct clk *clk; - - unsigned int audio_source; - unsigned int audio_freq; - bool stereo; - bool dvi; - - struct drm_info_list *debugfs_files; - struct drm_minor *minor; - struct dentry *debugfs; -}; - -static inline struct tegra_hdmi * -host1x_client_to_hdmi(struct host1x_client *client) -{ - return container_of(client, struct tegra_hdmi, client); -} - -static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output) -{ - return container_of(output, struct tegra_hdmi, output); -} - -#define HDMI_AUDIOCLK_FREQ 216000000 -#define HDMI_REKEY_DEFAULT 56 - -enum { - AUTO = 0, - SPDIF, - HDA, -}; - -static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi, - unsigned long reg) -{ - return readl(hdmi->regs + (reg << 2)); -} - -static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val, - unsigned long reg) -{ - writel(val, hdmi->regs + (reg << 2)); -} - -struct tegra_hdmi_audio_config { - unsigned int pclk; - unsigned int n; - unsigned int cts; - unsigned int aval; -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = { - { 25200000, 4096, 25200, 24000 }, - { 27000000, 4096, 27000, 24000 }, - { 74250000, 4096, 74250, 24000 }, - { 148500000, 4096, 148500, 24000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = { - { 25200000, 5880, 26250, 25000 }, - { 27000000, 5880, 28125, 25000 }, - { 74250000, 4704, 61875, 20000 }, - { 148500000, 4704, 123750, 20000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config 
tegra_hdmi_audio_48k[] = { - { 25200000, 6144, 25200, 24000 }, - { 27000000, 6144, 27000, 24000 }, - { 74250000, 6144, 74250, 24000 }, - { 148500000, 6144, 148500, 24000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = { - { 25200000, 11760, 26250, 25000 }, - { 27000000, 11760, 28125, 25000 }, - { 74250000, 9408, 61875, 20000 }, - { 148500000, 9408, 123750, 20000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = { - { 25200000, 12288, 25200, 24000 }, - { 27000000, 12288, 27000, 24000 }, - { 74250000, 12288, 74250, 24000 }, - { 148500000, 12288, 148500, 24000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = { - { 25200000, 23520, 26250, 25000 }, - { 27000000, 23520, 28125, 25000 }, - { 74250000, 18816, 61875, 20000 }, - { 148500000, 18816, 123750, 20000 }, - { 0, 0, 0, 0 }, -}; - -static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = { - { 25200000, 24576, 25200, 24000 }, - { 27000000, 24576, 27000, 24000 }, - { 74250000, 24576, 74250, 24000 }, - { 148500000, 24576, 148500, 24000 }, - { 0, 0, 0, 0 }, -}; - -struct tmds_config { - unsigned int pclk; - u32 pll0; - u32 pll1; - u32 pe_current; - u32 drive_current; -}; - -static const struct tmds_config tegra2_tmds_config[] = { - { /* 480p modes */ - .pclk = 27000000, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | - SOR_PLL_TX_REG_LOAD(3), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE, - .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) | - PE_CURRENT1(PE_CURRENT_0_0_mA) | - PE_CURRENT2(PE_CURRENT_0_0_mA) | - PE_CURRENT3(PE_CURRENT_0_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), - }, { /* 720p modes */ - .pclk = 74250000, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | - SOR_PLL_TX_REG_LOAD(3), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, - .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) | - PE_CURRENT1(PE_CURRENT_6_0_mA) | - PE_CURRENT2(PE_CURRENT_6_0_mA) | - PE_CURRENT3(PE_CURRENT_6_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), - }, { /* 1080p modes */ - .pclk = UINT_MAX, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | - SOR_PLL_TX_REG_LOAD(3), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, - .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) | - PE_CURRENT1(PE_CURRENT_6_0_mA) | - PE_CURRENT2(PE_CURRENT_6_0_mA) | - PE_CURRENT3(PE_CURRENT_6_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), - }, -}; - -static const struct tmds_config tegra3_tmds_config[] = { - { /* 480p modes */ - .pclk = 27000000, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | - SOR_PLL_TX_REG_LOAD(0), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE, - .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) | - PE_CURRENT1(PE_CURRENT_0_0_mA) | - PE_CURRENT2(PE_CURRENT_0_0_mA) | - PE_CURRENT3(PE_CURRENT_0_0_mA), - .drive_current = 
DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), - }, { /* 720p modes */ - .pclk = 74250000, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | - SOR_PLL_TX_REG_LOAD(0), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, - .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) | - PE_CURRENT1(PE_CURRENT_5_0_mA) | - PE_CURRENT2(PE_CURRENT_5_0_mA) | - PE_CURRENT3(PE_CURRENT_5_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), - }, { /* 1080p modes */ - .pclk = UINT_MAX, - .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | - SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) | - SOR_PLL_TX_REG_LOAD(0), - .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, - .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) | - PE_CURRENT1(PE_CURRENT_5_0_mA) | - PE_CURRENT2(PE_CURRENT_5_0_mA) | - PE_CURRENT3(PE_CURRENT_5_0_mA), - .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | - DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), - }, -}; - -static const struct tegra_hdmi_audio_config * -tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) -{ - const struct tegra_hdmi_audio_config *table; - - switch (audio_freq) { - case 32000: - table = tegra_hdmi_audio_32k; - break; - - case 44100: - table = tegra_hdmi_audio_44_1k; - break; - - case 48000: - table = tegra_hdmi_audio_48k; - break; - - case 88200: - table = tegra_hdmi_audio_88_2k; - break; - - case 96000: - table = tegra_hdmi_audio_96k; - break; - - case 176400: - table = tegra_hdmi_audio_176_4k; - break; - - case 192000: - table = tegra_hdmi_audio_192k; - break; - - default: - return NULL; - } - - while (table->pclk) { - if (table->pclk == pclk) - return table; - - table++; - } - - return NULL; -} - -static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) -{ - const unsigned int freqs[] = { - 32000, 44100, 48000, 88200, 96000, 176400, 192000 - }; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(freqs); i++) { - unsigned int f = freqs[i]; - unsigned int eight_half; - unsigned long value; - unsigned int delta; - - if (f > 96000) - delta = 2; - else if (f > 480000) - delta = 6; - else - delta = 9; - - eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128); - value = AUDIO_FS_LOW(eight_half - delta) | - AUDIO_FS_HIGH(eight_half + delta); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i)); - } -} - -static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) -{ - struct device_node *node = hdmi->dev->of_node; - const struct tegra_hdmi_audio_config *config; - unsigned int offset = 0; - unsigned long value; - - switch (hdmi->audio_source) { - case HDA: - value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL; - break; - - case SPDIF: - value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; - break; - - default: - value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO; - break; - } - - if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { - value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) | - AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); - } else { - value |= AUDIO_CNTRL0_INJECT_NULLSMPL; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); - - value = 
AUDIO_CNTRL0_ERROR_TOLERANCE(6) | - AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); - } - - config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk); - if (!config) { - dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n", - hdmi->audio_freq, pclk); - return -EINVAL; - } - - tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL); - - value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE | - AUDIO_N_VALUE(config->n - 1); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); - - tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE, - HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); - - value = ACR_SUBPACK_CTS(config->cts); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); - - value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE); - - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N); - value &= ~AUDIO_N_RESETF; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); - - if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { - switch (hdmi->audio_freq) { - case 32000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320; - break; - - case 44100: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441; - break; - - case 48000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480; - break; - - case 88200: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882; - break; - - case 96000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960; - break; - - case 176400: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764; - break; - - case 192000: - offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920; - break; - } - - tegra_hdmi_writel(hdmi, config->aval, offset); - } - - tegra_hdmi_setup_audio_fs_tables(hdmi); - - return 0; -} - -static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, - unsigned int offset, u8 type, - u8 version, void *data, size_t size) -{ - unsigned long value; - u8 *ptr = data; - u32 subpack[2]; - size_t i; - u8 csum; - - /* first byte of data is the checksum */ - csum = type + version + size - 1; - - for (i = 1; i < size; i++) - csum += ptr[i]; - - ptr[0] = 0x100 - csum; - - value = INFOFRAME_HEADER_TYPE(type) | - INFOFRAME_HEADER_VERSION(version) | - INFOFRAME_HEADER_LEN(size - 1); - tegra_hdmi_writel(hdmi, value, offset); - - /* The audio inforame only has one set of subpack registers. The hdmi - * block pads the rest of the data as per the spec so we have to fixup - * the length before filling in the subpacks. 
- */ - if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER) - size = 6; - - /* each subpack 7 bytes devided into: - * subpack_low - bytes 0 - 3 - * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00) - */ - for (i = 0; i < size; i++) { - size_t index = i % 7; - - if (index == 0) - memset(subpack, 0x0, sizeof(subpack)); - - ((u8 *)subpack)[index] = ptr[i]; - - if (index == 6 || (i + 1 == size)) { - unsigned int reg = offset + 1 + (i / 7) * 2; - - tegra_hdmi_writel(hdmi, subpack[0], reg); - tegra_hdmi_writel(hdmi, subpack[1], reg + 1); - } - } -} - -static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi, - struct drm_display_mode *mode) -{ - struct hdmi_avi_infoframe frame; - unsigned int h_front_porch; - unsigned int hsize = 16; - unsigned int vsize = 9; - - if (hdmi->dvi) { - tegra_hdmi_writel(hdmi, 0, - HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); - return; - } - - h_front_porch = mode->htotal - mode->hsync_end; - memset(&frame, 0, sizeof(frame)); - frame.r = HDMI_AVI_R_SAME; - - switch (mode->vdisplay) { - case 480: - if (mode->hdisplay == 640) { - frame.m = HDMI_AVI_M_4_3; - frame.vic = 1; - } else { - frame.m = HDMI_AVI_M_16_9; - frame.vic = 3; - } - break; - - case 576: - if (((hsize * 10) / vsize) > 14) { - frame.m = HDMI_AVI_M_16_9; - frame.vic = 18; - } else { - frame.m = HDMI_AVI_M_4_3; - frame.vic = 17; - } - break; - - case 720: - case 1470: /* stereo mode */ - frame.m = HDMI_AVI_M_16_9; - - if (h_front_porch == 110) - frame.vic = 4; - else - frame.vic = 19; - break; - - case 1080: - case 2205: /* stereo mode */ - frame.m = HDMI_AVI_M_16_9; - - switch (h_front_porch) { - case 88: - frame.vic = 16; - break; - - case 528: - frame.vic = 31; - break; - - default: - frame.vic = 32; - break; - } - break; - - default: - frame.m = HDMI_AVI_M_16_9; - frame.vic = 0; - break; - } - - tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER, - HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION, - &frame, sizeof(frame)); - - tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, - HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); -} - -static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) -{ - struct hdmi_audio_infoframe frame; - - if (hdmi->dvi) { - tegra_hdmi_writel(hdmi, 0, - HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); - return; - } - - memset(&frame, 0, sizeof(frame)); - frame.cc = HDMI_AUDIO_CC_2; - - tegra_hdmi_write_infopack(hdmi, - HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER, - HDMI_INFOFRAME_TYPE_AUDIO, - HDMI_AUDIO_VERSION, - &frame, sizeof(frame)); - - tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, - HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); -} - -static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) -{ - struct hdmi_stereo_infoframe frame; - unsigned long value; - - if (!hdmi->stereo) { - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - value &= ~GENERIC_CTRL_ENABLE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - return; - } - - memset(&frame, 0, sizeof(frame)); - frame.regid0 = 0x03; - frame.regid1 = 0x0c; - frame.regid2 = 0x00; - frame.hdmi_video_format = 2; - - /* TODO: 74 MHz limit? 
*/ - if (1) { - frame._3d_structure = 0; - } else { - frame._3d_structure = 8; - frame._3d_ext_data = 0; - } - - tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER, - HDMI_INFOFRAME_TYPE_VENDOR, - HDMI_VENDOR_VERSION, &frame, 6); - - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - value |= GENERIC_CTRL_ENABLE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); -} - -static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi, - const struct tmds_config *tmds) -{ - unsigned long value; - - tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0); - tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1); - tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT); - - value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); -} - -static int tegra_output_hdmi_enable(struct tegra_output *output) -{ - unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - struct drm_display_mode *mode = &dc->base.mode; - struct tegra_hdmi *hdmi = to_hdmi(output); - struct device_node *node = hdmi->dev->of_node; - unsigned int pulse_start, div82, pclk; - const struct tmds_config *tmds; - unsigned int num_tmds; - unsigned long value; - int retries = 1000; - int err; - - pclk = mode->clock * 1000; - h_sync_width = mode->hsync_end - mode->hsync_start; - h_front_porch = mode->htotal - mode->hsync_end; - h_back_porch = mode->hsync_start - mode->hdisplay; - - err = regulator_enable(hdmi->vdd); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); - return err; - } - - err = regulator_enable(hdmi->pll); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); - return err; - } - - /* - * This assumes that the display controller will divide its parent - * clock by 2 to generate the pixel clock. 
- */ - err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2); - if (err < 0) { - dev_err(hdmi->dev, "failed to setup clock: %d\n", err); - return err; - } - - err = clk_set_rate(hdmi->clk, pclk); - if (err < 0) - return err; - - err = clk_enable(hdmi->clk); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable clock: %d\n", err); - return err; - } - - tegra_periph_reset_assert(hdmi->clk); - usleep_range(1000, 2000); - tegra_periph_reset_deassert(hdmi->clk); - - tegra_dc_writel(dc, VSYNC_H_POSITION(1), - DC_DISP_DISP_TIMING_OPTIONS); - tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888, - DC_DISP_DISP_COLOR_CONTROL); - - /* video_preamble uses h_pulse2 */ - pulse_start = 1 + h_sync_width + h_back_porch - 10; - - tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0); - - value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE | - PULSE_LAST_END_A; - tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL); - - value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8); - tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A); - - value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) | - VSYNC_WINDOW_ENABLE; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); - - if (dc->pipe) - value = HDMI_SRC_DISPLAYB; - else - value = HDMI_SRC_DISPLAYA; - - if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) || - (mode->vdisplay == 576))) - tegra_hdmi_writel(hdmi, - value | ARM_VIDEO_RANGE_FULL, - HDMI_NV_PDISP_INPUT_CONTROL); - else - tegra_hdmi_writel(hdmi, - value | ARM_VIDEO_RANGE_LIMITED, - HDMI_NV_PDISP_INPUT_CONTROL); - - div82 = clk_get_rate(hdmi->clk) / 1000000 * 4; - value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK); - - if (!hdmi->dvi) { - err = tegra_hdmi_setup_audio(hdmi, pclk); - if (err < 0) - hdmi->dvi = true; - } - - if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) { - /* - * TODO: add ELD support - */ - } - - rekey = HDMI_REKEY_DEFAULT; - value = HDMI_CTRL_REKEY(rekey); - value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch + - h_front_porch - rekey - 18) / 32); - - if (!hdmi->dvi) - value |= HDMI_CTRL_ENABLE; - - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL); - - if (hdmi->dvi) - tegra_hdmi_writel(hdmi, 0x0, - HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - else - tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO, - HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - - tegra_hdmi_setup_avi_infoframe(hdmi, mode); - tegra_hdmi_setup_audio_infoframe(hdmi); - tegra_hdmi_setup_stereo_infoframe(hdmi); - - /* TMDS CONFIG */ - if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { - num_tmds = ARRAY_SIZE(tegra3_tmds_config); - tmds = tegra3_tmds_config; - } else { - num_tmds = ARRAY_SIZE(tegra2_tmds_config); - tmds = tegra2_tmds_config; - } - - for (i = 0; i < num_tmds; i++) { - if (pclk <= tmds[i].pclk) { - tegra_hdmi_setup_tmds(hdmi, &tmds[i]); - break; - } - } - - tegra_hdmi_writel(hdmi, - SOR_SEQ_CTL_PU_PC(0) | - SOR_SEQ_PU_PC_ALT(0) | - SOR_SEQ_PD_PC(8) | - SOR_SEQ_PD_PC_ALT(8), - HDMI_NV_PDISP_SOR_SEQ_CTL); - - value = SOR_SEQ_INST_WAIT_TIME(1) | - SOR_SEQ_INST_WAIT_UNITS_VSYNC | - SOR_SEQ_INST_HALT | - SOR_SEQ_INST_PIN_A_LOW | - SOR_SEQ_INST_PIN_B_LOW | - SOR_SEQ_INST_DRIVE_PWM_OUT_LO; - - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0)); - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8)); - - value = 0x1c800; - value &= ~SOR_CSTM_ROTCLK(~0); - value |= SOR_CSTM_ROTCLK(2); - tegra_hdmi_writel(hdmi, value, 
HDMI_NV_PDISP_SOR_CSTM); - - tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND); - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - - /* start SOR */ - tegra_hdmi_writel(hdmi, - SOR_PWR_NORMAL_STATE_PU | - SOR_PWR_NORMAL_START_NORMAL | - SOR_PWR_SAFE_STATE_PD | - SOR_PWR_SETTING_NEW_TRIGGER, - HDMI_NV_PDISP_SOR_PWR); - tegra_hdmi_writel(hdmi, - SOR_PWR_NORMAL_STATE_PU | - SOR_PWR_NORMAL_START_NORMAL | - SOR_PWR_SAFE_STATE_PD | - SOR_PWR_SETTING_NEW_DONE, - HDMI_NV_PDISP_SOR_PWR); - - do { - BUG_ON(--retries < 0); - value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR); - } while (value & SOR_PWR_SETTING_NEW_PENDING); - - value = SOR_STATE_ASY_CRCMODE_COMPLETE | - SOR_STATE_ASY_OWNER_HEAD0 | - SOR_STATE_ASY_SUBOWNER_BOTH | - SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A | - SOR_STATE_ASY_DEPOL_POS; - - /* setup sync polarities */ - if (mode->flags & DRM_MODE_FLAG_PHSYNC) - value |= SOR_STATE_ASY_HSYNCPOL_POS; - - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - value |= SOR_STATE_ASY_HSYNCPOL_NEG; - - if (mode->flags & DRM_MODE_FLAG_PVSYNC) - value |= SOR_STATE_ASY_VSYNCPOL_POS; - - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - value |= SOR_STATE_ASY_VSYNCPOL_NEG; - - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2); - - value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL; - tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1); - - tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); - tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0); - tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED, - HDMI_NV_PDISP_SOR_STATE1); - tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); - - tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS); - - value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - value = DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - - /* TODO: add HDCP support */ - - return 0; -} - -static int tegra_output_hdmi_disable(struct tegra_output *output) -{ - struct tegra_hdmi *hdmi = to_hdmi(output); - - tegra_periph_reset_assert(hdmi->clk); - clk_disable(hdmi->clk); - regulator_disable(hdmi->pll); - regulator_disable(hdmi->vdd); - - return 0; -} - -static int tegra_output_hdmi_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk) -{ - struct tegra_hdmi *hdmi = to_hdmi(output); - struct clk *base; - int err; - - err = clk_set_parent(clk, hdmi->clk_parent); - if (err < 0) { - dev_err(output->dev, "failed to set parent: %d\n", err); - return err; - } - - base = clk_get_parent(hdmi->clk_parent); - - /* - * This assumes that the parent clock is pll_d_out0 or pll_d2_out - * respectively, each of which divides the base pll_d by 2. 
- */ - err = clk_set_rate(base, pclk * 2); - if (err < 0) - dev_err(output->dev, - "failed to set base clock rate to %lu Hz\n", - pclk * 2); - - return 0; -} - -static int tegra_output_hdmi_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - struct tegra_hdmi *hdmi = to_hdmi(output); - unsigned long pclk = mode->clock * 1000; - struct clk *parent; - long err; - - parent = clk_get_parent(hdmi->clk_parent); - - err = clk_round_rate(parent, pclk * 4); - if (err < 0) - *status = MODE_NOCLOCK; - else - *status = MODE_OK; - - return 0; -} - -static const struct tegra_output_ops hdmi_ops = { - .enable = tegra_output_hdmi_enable, - .disable = tegra_output_hdmi_disable, - .setup_clock = tegra_output_hdmi_setup_clock, - .check_mode = tegra_output_hdmi_check_mode, -}; - -static int tegra_hdmi_show_regs(struct seq_file *s, void *data) -{ - struct drm_info_node *node = s->private; - struct tegra_hdmi *hdmi = node->info_ent->data; - -#define DUMP_REG(name) \ - seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \ - tegra_hdmi_readl(hdmi, name)) - - DUMP_REG(HDMI_CTXSW); - DUMP_REG(HDMI_NV_PDISP_SOR_STATE0); - DUMP_REG(HDMI_NV_PDISP_SOR_STATE1); - DUMP_REG(HDMI_NV_PDISP_SOR_STATE2); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB); - DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH); - 
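The AVI/audio/generic infoframe registers dumped here are filled by tegra_hdmi_write_infopack() earlier in this file, which applies the usual HDMI infoframe checksum rule: byte 0 of the payload is chosen so that type + version + length + all payload bytes sum to zero modulo 256. A minimal standalone sketch of that checksum step follows; the sample payload bytes are arbitrary, and 0x82/2 are the standard AVI infoframe type and version.

#include <stdio.h>
#include <stdint.h>

/* Fill data[0] so that type + version + (size - 1) + data[1..size-1]
 * sums to 0 (mod 256), the same rule tegra_hdmi_write_infopack() uses. */
static void infoframe_checksum(uint8_t type, uint8_t version,
			       uint8_t *data, size_t size)
{
	uint8_t csum = type + version + (uint8_t)(size - 1);
	size_t i;

	for (i = 1; i < size; i++)
		csum += data[i];

	data[0] = (uint8_t)(0x100 - csum);
}

int main(void)
{
	uint8_t frame[14] = { 0 };	/* arbitrary AVI-sized payload */

	frame[1] = 0x12;
	frame[2] = 0x28;
	infoframe_checksum(0x82, 2, frame, sizeof(frame));
	printf("checksum byte: %#04x\n", (unsigned)frame[0]);
	return 0;
}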
DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH); - DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT); - DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); - DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL); - DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS); - DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK); - DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1); - DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2); - DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0); - DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1); - DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA); - DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE); - DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1); - DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2); - DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL); - DUMP_REG(HDMI_NV_PDISP_SOR_CAP); - DUMP_REG(HDMI_NV_PDISP_SOR_PWR); - DUMP_REG(HDMI_NV_PDISP_SOR_TEST); - DUMP_REG(HDMI_NV_PDISP_SOR_PLL0); - DUMP_REG(HDMI_NV_PDISP_SOR_PLL1); - DUMP_REG(HDMI_NV_PDISP_SOR_PLL2); - DUMP_REG(HDMI_NV_PDISP_SOR_CSTM); - DUMP_REG(HDMI_NV_PDISP_SOR_LVDS); - DUMP_REG(HDMI_NV_PDISP_SOR_CRCA); - DUMP_REG(HDMI_NV_PDISP_SOR_CRCB); - DUMP_REG(HDMI_NV_PDISP_SOR_BLANK); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14)); - DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15)); - DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0); - DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1); - DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0); - DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1); - DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0); - DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1); - DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0); - DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1); - DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0); - DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1); - DUMP_REG(HDMI_NV_PDISP_SOR_TRIG); - DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK); - DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); - DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0); - DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1); - DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5)); - 
DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6)); - DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH); - DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD); - DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0); - DUMP_REG(HDMI_NV_PDISP_AUDIO_N); - DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING); - DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK); - DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL); - DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL); - DUMP_REG(HDMI_NV_PDISP_SCRATCH); - DUMP_REG(HDMI_NV_PDISP_PE_CURRENT); - DUMP_REG(HDMI_NV_PDISP_KEY_CTRL); - DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0); - DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1); - DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3); - DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG); - DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX); - DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); - DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); - DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); - -#undef DUMP_REG - - return 0; -} - -static struct drm_info_list debugfs_files[] = { - { "regs", tegra_hdmi_show_regs, 0, NULL }, -}; - -static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi, - struct drm_minor *minor) -{ - unsigned int i; - int err; - - hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root); - if (!hdmi->debugfs) - return -ENOMEM; - - hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files), - GFP_KERNEL); - if (!hdmi->debugfs_files) { - err = -ENOMEM; - goto remove; - } - - for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) - hdmi->debugfs_files[i].data = hdmi; - - err = drm_debugfs_create_files(hdmi->debugfs_files, - ARRAY_SIZE(debugfs_files), - hdmi->debugfs, minor); - if (err < 0) - goto free; - - hdmi->minor = minor; - - return 0; - -free: - kfree(hdmi->debugfs_files); - hdmi->debugfs_files = NULL; -remove: - debugfs_remove(hdmi->debugfs); - hdmi->debugfs = NULL; - - return err; -} - -static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi) -{ - drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files), - hdmi->minor); - hdmi->minor = NULL; - - kfree(hdmi->debugfs_files); - hdmi->debugfs_files = NULL; - - debugfs_remove(hdmi->debugfs); - hdmi->debugfs = NULL; - - return 0; -} - -static int tegra_hdmi_drm_init(struct host1x_client *client, - struct drm_device *drm) -{ - struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); - int err; - - hdmi->output.type = TEGRA_OUTPUT_HDMI; - hdmi->output.dev = client->dev; - hdmi->output.ops = &hdmi_ops; - - err = tegra_output_init(drm, &hdmi->output); - if (err < 0) { - dev_err(client->dev, "output setup failed: %d\n", err); - return err; - } - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_hdmi_debugfs_init(hdmi, drm->primary); - if (err < 0) - dev_err(client->dev, "debugfs setup failed: %d\n", err); - } - - return 0; -} - -static int tegra_hdmi_drm_exit(struct host1x_client *client) -{ - struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); - int err; - - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_hdmi_debugfs_exit(hdmi); - if (err < 0) - dev_err(client->dev, "debugfs cleanup failed: %d\n", - err); - } - - err = tegra_output_disable(&hdmi->output); - if (err < 0) { - dev_err(client->dev, "output failed to disable: %d\n", err); - return err; - } - - err = tegra_output_exit(&hdmi->output); - if (err < 0) { - dev_err(client->dev, "output cleanup failed: %d\n", err); - return err; - } - - return 0; -} - -static const struct host1x_client_ops hdmi_client_ops = { - .drm_init = tegra_hdmi_drm_init, - .drm_exit = 
tegra_hdmi_drm_exit, -}; - -static int tegra_hdmi_probe(struct platform_device *pdev) -{ - struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); - struct tegra_hdmi *hdmi; - struct resource *regs; - int err; - - hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) - return -ENOMEM; - - hdmi->dev = &pdev->dev; - hdmi->audio_source = AUTO; - hdmi->audio_freq = 44100; - hdmi->stereo = false; - hdmi->dvi = false; - - hdmi->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(hdmi->clk)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return PTR_ERR(hdmi->clk); - } - - err = clk_prepare(hdmi->clk); - if (err < 0) - return err; - - hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent"); - if (IS_ERR(hdmi->clk_parent)) - return PTR_ERR(hdmi->clk_parent); - - err = clk_prepare(hdmi->clk_parent); - if (err < 0) - return err; - - err = clk_set_parent(hdmi->clk, hdmi->clk_parent); - if (err < 0) { - dev_err(&pdev->dev, "failed to setup clocks: %d\n", err); - return err; - } - - hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd"); - if (IS_ERR(hdmi->vdd)) { - dev_err(&pdev->dev, "failed to get VDD regulator\n"); - return PTR_ERR(hdmi->vdd); - } - - hdmi->pll = devm_regulator_get(&pdev->dev, "pll"); - if (IS_ERR(hdmi->pll)) { - dev_err(&pdev->dev, "failed to get PLL regulator\n"); - return PTR_ERR(hdmi->pll); - } - - hdmi->output.dev = &pdev->dev; - - err = tegra_output_parse_dt(&hdmi->output); - if (err < 0) - return err; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) - return -ENXIO; - - hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs); - if (!hdmi->regs) - return -EADDRNOTAVAIL; - - err = platform_get_irq(pdev, 0); - if (err < 0) - return err; - - hdmi->irq = err; - - hdmi->client.ops = &hdmi_client_ops; - INIT_LIST_HEAD(&hdmi->client.list); - hdmi->client.dev = &pdev->dev; - - err = host1x_register_client(host1x, &hdmi->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to register host1x client: %d\n", - err); - return err; - } - - platform_set_drvdata(pdev, hdmi); - - return 0; -} - -static int tegra_hdmi_remove(struct platform_device *pdev) -{ - struct host1x *host1x = dev_get_drvdata(pdev->dev.parent); - struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); - int err; - - err = host1x_unregister_client(host1x, &hdmi->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", - err); - return err; - } - - clk_unprepare(hdmi->clk_parent); - clk_unprepare(hdmi->clk); - - return 0; -} - -static struct of_device_id tegra_hdmi_of_match[] = { - { .compatible = "nvidia,tegra30-hdmi", }, - { .compatible = "nvidia,tegra20-hdmi", }, - { }, -}; - -struct platform_driver tegra_hdmi_driver = { - .driver = { - .name = "tegra-hdmi", - .owner = THIS_MODULE, - .of_match_table = tegra_hdmi_of_match, - }, - .probe = tegra_hdmi_probe, - .remove = tegra_hdmi_remove, -}; diff --git a/trunk/drivers/gpu/drm/tegra/hdmi.h b/trunk/drivers/gpu/drm/tegra/hdmi.h deleted file mode 100644 index 1477f36eb45a..000000000000 --- a/trunk/drivers/gpu/drm/tegra/hdmi.h +++ /dev/null @@ -1,575 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef TEGRA_HDMI_H -#define TEGRA_HDMI_H 1 - -#define HDMI_INFOFRAME_TYPE_VENDOR 0x81 -#define HDMI_INFOFRAME_TYPE_AVI 0x82 -#define HDMI_INFOFRAME_TYPE_SPD 0x83 -#define HDMI_INFOFRAME_TYPE_AUDIO 0x84 -#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85 -#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86 - -/* all fields little endian */ -struct hdmi_avi_infoframe { - /* PB0 */ - u8 csum; - - /* PB1 */ - unsigned s:2; /* scan information */ - unsigned b:2; /* bar info data valid */ - unsigned a:1; /* active info present */ - unsigned y:2; /* RGB or YCbCr */ - unsigned res1:1; - - /* PB2 */ - unsigned r:4; /* active format aspect ratio */ - unsigned m:2; /* picture aspect ratio */ - unsigned c:2; /* colorimetry */ - - /* PB3 */ - unsigned sc:2; /* scan information */ - unsigned q:2; /* quantization range */ - unsigned ec:3; /* extended colorimetry */ - unsigned itc:1; /* it content */ - - /* PB4 */ - unsigned vic:7; /* video format id code */ - unsigned res4:1; - - /* PB5 */ - unsigned pr:4; /* pixel repetition factor */ - unsigned cn:2; /* it content type*/ - unsigned yq:2; /* ycc quantization range */ - - /* PB6-7 */ - u16 top_bar_end_line; - - /* PB8-9 */ - u16 bot_bar_start_line; - - /* PB10-11 */ - u16 left_bar_end_pixel; - - /* PB12-13 */ - u16 right_bar_start_pixel; -} __packed; - -#define HDMI_AVI_VERSION 0x02 - -#define HDMI_AVI_Y_RGB 0x0 -#define HDMI_AVI_Y_YCBCR_422 0x1 -#define HDMI_AVI_Y_YCBCR_444 0x2 - -#define HDMI_AVI_B_VERT 0x1 -#define HDMI_AVI_B_HORIZ 0x2 - -#define HDMI_AVI_S_NONE 0x0 -#define HDMI_AVI_S_OVERSCAN 0x1 -#define HDMI_AVI_S_UNDERSCAN 0x2 - -#define HDMI_AVI_C_NONE 0x0 -#define HDMI_AVI_C_SMPTE 0x1 -#define HDMI_AVI_C_ITU_R 0x2 -#define HDMI_AVI_C_EXTENDED 0x4 - -#define HDMI_AVI_M_4_3 0x1 -#define HDMI_AVI_M_16_9 0x2 - -#define HDMI_AVI_R_SAME 0x8 -#define HDMI_AVI_R_4_3_CENTER 0x9 -#define HDMI_AVI_R_16_9_CENTER 0xa -#define HDMI_AVI_R_14_9_CENTER 0xb - -/* all fields little endian */ -struct hdmi_audio_infoframe { - /* PB0 */ - u8 csum; - - /* PB1 */ - unsigned cc:3; /* channel count */ - unsigned res1:1; - unsigned ct:4; /* coding type */ - - /* PB2 */ - unsigned ss:2; /* sample size */ - unsigned sf:3; /* sample frequency */ - unsigned res2:3; - - /* PB3 */ - unsigned cxt:5; /* coding extention type */ - unsigned res3:3; - - /* PB4 */ - u8 ca; /* channel/speaker allocation */ - - /* PB5 */ - unsigned res5:3; - unsigned lsv:4; /* level shift value */ - unsigned dm_inh:1; /* downmix inhibit */ - - /* PB6-10 reserved */ - u8 res6; - u8 res7; - u8 res8; - u8 res9; - u8 res10; -} __packed; - -#define HDMI_AUDIO_VERSION 0x01 - -#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */ -#define HDMI_AUDIO_CC_2 0x1 -#define HDMI_AUDIO_CC_3 0x2 -#define HDMI_AUDIO_CC_4 0x3 -#define HDMI_AUDIO_CC_5 0x4 -#define HDMI_AUDIO_CC_6 0x5 -#define HDMI_AUDIO_CC_7 0x6 -#define HDMI_AUDIO_CC_8 0x7 - -#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */ -#define HDMI_AUDIO_CT_PCM 0x1 -#define HDMI_AUDIO_CT_AC3 0x2 -#define HDMI_AUDIO_CT_MPEG1 0x3 -#define HDMI_AUDIO_CT_MP3 0x4 -#define HDMI_AUDIO_CT_MPEG2 0x5 -#define HDMI_AUDIO_CT_AAC_LC 0x6 -#define HDMI_AUDIO_CT_DTS 0x7 -#define HDMI_AUDIO_CT_ATRAC 0x8 -#define HDMI_AUDIO_CT_DSD 0x9 -#define HDMI_AUDIO_CT_E_AC3 0xa -#define HDMI_AUDIO_CT_DTS_HD 0xb -#define HDMI_AUDIO_CT_MLP 0xc -#define HDMI_AUDIO_CT_DST 0xd -#define HDMI_AUDIO_CT_WMA_PRO 0xe -#define HDMI_AUDIO_CT_CXT 0xf - -#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */ -#define HDMI_AUIDO_SF_32K 0x1 -#define HDMI_AUDIO_SF_44_1K 0x2 
-#define HDMI_AUDIO_SF_48K 0x3 -#define HDMI_AUDIO_SF_88_2K 0x4 -#define HDMI_AUDIO_SF_96K 0x5 -#define HDMI_AUDIO_SF_176_4K 0x6 -#define HDMI_AUDIO_SF_192K 0x7 - -#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */ -#define HDMI_AUDIO_SS_16BIT 0x1 -#define HDMI_AUDIO_SS_20BIT 0x2 -#define HDMI_AUDIO_SS_24BIT 0x3 - -#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */ -#define HDMI_AUDIO_CXT_HE_AAC 0x1 -#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2 -#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3 - -/* all fields little endian */ -struct hdmi_stereo_infoframe { - /* PB0 */ - u8 csum; - - /* PB1 */ - u8 regid0; - - /* PB2 */ - u8 regid1; - - /* PB3 */ - u8 regid2; - - /* PB4 */ - unsigned res1:5; - unsigned hdmi_video_format:3; - - /* PB5 */ - unsigned res2:4; - unsigned _3d_structure:4; - - /* PB6*/ - unsigned res3:4; - unsigned _3d_ext_data:4; -} __packed; - -#define HDMI_VENDOR_VERSION 0x01 - -/* register definitions */ -#define HDMI_CTXSW 0x00 - -#define HDMI_NV_PDISP_SOR_STATE0 0x01 -#define SOR_STATE_UPDATE (1 << 0) - -#define HDMI_NV_PDISP_SOR_STATE1 0x02 -#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0) -#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2) -#define SOR_STATE_ATTACHED (1 << 3) - -#define HDMI_NV_PDISP_SOR_STATE2 0x03 -#define SOR_STATE_ASY_OWNER_NONE (0 << 0) -#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0) -#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4) -#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4) -#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4) -#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4) -#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6) -#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6) -#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6) -#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8) -#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8) -#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12) -#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12) -#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13) -#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13) -#define SOR_STATE_ASY_DEPOL_POS (0 << 14) -#define SOR_STATE_ASY_DEPOL_NEG (1 << 14) - -#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04 -#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05 -#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06 -#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07 -#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08 -#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09 -#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a -#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b -#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c -#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d -#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e -#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f -#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10 -#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11 -#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12 -#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13 -#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14 -#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15 -#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16 -#define HDMI_NV_PDISP_RG_HDCP_RI 0x17 -#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18 -#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19 -#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a -#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b -#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c -#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d - -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20 -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21 -#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 
0x23 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28 -#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29 - -#define INFOFRAME_CTRL_ENABLE (1 << 0) - -#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0) -#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) -#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16) - -#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a -#define GENERIC_CTRL_ENABLE (1 << 0) -#define GENERIC_CTRL_OTHER (1 << 4) -#define GENERIC_CTRL_SINGLE (1 << 8) -#define GENERIC_CTRL_HBLANK (1 << 12) -#define GENERIC_CTRL_AUDIO (1 << 16) - -#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b -#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30 -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31 -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32 -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33 -#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34 - -#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35 -#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36 -#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37 -#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38 -#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39 -#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a -#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b -#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c -#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d -#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e -#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f -#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40 -#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41 -#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42 -#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43 - -#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8) -#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0) -#define ACR_ENABLE (1 << 31) - -#define HDMI_NV_PDISP_HDMI_CTRL 0x44 -#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0) -#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16) -#define HDMI_CTRL_ENABLE (1 << 30) - -#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45 -#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46 -#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0) -#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16) -#define VSYNC_WINDOW_ENABLE (1 << 31) - -#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47 -#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48 -#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49 -#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a -#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b -#define HDMI_NV_PDISP_HDMI_EMU0 0x4c -#define HDMI_NV_PDISP_HDMI_EMU1 0x4d -#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e - -#define HDMI_NV_PDISP_HDMI_SPARE 0x4f -#define SPARE_HW_CTS (1 << 0) -#define SPARE_FORCE_SW_CTS (1 << 1) -#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16) - -#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50 -#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51 -#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53 -#define HDMI_NV_PDISP_SOR_CAP 0x54 -#define HDMI_NV_PDISP_SOR_PWR 0x55 -#define SOR_PWR_NORMAL_STATE_PD (0 << 0) -#define SOR_PWR_NORMAL_STATE_PU (1 << 0) -#define SOR_PWR_NORMAL_START_NORMAL 
(0 << 1) -#define SOR_PWR_NORMAL_START_ALT (1 << 1) -#define SOR_PWR_SAFE_STATE_PD (0 << 16) -#define SOR_PWR_SAFE_STATE_PU (1 << 16) -#define SOR_PWR_SETTING_NEW_DONE (0 << 31) -#define SOR_PWR_SETTING_NEW_PENDING (1 << 31) -#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31) - -#define HDMI_NV_PDISP_SOR_TEST 0x56 -#define HDMI_NV_PDISP_SOR_PLL0 0x57 -#define SOR_PLL_PWR (1 << 0) -#define SOR_PLL_PDBG (1 << 1) -#define SOR_PLL_VCAPD (1 << 2) -#define SOR_PLL_PDPORT (1 << 3) -#define SOR_PLL_RESISTORSEL (1 << 4) -#define SOR_PLL_PULLDOWN (1 << 5) -#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8) -#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12) -#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16) -#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24) -#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28) - -#define HDMI_NV_PDISP_SOR_PLL1 0x58 -#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8) -#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9) -#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20) -#define SOR_PLL_PE_EN (1 << 28) -#define SOR_PLL_HALF_FULL_PE (1 << 29) -#define SOR_PLL_S_D_PIN_PE (1 << 30) - -#define HDMI_NV_PDISP_SOR_PLL2 0x59 - -#define HDMI_NV_PDISP_SOR_CSTM 0x5a -#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24) - -#define HDMI_NV_PDISP_SOR_LVDS 0x5b -#define HDMI_NV_PDISP_SOR_CRCA 0x5c -#define HDMI_NV_PDISP_SOR_CRCB 0x5d -#define HDMI_NV_PDISP_SOR_BLANK 0x5e -#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f -#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0) -#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4) -#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8) -#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12) -#define SOR_SEQ_PC(x) (((x) & 0xf) << 16) -#define SOR_SEQ_STATUS (1 << 28) -#define SOR_SEQ_SWITCH (1 << 30) - -#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x)) - -#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0) -#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12) -#define SOR_SEQ_INST_HALT (1 << 15) -#define SOR_SEQ_INST_PIN_A_LOW (0 << 21) -#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21) -#define SOR_SEQ_INST_PIN_B_LOW (0 << 22) -#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22) -#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23) - -#define HDMI_NV_PDISP_SOR_VCRCA0 0x72 -#define HDMI_NV_PDISP_SOR_VCRCA1 0x73 -#define HDMI_NV_PDISP_SOR_CCRCA0 0x74 -#define HDMI_NV_PDISP_SOR_CCRCA1 0x75 -#define HDMI_NV_PDISP_SOR_EDATAA0 0x76 -#define HDMI_NV_PDISP_SOR_EDATAA1 0x77 -#define HDMI_NV_PDISP_SOR_COUNTA0 0x78 -#define HDMI_NV_PDISP_SOR_COUNTA1 0x79 -#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a -#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b -#define HDMI_NV_PDISP_SOR_TRIG 0x7c -#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d - -#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e -#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0) -#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8) -#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16) -#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24) -#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31) - -#define DRIVE_CURRENT_1_500_mA 0x00 -#define DRIVE_CURRENT_1_875_mA 0x01 -#define DRIVE_CURRENT_2_250_mA 0x02 -#define DRIVE_CURRENT_2_625_mA 0x03 -#define DRIVE_CURRENT_3_000_mA 0x04 -#define DRIVE_CURRENT_3_375_mA 0x05 -#define DRIVE_CURRENT_3_750_mA 0x06 -#define DRIVE_CURRENT_4_125_mA 0x07 -#define DRIVE_CURRENT_4_500_mA 0x08 -#define DRIVE_CURRENT_4_875_mA 0x09 -#define DRIVE_CURRENT_5_250_mA 0x0a -#define DRIVE_CURRENT_5_625_mA 0x0b -#define DRIVE_CURRENT_6_000_mA 0x0c -#define DRIVE_CURRENT_6_375_mA 0x0d -#define DRIVE_CURRENT_6_750_mA 0x0e -#define DRIVE_CURRENT_7_125_mA 0x0f -#define DRIVE_CURRENT_7_500_mA 0x10 -#define 
DRIVE_CURRENT_7_875_mA 0x11 -#define DRIVE_CURRENT_8_250_mA 0x12 -#define DRIVE_CURRENT_8_625_mA 0x13 -#define DRIVE_CURRENT_9_000_mA 0x14 -#define DRIVE_CURRENT_9_375_mA 0x15 -#define DRIVE_CURRENT_9_750_mA 0x16 -#define DRIVE_CURRENT_10_125_mA 0x17 -#define DRIVE_CURRENT_10_500_mA 0x18 -#define DRIVE_CURRENT_10_875_mA 0x19 -#define DRIVE_CURRENT_11_250_mA 0x1a -#define DRIVE_CURRENT_11_625_mA 0x1b -#define DRIVE_CURRENT_12_000_mA 0x1c -#define DRIVE_CURRENT_12_375_mA 0x1d -#define DRIVE_CURRENT_12_750_mA 0x1e -#define DRIVE_CURRENT_13_125_mA 0x1f -#define DRIVE_CURRENT_13_500_mA 0x20 -#define DRIVE_CURRENT_13_875_mA 0x21 -#define DRIVE_CURRENT_14_250_mA 0x22 -#define DRIVE_CURRENT_14_625_mA 0x23 -#define DRIVE_CURRENT_15_000_mA 0x24 -#define DRIVE_CURRENT_15_375_mA 0x25 -#define DRIVE_CURRENT_15_750_mA 0x26 -#define DRIVE_CURRENT_16_125_mA 0x27 -#define DRIVE_CURRENT_16_500_mA 0x28 -#define DRIVE_CURRENT_16_875_mA 0x29 -#define DRIVE_CURRENT_17_250_mA 0x2a -#define DRIVE_CURRENT_17_625_mA 0x2b -#define DRIVE_CURRENT_18_000_mA 0x2c -#define DRIVE_CURRENT_18_375_mA 0x2d -#define DRIVE_CURRENT_18_750_mA 0x2e -#define DRIVE_CURRENT_19_125_mA 0x2f -#define DRIVE_CURRENT_19_500_mA 0x30 -#define DRIVE_CURRENT_19_875_mA 0x31 -#define DRIVE_CURRENT_20_250_mA 0x32 -#define DRIVE_CURRENT_20_625_mA 0x33 -#define DRIVE_CURRENT_21_000_mA 0x34 -#define DRIVE_CURRENT_21_375_mA 0x35 -#define DRIVE_CURRENT_21_750_mA 0x36 -#define DRIVE_CURRENT_22_125_mA 0x37 -#define DRIVE_CURRENT_22_500_mA 0x38 -#define DRIVE_CURRENT_22_875_mA 0x39 -#define DRIVE_CURRENT_23_250_mA 0x3a -#define DRIVE_CURRENT_23_625_mA 0x3b -#define DRIVE_CURRENT_24_000_mA 0x3c -#define DRIVE_CURRENT_24_375_mA 0x3d -#define DRIVE_CURRENT_24_750_mA 0x3e - -#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f -#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80 -#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81 - -#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x)) -#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0) -#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16) - -#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89 -#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a -#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b -#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0) -#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20) -#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20) -#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20) -#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24) - -#define HDMI_NV_PDISP_AUDIO_N 0x8c -#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0) -#define AUDIO_N_RESETF (1 << 20) -#define AUDIO_N_GENERATE_NORMAL (0 << 24) -#define AUDIO_N_GENERATE_ALTERNATE (1 << 24) - -#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94 -#define HDMI_NV_PDISP_SOR_REFCLK 0x95 -#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8) -#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6) - -#define HDMI_NV_PDISP_CRC_CONTROL 0x96 -#define HDMI_NV_PDISP_INPUT_CONTROL 0x97 -#define HDMI_SRC_DISPLAYA (0 << 0) -#define HDMI_SRC_DISPLAYB (1 << 0) -#define ARM_VIDEO_RANGE_FULL (0 << 1) -#define ARM_VIDEO_RANGE_LIMITED (1 << 1) - -#define HDMI_NV_PDISP_SCRATCH 0x98 -#define HDMI_NV_PDISP_PE_CURRENT 0x99 -#define PE_CURRENT0(x) (((x) & 0xf) << 0) -#define PE_CURRENT1(x) (((x) & 0xf) << 8) -#define PE_CURRENT2(x) (((x) & 0xf) << 16) -#define PE_CURRENT3(x) (((x) & 0xf) << 24) - -#define PE_CURRENT_0_0_mA 0x0 -#define PE_CURRENT_0_5_mA 0x1 -#define PE_CURRENT_1_0_mA 0x2 -#define PE_CURRENT_1_5_mA 0x3 -#define PE_CURRENT_2_0_mA 0x4 -#define PE_CURRENT_2_5_mA 0x5 -#define PE_CURRENT_3_0_mA 0x6 -#define PE_CURRENT_3_5_mA 0x7 
-#define PE_CURRENT_4_0_mA 0x8 -#define PE_CURRENT_4_5_mA 0x9 -#define PE_CURRENT_5_0_mA 0xa -#define PE_CURRENT_5_5_mA 0xb -#define PE_CURRENT_6_0_mA 0xc -#define PE_CURRENT_6_5_mA 0xd -#define PE_CURRENT_7_0_mA 0xe -#define PE_CURRENT_7_5_mA 0xf - -#define HDMI_NV_PDISP_KEY_CTRL 0x9a -#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b -#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c -#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d -#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e -#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f -#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0 -#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1 -#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2 -#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3 - -#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac -#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29) -#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc -#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd - -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5 -#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5 - -#endif /* TEGRA_HDMI_H */ diff --git a/trunk/drivers/gpu/drm/tegra/host1x.c b/trunk/drivers/gpu/drm/tegra/host1x.c deleted file mode 100644 index bdb97a564d82..000000000000 --- a/trunk/drivers/gpu/drm/tegra/host1x.c +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include - -#include "drm.h" - -struct host1x_drm_client { - struct host1x_client *client; - struct device_node *np; - struct list_head list; -}; - -static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np) -{ - struct host1x_drm_client *client; - - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) - return -ENOMEM; - - INIT_LIST_HEAD(&client->list); - client->np = of_node_get(np); - - list_add_tail(&client->list, &host1x->drm_clients); - - return 0; -} - -static int host1x_activate_drm_client(struct host1x *host1x, - struct host1x_drm_client *drm, - struct host1x_client *client) -{ - mutex_lock(&host1x->drm_clients_lock); - list_del_init(&drm->list); - list_add_tail(&drm->list, &host1x->drm_active); - drm->client = client; - mutex_unlock(&host1x->drm_clients_lock); - - return 0; -} - -static int host1x_remove_drm_client(struct host1x *host1x, - struct host1x_drm_client *client) -{ - mutex_lock(&host1x->drm_clients_lock); - list_del_init(&client->list); - mutex_unlock(&host1x->drm_clients_lock); - - of_node_put(client->np); - kfree(client); - - return 0; -} - -static int host1x_parse_dt(struct host1x *host1x) -{ - static const char * const compat[] = { - "nvidia,tegra20-dc", - "nvidia,tegra20-hdmi", - "nvidia,tegra30-dc", - "nvidia,tegra30-hdmi", - }; - unsigned int i; - int err; - - for (i = 0; i < ARRAY_SIZE(compat); i++) { - struct device_node *np; - - for_each_child_of_node(host1x->dev->of_node, np) { - if (of_device_is_compatible(np, compat[i]) && - of_device_is_available(np)) { - err = host1x_add_drm_client(host1x, np); - if (err < 0) - return err; - } - } - } - - return 0; -} - -static int tegra_host1x_probe(struct platform_device *pdev) -{ - struct host1x *host1x; - struct resource *regs; - int err; - - host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL); - if (!host1x) - return -ENOMEM; - - mutex_init(&host1x->drm_clients_lock); - INIT_LIST_HEAD(&host1x->drm_clients); - INIT_LIST_HEAD(&host1x->drm_active); - mutex_init(&host1x->clients_lock); - INIT_LIST_HEAD(&host1x->clients); - host1x->dev = &pdev->dev; - - err = host1x_parse_dt(host1x); - if (err < 0) { - dev_err(&pdev->dev, "failed to parse DT: %d\n", err); - return err; - } - - host1x->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(host1x->clk)) - return PTR_ERR(host1x->clk); - - err = clk_prepare_enable(host1x->clk); - if (err < 0) - return err; - - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - err = -ENXIO; - goto err; - } - - err = platform_get_irq(pdev, 0); - if (err < 0) - goto err; - - host1x->syncpt = err; - - err = platform_get_irq(pdev, 1); - if (err < 0) - goto err; - - host1x->irq = err; - - host1x->regs = devm_request_and_ioremap(&pdev->dev, regs); - if (!host1x->regs) { - err = -EADDRNOTAVAIL; - goto err; - } - - platform_set_drvdata(pdev, host1x); - - return 0; - -err: - clk_disable_unprepare(host1x->clk); - return err; -} - -static int tegra_host1x_remove(struct platform_device *pdev) -{ - struct host1x *host1x = platform_get_drvdata(pdev); - - clk_disable_unprepare(host1x->clk); - - return 0; -} - -int host1x_drm_init(struct host1x *host1x, struct drm_device *drm) -{ - struct host1x_client *client; - - mutex_lock(&host1x->clients_lock); - - list_for_each_entry(client, &host1x->clients, list) { - if (client->ops && client->ops->drm_init) { - int err = client->ops->drm_init(client, drm); - if (err < 0) { - dev_err(host1x->dev, - "DRM setup failed for %s: %d\n", - dev_name(client->dev), err); 
- return err; - } - } - } - - mutex_unlock(&host1x->clients_lock); - - return 0; -} - -int host1x_drm_exit(struct host1x *host1x) -{ - struct platform_device *pdev = to_platform_device(host1x->dev); - struct host1x_client *client; - - if (!host1x->drm) - return 0; - - mutex_lock(&host1x->clients_lock); - - list_for_each_entry_reverse(client, &host1x->clients, list) { - if (client->ops && client->ops->drm_exit) { - int err = client->ops->drm_exit(client); - if (err < 0) { - dev_err(host1x->dev, - "DRM cleanup failed for %s: %d\n", - dev_name(client->dev), err); - return err; - } - } - } - - mutex_unlock(&host1x->clients_lock); - - drm_platform_exit(&tegra_drm_driver, pdev); - host1x->drm = NULL; - - return 0; -} - -int host1x_register_client(struct host1x *host1x, struct host1x_client *client) -{ - struct host1x_drm_client *drm, *tmp; - int err; - - mutex_lock(&host1x->clients_lock); - list_add_tail(&client->list, &host1x->clients); - mutex_unlock(&host1x->clients_lock); - - list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list) - if (drm->np == client->dev->of_node) - host1x_activate_drm_client(host1x, drm, client); - - if (list_empty(&host1x->drm_clients)) { - struct platform_device *pdev = to_platform_device(host1x->dev); - - err = drm_platform_init(&tegra_drm_driver, pdev); - if (err < 0) { - dev_err(host1x->dev, "drm_platform_init(): %d\n", err); - return err; - } - } - - return 0; -} - -int host1x_unregister_client(struct host1x *host1x, - struct host1x_client *client) -{ - struct host1x_drm_client *drm, *tmp; - int err; - - list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) { - if (drm->client == client) { - err = host1x_drm_exit(host1x); - if (err < 0) { - dev_err(host1x->dev, "host1x_drm_exit(): %d\n", - err); - return err; - } - - host1x_remove_drm_client(host1x, drm); - break; - } - } - - mutex_lock(&host1x->clients_lock); - list_del_init(&client->list); - mutex_unlock(&host1x->clients_lock); - - return 0; -} - -static struct of_device_id tegra_host1x_of_match[] = { - { .compatible = "nvidia,tegra30-host1x", }, - { .compatible = "nvidia,tegra20-host1x", }, - { }, -}; -MODULE_DEVICE_TABLE(of, tegra_host1x_of_match); - -struct platform_driver tegra_host1x_driver = { - .driver = { - .name = "tegra-host1x", - .owner = THIS_MODULE, - .of_match_table = tegra_host1x_of_match, - }, - .probe = tegra_host1x_probe, - .remove = tegra_host1x_remove, -}; - -static int __init tegra_host1x_init(void) -{ - int err; - - err = platform_driver_register(&tegra_host1x_driver); - if (err < 0) - return err; - - err = platform_driver_register(&tegra_dc_driver); - if (err < 0) - goto unregister_host1x; - - err = platform_driver_register(&tegra_hdmi_driver); - if (err < 0) - goto unregister_dc; - - return 0; - -unregister_dc: - platform_driver_unregister(&tegra_dc_driver); -unregister_host1x: - platform_driver_unregister(&tegra_host1x_driver); - return err; -} -module_init(tegra_host1x_init); - -static void __exit tegra_host1x_exit(void) -{ - platform_driver_unregister(&tegra_hdmi_driver); - platform_driver_unregister(&tegra_dc_driver); - platform_driver_unregister(&tegra_host1x_driver); -} -module_exit(tegra_host1x_exit); - -MODULE_AUTHOR("Thierry Reding "); -MODULE_DESCRIPTION("NVIDIA Tegra DRM driver"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/gpu/drm/tegra/output.c b/trunk/drivers/gpu/drm/tegra/output.c deleted file mode 100644 index 8140fc6c34d8..000000000000 --- a/trunk/drivers/gpu/drm/tegra/output.c +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright (C) 2012 Avionic 
Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include - -#include "drm.h" - -static int tegra_connector_get_modes(struct drm_connector *connector) -{ - struct tegra_output *output = connector_to_output(connector); - struct edid *edid = NULL; - int err = 0; - - if (output->edid) - edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL); - else if (output->ddc) - edid = drm_get_edid(connector, output->ddc); - - drm_mode_connector_update_edid_property(connector, edid); - - if (edid) { - err = drm_add_edid_modes(connector, edid); - kfree(edid); - } - - return err; -} - -static int tegra_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct tegra_output *output = connector_to_output(connector); - enum drm_mode_status status = MODE_OK; - int err; - - err = tegra_output_check_mode(output, mode, &status); - if (err < 0) - return MODE_ERROR; - - return status; -} - -static struct drm_encoder * -tegra_connector_best_encoder(struct drm_connector *connector) -{ - struct tegra_output *output = connector_to_output(connector); - - return &output->encoder; -} - -static const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = tegra_connector_get_modes, - .mode_valid = tegra_connector_mode_valid, - .best_encoder = tegra_connector_best_encoder, -}; - -static enum drm_connector_status -tegra_connector_detect(struct drm_connector *connector, bool force) -{ - struct tegra_output *output = connector_to_output(connector); - enum drm_connector_status status = connector_status_unknown; - - if (gpio_is_valid(output->hpd_gpio)) { - if (gpio_get_value(output->hpd_gpio) == 0) - status = connector_status_disconnected; - else - status = connector_status_connected; - } else { - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) - status = connector_status_connected; - } - - return status; -} - -static void tegra_connector_destroy(struct drm_connector *connector) -{ - drm_sysfs_connector_remove(connector); - drm_connector_cleanup(connector); -} - -static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_helper_connector_dpms, - .detect = tegra_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = tegra_connector_destroy, -}; - -static void tegra_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = tegra_encoder_destroy, -}; - -static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode) -{ -} - -static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - return true; -} - -static void tegra_encoder_prepare(struct drm_encoder *encoder) -{ -} - -static void tegra_encoder_commit(struct drm_encoder *encoder) -{ -} - -static void tegra_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - struct tegra_output *output = encoder_to_output(encoder); - int err; - - err = tegra_output_enable(output); - if (err < 0) - dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err); -} - -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .dpms = tegra_encoder_dpms, - .mode_fixup = 
tegra_encoder_mode_fixup, - .prepare = tegra_encoder_prepare, - .commit = tegra_encoder_commit, - .mode_set = tegra_encoder_mode_set, -}; - -static irqreturn_t hpd_irq(int irq, void *data) -{ - struct tegra_output *output = data; - - drm_helper_hpd_irq_event(output->connector.dev); - - return IRQ_HANDLED; -} - -int tegra_output_parse_dt(struct tegra_output *output) -{ - enum of_gpio_flags flags; - struct device_node *ddc; - size_t size; - int err; - - if (!output->of_node) - output->of_node = output->dev->of_node; - - output->edid = of_get_property(output->of_node, "nvidia,edid", &size); - - ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0); - if (ddc) { - output->ddc = of_find_i2c_adapter_by_node(ddc); - if (!output->ddc) { - err = -EPROBE_DEFER; - of_node_put(ddc); - return err; - } - - of_node_put(ddc); - } - - if (!output->edid && !output->ddc) - return -ENODEV; - - output->hpd_gpio = of_get_named_gpio_flags(output->of_node, - "nvidia,hpd-gpio", 0, - &flags); - - return 0; -} - -int tegra_output_init(struct drm_device *drm, struct tegra_output *output) -{ - int connector, encoder, err; - - if (gpio_is_valid(output->hpd_gpio)) { - unsigned long flags; - - err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN, - "HDMI hotplug detect"); - if (err < 0) { - dev_err(output->dev, "gpio_request_one(): %d\n", err); - return err; - } - - err = gpio_to_irq(output->hpd_gpio); - if (err < 0) { - dev_err(output->dev, "gpio_to_irq(): %d\n", err); - goto free_hpd; - } - - output->hpd_irq = err; - - flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | - IRQF_ONESHOT; - - err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq, - flags, "hpd", output); - if (err < 0) { - dev_err(output->dev, "failed to request IRQ#%u: %d\n", - output->hpd_irq, err); - goto free_hpd; - } - - output->connector.polled = DRM_CONNECTOR_POLL_HPD; - } - - switch (output->type) { - case TEGRA_OUTPUT_RGB: - connector = DRM_MODE_CONNECTOR_LVDS; - encoder = DRM_MODE_ENCODER_LVDS; - break; - - case TEGRA_OUTPUT_HDMI: - connector = DRM_MODE_CONNECTOR_HDMIA; - encoder = DRM_MODE_ENCODER_TMDS; - break; - - default: - connector = DRM_MODE_CONNECTOR_Unknown; - encoder = DRM_MODE_ENCODER_NONE; - break; - } - - drm_connector_init(drm, &output->connector, &connector_funcs, - connector); - drm_connector_helper_add(&output->connector, &connector_helper_funcs); - - drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder); - drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); - - drm_mode_connector_attach_encoder(&output->connector, &output->encoder); - drm_sysfs_connector_add(&output->connector); - - output->encoder.possible_crtcs = 0x3; - - return 0; - -free_hpd: - gpio_free(output->hpd_gpio); - - return err; -} - -int tegra_output_exit(struct tegra_output *output) -{ - if (gpio_is_valid(output->hpd_gpio)) { - free_irq(output->hpd_irq, output); - gpio_free(output->hpd_gpio); - } - - if (output->ddc) - put_device(&output->ddc->dev); - - return 0; -} diff --git a/trunk/drivers/gpu/drm/tegra/rgb.c b/trunk/drivers/gpu/drm/tegra/rgb.c deleted file mode 100644 index ed4416f20260..000000000000 --- a/trunk/drivers/gpu/drm/tegra/rgb.c +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright (C) 2012 Avionic Design GmbH - * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include - -#include "drm.h" -#include "dc.h" - -struct tegra_rgb { - struct tegra_output output; - struct clk *clk_parent; - struct clk *clk; -}; - -static inline struct tegra_rgb *to_rgb(struct tegra_output *output) -{ - return container_of(output, struct tegra_rgb, output); -} - -struct reg_entry { - unsigned long offset; - unsigned long value; -}; - -static const struct reg_entry rgb_enable[] = { - { DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 }, - { DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 }, - { DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 }, -}; - -static const struct reg_entry rgb_disable[] = { - { DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa }, - { DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa }, - { DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa }, - { DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa }, - { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 }, - { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 }, - { DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 }, - { DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 }, - { DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 }, - { DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 }, -}; - -static void tegra_dc_write_regs(struct tegra_dc *dc, - const struct reg_entry *table, - unsigned int num) -{ - unsigned int i; - - for (i = 0; i < num; i++) - tegra_dc_writel(dc, table[i].value, table[i].offset); -} - -static int tegra_output_rgb_enable(struct tegra_output *output) -{ - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - - tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable)); - - return 0; -} - -static int tegra_output_rgb_disable(struct tegra_output *output) -{ - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - - tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable)); - - return 0; -} - -static int tegra_output_rgb_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk) -{ - struct tegra_rgb *rgb = to_rgb(output); - - return clk_set_parent(clk, rgb->clk_parent); -} - -static int tegra_output_rgb_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - /* - * FIXME: For now, always assume that the mode is okay. There are - * unresolved issues with clk_round_rate(), which doesn't always - * reliably report whether a frequency can be set or not. 
- */ - - *status = MODE_OK; - - return 0; -} - -static const struct tegra_output_ops rgb_ops = { - .enable = tegra_output_rgb_enable, - .disable = tegra_output_rgb_disable, - .setup_clock = tegra_output_rgb_setup_clock, - .check_mode = tegra_output_rgb_check_mode, -}; - -int tegra_dc_rgb_probe(struct tegra_dc *dc) -{ - struct device_node *np; - struct tegra_rgb *rgb; - int err; - - np = of_get_child_by_name(dc->dev->of_node, "rgb"); - if (!np || !of_device_is_available(np)) - return -ENODEV; - - rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL); - if (!rgb) - return -ENOMEM; - - rgb->clk = devm_clk_get(dc->dev, NULL); - if (IS_ERR(rgb->clk)) { - dev_err(dc->dev, "failed to get clock\n"); - return PTR_ERR(rgb->clk); - } - - rgb->clk_parent = devm_clk_get(dc->dev, "parent"); - if (IS_ERR(rgb->clk_parent)) { - dev_err(dc->dev, "failed to get parent clock\n"); - return PTR_ERR(rgb->clk_parent); - } - - err = clk_set_parent(rgb->clk, rgb->clk_parent); - if (err < 0) { - dev_err(dc->dev, "failed to set parent clock: %d\n", err); - return err; - } - - rgb->output.dev = dc->dev; - rgb->output.of_node = np; - - err = tegra_output_parse_dt(&rgb->output); - if (err < 0) - return err; - - dc->rgb = &rgb->output; - - return 0; -} - -int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) -{ - struct tegra_rgb *rgb = to_rgb(dc->rgb); - int err; - - if (!dc->rgb) - return -ENODEV; - - rgb->output.type = TEGRA_OUTPUT_RGB; - rgb->output.ops = &rgb_ops; - - err = tegra_output_init(dc->base.dev, &rgb->output); - if (err < 0) { - dev_err(dc->dev, "output setup failed: %d\n", err); - return err; - } - - /* - * By default, outputs can be associated with each display controller. - * RGB outputs are an exception, so we make sure they can be attached - * to only their parent display controller. - */ - rgb->output.encoder.possible_crtcs = 1 << dc->pipe; - - return 0; -} - -int tegra_dc_rgb_exit(struct tegra_dc *dc) -{ - if (dc->rgb) { - int err; - - err = tegra_output_disable(dc->rgb); - if (err < 0) { - dev_err(dc->dev, "output failed to disable: %d\n", err); - return err; - } - - err = tegra_output_exit(dc->rgb); - if (err < 0) { - dev_err(dc->dev, "output cleanup failed: %d\n", err); - return err; - } - - dc->rgb = NULL; - } - - return 0; -} diff --git a/trunk/drivers/gpu/drm/ttm/ttm_bo.c b/trunk/drivers/gpu/drm/ttm/ttm_bo.c index a9151337d5b9..bf6e4b5a73b5 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_bo.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_bo.c @@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) { if (interruptible) { return wait_event_interruptible(bo->event_queue, - !ttm_bo_is_reserved(bo)); + atomic_read(&bo->reserved) == 0); } else { - wait_event(bo->event_queue, !ttm_bo_is_reserved(bo)); + wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); return 0; } } @@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { @@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, struct ttm_bo_global *glob = bo->glob; int ret; - while (unlikely(atomic_read(&bo->reserved) != 0)) { + while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { /** * Deadlock avoidance for multi-bo reserving. 
*/ @@ -249,7 +249,6 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, return ret; } - atomic_set(&bo->reserved, 1); if (use_sequence) { /** * Wake up waiters that may need to recheck for deadlock, @@ -366,7 +365,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, bool evict, bool interruptible, - bool no_wait_gpu) + bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); @@ -420,12 +419,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) - ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); + ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, - no_wait_gpu, mem); + no_wait_reserve, no_wait_gpu, mem); else - ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); if (ret) { if (bdev->driver->move_notify) { @@ -488,33 +487,40 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); /* - * Since the final reference to this bo may not be dropped by - * the current task we have to put a memory barrier here to make - * sure the changes done in this function are always visible. - * - * This function only needs protection against the final kref_put. + * Make processes trying to reserve really pick it up. */ - smp_mb__before_atomic_dec(); + smp_mb__after_atomic_dec(); + wake_up_all(&bo->event_queue); } static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; - struct ttm_bo_driver *driver = bdev->driver; + struct ttm_bo_driver *driver; void *sync_obj = NULL; + void *sync_obj_arg; int put_count; int ret; - spin_lock(&glob->lru_lock); - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); - spin_lock(&bdev->fence_lock); (void) ttm_bo_wait(bo, false, false, true); - if (!ret && !bo->sync_obj) { + if (!bo->sync_obj) { + + spin_lock(&glob->lru_lock); + + /** + * Lock inversion between bo:reserve and bdev::fence_lock here, + * but that's OK, since we're only trylocking. 
+ */ + + ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + + if (unlikely(ret == -EBUSY)) + goto queue; + spin_unlock(&bdev->fence_lock); put_count = ttm_bo_del_from_lru(bo); @@ -524,22 +530,22 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_list_ref_sub(bo, put_count, true); return; + } else { + spin_lock(&glob->lru_lock); } +queue: + driver = bdev->driver; if (bo->sync_obj) sync_obj = driver->sync_obj_ref(bo->sync_obj); - spin_unlock(&bdev->fence_lock); - - if (!ret) { - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); - } + sync_obj_arg = bo->sync_obj_arg; kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); + spin_unlock(&bdev->fence_lock); if (sync_obj) { - driver->sync_obj_flush(sync_obj); + driver->sync_obj_flush(sync_obj, sync_obj_arg); driver->sync_obj_unref(&sync_obj); } schedule_delayed_work(&bdev->wq, @@ -547,84 +553,68 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) } /** - * function ttm_bo_cleanup_refs_and_unlock + * function ttm_bo_cleanup_refs * If bo idle, remove from delayed- and lru lists, and unref. * If not idle, do nothing. * - * Must be called with lru_lock and reservation held, this function - * will drop both before returning. - * * @interruptible Any sleeps should occur interruptibly. + * @no_wait_reserve Never wait for reserve. Return -EBUSY instead. * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. */ -static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait_gpu) +static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, + bool interruptible, + bool no_wait_reserve, + bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_driver *driver = bdev->driver; struct ttm_bo_global *glob = bo->glob; int put_count; - int ret; + int ret = 0; +retry: spin_lock(&bdev->fence_lock); - ret = ttm_bo_wait(bo, false, false, true); + ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); + spin_unlock(&bdev->fence_lock); - if (ret && !no_wait_gpu) { - void *sync_obj; + if (unlikely(ret != 0)) + return ret; - /* - * Take a reference to the fence and unreserve, - * at this point the buffer should be dead, so - * no new sync objects can be attached. - */ - sync_obj = driver->sync_obj_ref(&bo->sync_obj); - spin_unlock(&bdev->fence_lock); +retry_reserve: + spin_lock(&glob->lru_lock); - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + if (unlikely(list_empty(&bo->ddestroy))) { spin_unlock(&glob->lru_lock); + return 0; + } - ret = driver->sync_obj_wait(sync_obj, false, interruptible); - driver->sync_obj_unref(&sync_obj); - if (ret) - return ret; + ret = ttm_bo_reserve_locked(bo, false, true, false, 0); - /* - * remove sync_obj with ttm_bo_wait, the wait should be - * finished, and no new wait object should have been added. - */ - spin_lock(&bdev->fence_lock); - ret = ttm_bo_wait(bo, false, false, true); - WARN_ON(ret); - spin_unlock(&bdev->fence_lock); - if (ret) + if (unlikely(ret == -EBUSY)) { + spin_unlock(&glob->lru_lock); + if (likely(!no_wait_reserve)) + ret = ttm_bo_wait_unreserved(bo, interruptible); + if (unlikely(ret != 0)) return ret; - spin_lock(&glob->lru_lock); - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + goto retry_reserve; + } - /* - * We raced, and lost, someone else holds the reservation now, - * and is probably busy in ttm_bo_cleanup_memtype_use. 
- * - * Even if it's not the case, because we finished waiting any - * delayed destruction would succeed, so just return success - * here. - */ - if (ret) { - spin_unlock(&glob->lru_lock); - return 0; - } - } else - spin_unlock(&bdev->fence_lock); + BUG_ON(ret != 0); + + /** + * We can re-check for sync object without taking + * the bo::lock since setting the sync object requires + * also bo::reserved. A busy object at this point may + * be caused by another thread recently starting an accelerated + * eviction. + */ - if (ret || unlikely(list_empty(&bo->ddestroy))) { + if (unlikely(bo->sync_obj)) { atomic_set(&bo->reserved, 0); wake_up_all(&bo->event_queue); spin_unlock(&glob->lru_lock); - return ret; + goto retry; } put_count = ttm_bo_del_from_lru(bo); @@ -667,13 +657,9 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) kref_get(&nentry->list_kref); } - ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0); - if (!ret) - ret = ttm_bo_cleanup_refs_and_unlock(entry, false, - !remove_all); - else - spin_unlock(&glob->lru_lock); - + spin_unlock(&glob->lru_lock); + ret = ttm_bo_cleanup_refs(entry, false, !remove_all, + !remove_all); kref_put(&entry->list_kref, ttm_bo_release_list); entry = nentry; @@ -711,7 +697,6 @@ static void ttm_bo_release(struct kref *kref) struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; - write_lock(&bdev->vm_lock); if (likely(bo->vm_node != NULL)) { rb_erase(&bo->vm_rb, &bdev->addr_space_rb); drm_mm_put_block(bo->vm_node); @@ -723,14 +708,18 @@ static void ttm_bo_release(struct kref *kref) ttm_mem_io_unlock(man); ttm_bo_cleanup_refs_or_queue(bo); kref_put(&bo->list_kref, ttm_bo_release_list); + write_lock(&bdev->vm_lock); } void ttm_bo_unref(struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo = *p_bo; + struct ttm_bo_device *bdev = bo->bdev; *p_bo = NULL; + write_lock(&bdev->vm_lock); kref_put(&bo->kref, ttm_bo_release); + write_unlock(&bdev->vm_lock); } EXPORT_SYMBOL(ttm_bo_unref); @@ -749,7 +738,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, - bool no_wait_gpu) + bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg evict_mem; @@ -767,7 +756,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, goto out; } - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); evict_mem = bo->mem; evict_mem.mm_node = NULL; @@ -780,7 +769,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, placement.num_busy_placement = 0; bdev->driver->evict_flags(bo, &placement); ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, - no_wait_gpu); + no_wait_reserve, no_wait_gpu); if (ret) { if (ret != -ERESTARTSYS) { pr_err("Failed to find memory space for buffer 0x%p eviction\n", @@ -791,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, } ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, - no_wait_gpu); + no_wait_reserve, no_wait_gpu); if (ret) { if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); @@ -805,35 +794,51 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, static int ttm_mem_evict_first(struct ttm_bo_device *bdev, uint32_t mem_type, - bool interruptible, + bool interruptible, bool no_wait_reserve, bool no_wait_gpu) 
{ struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_buffer_object *bo; - int ret = -EBUSY, put_count; + int ret, put_count = 0; +retry: spin_lock(&glob->lru_lock); - list_for_each_entry(bo, &man->lru, lru) { - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); - if (!ret) - break; - } - - if (ret) { + if (list_empty(&man->lru)) { spin_unlock(&glob->lru_lock); - return ret; + return -EBUSY; } + bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); kref_get(&bo->list_kref); if (!list_empty(&bo->ddestroy)) { - ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, - no_wait_gpu); + spin_unlock(&glob->lru_lock); + ret = ttm_bo_cleanup_refs(bo, interruptible, + no_wait_reserve, no_wait_gpu); kref_put(&bo->list_kref, ttm_bo_release_list); + return ret; } + ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + + if (unlikely(ret == -EBUSY)) { + spin_unlock(&glob->lru_lock); + if (likely(!no_wait_reserve)) + ret = ttm_bo_wait_unreserved(bo, interruptible); + + kref_put(&bo->list_kref, ttm_bo_release_list); + + /** + * We *need* to retry after releasing the lru lock. + */ + + if (unlikely(ret != 0)) + return ret; + goto retry; + } + put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); @@ -841,7 +846,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, ttm_bo_list_ref_sub(bo, put_count, true); - ret = ttm_bo_evict(bo, interruptible, no_wait_gpu); + ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); ttm_bo_unreserve(bo); kref_put(&bo->list_kref, ttm_bo_release_list); @@ -866,6 +871,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, bool interruptible, + bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; @@ -878,8 +884,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ret; if (mem->mm_node) break; - ret = ttm_mem_evict_first(bdev, mem_type, - interruptible, no_wait_gpu); + ret = ttm_mem_evict_first(bdev, mem_type, interruptible, + no_wait_reserve, no_wait_gpu); if (unlikely(ret != 0)) return ret; } while (1); @@ -944,7 +950,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, - bool interruptible, + bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; @@ -1035,7 +1041,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, } ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, - interruptible, no_wait_gpu); + interruptible, no_wait_reserve, no_wait_gpu); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; return 0; @@ -1048,16 +1054,26 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_mem_space); +int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) +{ + if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) + return -EBUSY; + + return wait_event_interruptible(bo->event_queue, + atomic_read(&bo->cpu_writers) == 0); +} +EXPORT_SYMBOL(ttm_bo_wait_cpu); + int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, - bool interruptible, + bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { int ret = 0; struct ttm_mem_reg mem; struct ttm_bo_device *bdev = bo->bdev; - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); /* * FIXME: It's possible to pipeline buffer moves. 
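ttm_bo_wait_cpu(), reintroduced above, either bails out with -EBUSY or sleeps until cpu_writers drains to zero; the release path further down wakes the queue. A minimal user-space analogue of that grab/wait/release protocol, with illustrative names only:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  idle = PTHREAD_COND_INITIALIZER;
static int cpu_writers;

static void write_grab(void)
{
        pthread_mutex_lock(&lock);
        cpu_writers++;
        pthread_mutex_unlock(&lock);
}

static void write_release(void)
{
        pthread_mutex_lock(&lock);
        if (--cpu_writers == 0)
                pthread_cond_broadcast(&idle);  /* wake_up_all(&bo->event_queue) */
        pthread_mutex_unlock(&lock);
}

static int wait_cpu(int no_wait)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (cpu_writers > 0 && no_wait)
                ret = -EBUSY;
        else
                while (cpu_writers > 0)
                        pthread_cond_wait(&idle, &lock);
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        write_grab();
        printf("no_wait while held: %d\n", wait_cpu(1)); /* -EBUSY */
        write_release();
        printf("after release: %d\n", wait_cpu(1));      /* 0 */
        return 0;
}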
@@ -1077,12 +1093,10 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, /* * Determine where to move the buffer. */ - ret = ttm_bo_mem_space(bo, placement, &mem, - interruptible, no_wait_gpu); + ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); if (ret) goto out_unlock; - ret = ttm_bo_handle_move_mem(bo, &mem, false, - interruptible, no_wait_gpu); + ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); out_unlock: if (ret && mem.mm_node) ttm_bo_mem_put(bo, &mem); @@ -1111,12 +1125,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, - bool interruptible, + bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { int ret; - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); /* Check that range is valid */ if (placement->lpfn || placement->fpfn) if (placement->fpfn > placement->lpfn || @@ -1127,8 +1141,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, */ ret = ttm_bo_mem_compat(placement, &bo->mem); if (ret < 0) { - ret = ttm_bo_move_buffer(bo, placement, interruptible, - no_wait_gpu); + ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); if (ret) return ret; } else { @@ -1166,6 +1179,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, + unsigned long buffer_start, bool interruptible, struct file *persistent_swap_storage, size_t acc_size, @@ -1186,6 +1200,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, return -ENOMEM; } + size += buffer_start & ~PAGE_MASK; num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; if (num_pages == 0) { pr_err("Illegal buffer object size\n"); @@ -1218,6 +1233,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.page_alignment = page_alignment; bo->mem.bus.io_reserved_vm = false; bo->mem.bus.io_reserved_count = 0; + bo->buffer_start = buffer_start & PAGE_MASK; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); bo->seq_valid = false; @@ -1241,7 +1257,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, goto out_err; } - ret = ttm_bo_validate(bo, placement, interruptible, false); + ret = ttm_bo_validate(bo, placement, interruptible, false, false); if (ret) goto out_err; @@ -1290,6 +1306,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, + unsigned long buffer_start, bool interruptible, struct file *persistent_swap_storage, struct ttm_buffer_object **p_bo) @@ -1304,8 +1321,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev, acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, - interruptible, persistent_swap_storage, acc_size, - NULL, NULL); + buffer_start, interruptible, + persistent_swap_storage, acc_size, NULL, NULL); if (likely(ret == 0)) *p_bo = bo; @@ -1327,7 +1344,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, spin_lock(&glob->lru_lock); while (!list_empty(&man->lru)) { spin_unlock(&glob->lru_lock); - ret = ttm_mem_evict_first(bdev, mem_type, false, false); + ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); if (ret) { if (allow_errors) { return ret; @@ -1560,6 +1577,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, goto out_no_addr_mm; INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); + bdev->nice_mode 
= true; INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = NULL; bdev->glob = glob; @@ -1703,6 +1721,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, struct ttm_bo_driver *driver = bo->bdev->driver; struct ttm_bo_device *bdev = bo->bdev; void *sync_obj; + void *sync_obj_arg; int ret = 0; if (likely(bo->sync_obj == NULL)) @@ -1710,7 +1729,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, while (bo->sync_obj) { - if (driver->sync_obj_signaled(bo->sync_obj)) { + if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); @@ -1724,8 +1743,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, return -EBUSY; sync_obj = driver->sync_obj_ref(bo->sync_obj); + sync_obj_arg = bo->sync_obj_arg; spin_unlock(&bdev->fence_lock); - ret = driver->sync_obj_wait(sync_obj, + ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, lazy, interruptible); if (unlikely(ret != 0)) { driver->sync_obj_unref(&sync_obj); @@ -1733,7 +1753,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, return ret; } spin_lock(&bdev->fence_lock); - if (likely(bo->sync_obj == sync_obj)) { + if (likely(bo->sync_obj == sync_obj && + bo->sync_obj_arg == sync_obj_arg)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, @@ -1776,7 +1797,8 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) { - atomic_dec(&bo->cpu_writers); + if (atomic_dec_and_test(&bo->cpu_writers)) + wake_up_all(&bo->event_queue); } EXPORT_SYMBOL(ttm_bo_synccpu_write_release); @@ -1795,25 +1817,40 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); spin_lock(&glob->lru_lock); - list_for_each_entry(bo, &glob->swap_lru, swap) { - ret = ttm_bo_reserve_locked(bo, false, true, false, 0); - if (!ret) - break; - } + while (ret == -EBUSY) { + if (unlikely(list_empty(&glob->swap_lru))) { + spin_unlock(&glob->lru_lock); + return -EBUSY; + } - if (ret) { - spin_unlock(&glob->lru_lock); - return ret; - } + bo = list_first_entry(&glob->swap_lru, + struct ttm_buffer_object, swap); + kref_get(&bo->list_kref); - kref_get(&bo->list_kref); + if (!list_empty(&bo->ddestroy)) { + spin_unlock(&glob->lru_lock); + (void) ttm_bo_cleanup_refs(bo, false, false, false); + kref_put(&bo->list_kref, ttm_bo_release_list); + spin_lock(&glob->lru_lock); + continue; + } - if (!list_empty(&bo->ddestroy)) { - ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false); - kref_put(&bo->list_kref, ttm_bo_release_list); - return ret; + /** + * Reserve buffer. Since we unlock while sleeping, we need + * to re-check that nobody removed us from the swap-list while + * we slept. 
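ttm_bo_init() above again folds an unaligned buffer_start into the size before computing num_pages and stores only the page-aligned part. A small stand-alone check of that arithmetic, assuming 4 KiB pages and a made-up user address:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long buffer_start = 0x10000a00;        /* example user address */
        unsigned long size = 8192;                      /* two pages of payload */

        size += buffer_start & ~PAGE_MASK;              /* add the 0xa00 in-page offset */
        unsigned long num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long aligned_start = buffer_start & PAGE_MASK;

        printf("num_pages = %lu\n", num_pages);           /* 3, not 2 */
        printf("aligned_start = 0x%lx\n", aligned_start); /* 0x10000000 */
        return 0;
}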
+ */ + + ret = ttm_bo_reserve_locked(bo, false, true, false, 0); + if (unlikely(ret == -EBUSY)) { + spin_unlock(&glob->lru_lock); + ttm_bo_wait_unreserved(bo, false); + kref_put(&bo->list_kref, ttm_bo_release_list); + spin_lock(&glob->lru_lock); + } } + BUG_ON(ret != 0); put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); @@ -1839,7 +1876,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) evict_mem.mem_type = TTM_PL_SYSTEM; ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, - false, false); + false, false, false); if (unlikely(ret != 0)) goto out; } diff --git a/trunk/drivers/gpu/drm/ttm/ttm_bo_util.c b/trunk/drivers/gpu/drm/ttm/ttm_bo_util.c index 9e9c5d2a5c74..2026060f03e0 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, + bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_tt *ttm = bo->ttm; @@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, } int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, + bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; @@ -611,7 +611,8 @@ EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, void *sync_obj, - bool evict, + void *sync_obj_arg, + bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { @@ -629,6 +630,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, bo->sync_obj = NULL; } bo->sync_obj = driver->sync_obj_ref(sync_obj); + bo->sync_obj_arg = sync_obj_arg; if (evict) { ret = ttm_bo_wait(bo, false, false, false); spin_unlock(&bdev->fence_lock); diff --git a/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c b/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c index 74705f329d99..3ba72dbdc4bd 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, read_lock(&bdev->vm_lock); bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, (vma->vm_end - vma->vm_start) >> PAGE_SHIFT); - if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref)) - bo = NULL; + if (likely(bo != NULL)) + ttm_bo_reference(bo); read_unlock(&bdev->vm_lock); if (unlikely(bo == NULL)) { diff --git a/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c index cd9e4523dc56..1937069432c5 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -185,7 +185,10 @@ int ttm_eu_reserve_buffers(struct list_head *list) ttm_eu_backoff_reservation_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); - return -EBUSY; + ret = ttm_bo_wait_cpu(bo, false); + if (ret) + return ret; + goto retry; } } @@ -213,18 +216,19 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) driver = bdev->driver; glob = bo->glob; - spin_lock(&glob->lru_lock); spin_lock(&bdev->fence_lock); + spin_lock(&glob->lru_lock); list_for_each_entry(entry, list, head) { bo = entry->bo; entry->old_sync_obj = bo->sync_obj; bo->sync_obj = driver->sync_obj_ref(sync_obj); + bo->sync_obj_arg = entry->new_sync_obj_arg; ttm_bo_unreserve_locked(bo); entry->reserved = false; } - spin_unlock(&bdev->fence_lock); spin_unlock(&glob->lru_lock); + 
spin_unlock(&bdev->fence_lock); list_for_each_entry(entry, list, head) { if (entry->old_sync_obj) diff --git a/trunk/drivers/gpu/drm/ttm/ttm_memory.c b/trunk/drivers/gpu/drm/ttm/ttm_memory.c index dbc2def887cd..479c6b0467ca 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_memory.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_memory.c @@ -367,6 +367,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) spin_lock_init(&glob->lock); glob->swap_queue = create_singlethread_workqueue("ttm_swap"); INIT_WORK(&glob->work, ttm_shrink_work); + init_waitqueue_head(&glob->queue); ret = kobject_init_and_add( &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); if (unlikely(ret != 0)) { diff --git a/trunk/drivers/gpu/drm/ttm/ttm_object.c b/trunk/drivers/gpu/drm/ttm/ttm_object.c index 58a5f3261c0b..c7857874956a 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_object.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_object.c @@ -80,7 +80,7 @@ struct ttm_object_file { */ struct ttm_object_device { - spinlock_t object_lock; + rwlock_t object_lock; struct drm_open_hash object_hash; atomic_t object_count; struct ttm_mem_global *mem_glob; @@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile, base->refcount_release = refcount_release; base->ref_obj_release = ref_obj_release; base->object_type = object_type; + write_lock(&tdev->object_lock); kref_init(&base->refcount); - spin_lock(&tdev->object_lock); - ret = drm_ht_just_insert_please_rcu(&tdev->object_hash, - &base->hash, - (unsigned long)base, 31, 0, 0); - spin_unlock(&tdev->object_lock); + ret = drm_ht_just_insert_please(&tdev->object_hash, + &base->hash, + (unsigned long)base, 31, 0, 0); + write_unlock(&tdev->object_lock); if (unlikely(ret != 0)) goto out_err0; @@ -174,9 +174,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, return 0; out_err1: - spin_lock(&tdev->object_lock); - (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); - spin_unlock(&tdev->object_lock); + (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); out_err0: return ret; } @@ -188,29 +186,30 @@ static void ttm_release_base(struct kref *kref) container_of(kref, struct ttm_base_object, refcount); struct ttm_object_device *tdev = base->tfile->tdev; - spin_lock(&tdev->object_lock); - (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); - spin_unlock(&tdev->object_lock); - - /* - * Note: We don't use synchronize_rcu() here because it's far - * too slow. It's up to the user to free the object using - * call_rcu() or ttm_base_object_kfree(). - */ - + (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); + write_unlock(&tdev->object_lock); if (base->refcount_release) { ttm_object_file_unref(&base->tfile); base->refcount_release(&base); } + write_lock(&tdev->object_lock); } void ttm_base_object_unref(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; + struct ttm_object_device *tdev = base->tfile->tdev; *p_base = NULL; + /* + * Need to take the lock here to avoid racing with + * users trying to look up the object. 
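The comment above is the heart of the object_lock revert: lookups take the lock for reading while the final unref and hash removal take it for writing, so a lookup can never race the last kref_put. A user-space sketch of the same scheme (illustrative types, not the TTM structures):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcount;
};

static pthread_rwlock_t object_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct obj *hash_slot;   /* stands in for tdev->object_hash */

static struct obj *lookup(void)
{
        struct obj *o;

        pthread_rwlock_rdlock(&object_lock);
        o = hash_slot;
        if (o)
                atomic_fetch_add(&o->refcount, 1);      /* like kref_get() */
        pthread_rwlock_unlock(&object_lock);
        return o;
}

static void unref(struct obj *o)
{
        pthread_rwlock_wrlock(&object_lock);
        if (atomic_fetch_sub(&o->refcount, 1) == 1) {
                hash_slot = NULL;       /* drop from the "hash" before freeing */
                pthread_rwlock_unlock(&object_lock);
                free(o);
                return;
        }
        pthread_rwlock_unlock(&object_lock);
}

int main(void)
{
        struct obj *o;

        hash_slot = calloc(1, sizeof(*hash_slot));
        atomic_init(&hash_slot->refcount, 1);

        o = lookup();           /* refcount 2 */
        unref(o);               /* refcount 1 */
        unref(hash_slot);       /* refcount 0: removed and freed */
        return 0;
}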
+ */ + + write_lock(&tdev->object_lock); kref_put(&base->refcount, ttm_release_base); + write_unlock(&tdev->object_lock); } EXPORT_SYMBOL(ttm_base_object_unref); @@ -222,14 +221,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, struct drm_hash_item *hash; int ret; - rcu_read_lock(); - ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash); + read_lock(&tdev->object_lock); + ret = drm_ht_find_item(&tdev->object_hash, key, &hash); if (likely(ret == 0)) { base = drm_hash_entry(hash, struct ttm_base_object, hash); - ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL; + kref_get(&base->refcount); } - rcu_read_unlock(); + read_unlock(&tdev->object_lock); if (unlikely(ret != 0)) return NULL; @@ -427,7 +426,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global return NULL; tdev->mem_glob = mem_glob; - spin_lock_init(&tdev->object_lock); + rwlock_init(&tdev->object_lock); atomic_set(&tdev->object_count, 0); ret = drm_ht_create(&tdev->object_hash, hash_order); @@ -445,9 +444,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) *p_tdev = NULL; - spin_lock(&tdev->object_lock); + write_lock(&tdev->object_lock); drm_ht_remove(&tdev->object_hash); - spin_unlock(&tdev->object_lock); + write_unlock(&tdev->object_lock); kfree(tdev); } diff --git a/trunk/drivers/gpu/drm/udl/udl_connector.c b/trunk/drivers/gpu/drm/udl/udl_connector.c index 512f44add89f..b3b2cedf6745 100644 --- a/trunk/drivers/gpu/drm/udl/udl_connector.c +++ b/trunk/drivers/gpu/drm/udl/udl_connector.c @@ -84,8 +84,7 @@ udl_detect(struct drm_connector *connector, bool force) return connector_status_connected; } -static struct drm_encoder* -udl_best_single_encoder(struct drm_connector *connector) +struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct drm_mode_object *obj; @@ -98,9 +97,8 @@ udl_best_single_encoder(struct drm_connector *connector) return encoder; } -static int udl_connector_set_property(struct drm_connector *connector, - struct drm_property *property, - uint64_t val) +int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property, + uint64_t val) { return 0; } @@ -112,13 +110,13 @@ static void udl_connector_destroy(struct drm_connector *connector) kfree(connector); } -static struct drm_connector_helper_funcs udl_connector_helper_funcs = { +struct drm_connector_helper_funcs udl_connector_helper_funcs = { .get_modes = udl_get_modes, .mode_valid = udl_mode_valid, .best_encoder = udl_best_single_encoder, }; -static struct drm_connector_funcs udl_connector_funcs = { +struct drm_connector_funcs udl_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = udl_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -140,7 +138,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) drm_sysfs_connector_add(connector); drm_mode_connector_attach_encoder(connector, encoder); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.dirty_info_property, 1); return 0; diff --git a/trunk/drivers/gpu/drm/vmwgfx/Makefile b/trunk/drivers/gpu/drm/vmwgfx/Makefile index 2cc6cd91ac11..586869c8c11f 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/Makefile +++ b/trunk/drivers/gpu/drm/vmwgfx/Makefile @@ -5,7 +5,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o 
vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ - vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ - vmwgfx_surface.o + vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/trunk/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/trunk/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h deleted file mode 100644 index 8369c3ba10fe..000000000000 --- a/trunk/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ /dev/null @@ -1,909 +0,0 @@ -/************************************************************************** - * - * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#ifdef __KERNEL__ - -#include -#define surf_size_struct struct drm_vmw_size - -#else /* __KERNEL__ */ - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0])) -#endif /* ARRAY_SIZE */ - -#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) -#define max_t(type, x, y) ((x) > (y) ? (x) : (y)) -#define surf_size_struct SVGA3dSize -#define u32 uint32 - -#endif /* __KERNEL__ */ - -#include "svga3d_reg.h" - -/* - * enum svga3d_block_desc describes the active data channels in a block. - * - * There can be at-most four active channels in a block: - * 1. Red, bump W, luminance and depth are stored in the first channel. - * 2. Green, bump V and stencil are stored in the second channel. - * 3. Blue and bump U are stored in the third channel. - * 4. Alpha and bump Q are stored in the fourth channel. - * - * Block channels can be used to store compressed and buffer data: - * 1. For compressed formats, only the data channel is used and its size - * is equal to that of a singular block in the compression scheme. - * 2. For buffer formats, only the data channel is used and its size is - * exactly one byte in length. - * 3. In each case the bit depth represent the size of a singular block. - * - * Note: Compressed and IEEE formats do not use the bitMask structure. 
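For orientation, the descriptor bits in the enum below compose by ORing single-channel flags into the composite formats, and the planar check used later keys off a single bit. A tiny stand-alone restatement with values copied from the deleted header:

#include <stdio.h>

enum block_desc {
        DESC_BLUE        = 1 << 0,
        DESC_GREEN       = 1 << 1,
        DESC_RED         = 1 << 2,
        DESC_ALPHA       = 1 << 3,
        DESC_PLANAR_YUV  = 1 << 8,
        DESC_2PLANAR_YUV = 1 << 13,

        DESC_RG   = DESC_RED | DESC_GREEN,
        DESC_RGB  = DESC_RG | DESC_BLUE,
        DESC_RGBA = DESC_RGB | DESC_ALPHA,
        DESC_NV12 = DESC_PLANAR_YUV | DESC_2PLANAR_YUV,
};

static int is_planar(enum block_desc d)
{
        return (d & DESC_PLANAR_YUV) != 0;
}

int main(void)
{
        printf("RGBA = 0x%x\n", DESC_RGBA);              /* 0xf */
        printf("NV12 planar? %d\n", is_planar(DESC_NV12)); /* 1 */
        printf("RGBA planar? %d\n", is_planar(DESC_RGBA)); /* 0 */
        return 0;
}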
- */ - -enum svga3d_block_desc { - SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */ - SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel - data */ - SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel - data */ - SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video - U and V */ - SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel - data */ - SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel - data */ - SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil - channel */ - SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel - data */ - SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel - data */ - SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel - data */ - SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance - data */ - SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */ - SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha - channel */ - SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel - data */ - SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of - data */ - SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of - data depending on the - compression method used */ - SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE - floating point - representation in - all channels */ - SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store - data. */ - SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */ - SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */ - SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */ - SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */ - SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV, - e.g., NV12. */ - SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate - Y, U, V, e.g., YV12. 
*/ - - SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_GREEN, - SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG | - SVGA3DBLOCKDESC_BLUE, - SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U | - SVGA3DBLOCKDESC_V, - SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_LUMINANCE, - SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV | - SVGA3DBLOCKDESC_W, - SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U | - SVGA3DBLOCKDESC_V | - SVGA3DBLOCKDESC_W | - SVGA3DBLOCKDESC_Q, - SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED | - SVGA3DBLOCKDESC_IEEE_FP, - SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP | - SVGA3DBLOCKDESC_GREEN, - SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP | - SVGA3DBLOCKDESC_BLUE, - SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP | - SVGA3DBLOCKDESC_ALPHA, - SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH | - SVGA3DBLOCKDESC_STENCIL, - SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO | - SVGA3DBLOCKDESC_Y, - SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA | - SVGA3DBLOCKDESC_Y | - SVGA3DBLOCKDESC_U_VIDEO | - SVGA3DBLOCKDESC_V_VIDEO, - SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB | - SVGA3DBLOCKDESC_EXP, - SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED | - SVGA3DBLOCKDESC_SRGB, - SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV | - SVGA3DBLOCKDESC_2PLANAR_YUV, - SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV | - SVGA3DBLOCKDESC_3PLANAR_YUV, -}; - -/* - * SVGA3dSurfaceDesc describes the actual pixel data. - * - * This structure provides the following information: - * 1. Block description. - * 2. Dimensions of a block in the surface. - * 3. Size of block in bytes. - * 4. Bit depth of the pixel data. - * 5. Channel bit depths and masks (if applicable). 
- */ -#define SVGA3D_CHANNEL_DEF(type) \ - struct { \ - union { \ - type blue; \ - type u; \ - type uv_video; \ - type u_video; \ - }; \ - union { \ - type green; \ - type v; \ - type stencil; \ - type v_video; \ - }; \ - union { \ - type red; \ - type w; \ - type luminance; \ - type y; \ - type depth; \ - type data; \ - }; \ - union { \ - type alpha; \ - type q; \ - type exp; \ - }; \ - } - -struct svga3d_surface_desc { - enum svga3d_block_desc block_desc; - surf_size_struct block_size; - u32 bytes_per_block; - u32 pitch_bytes_per_block; - - struct { - u32 total; - SVGA3D_CHANNEL_DEF(uint8); - } bit_depth; - - struct { - SVGA3D_CHANNEL_DEF(uint8); - } bit_offset; -}; - -static const struct svga3d_surface_desc svga3d_surface_descs[] = { - {SVGA3DBLOCKDESC_NONE, - {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } }, - {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } }, - {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } }, - {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } }, - {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } }, - {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */ - - {SVGA3DBLOCKDESC_LUMINANCE, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */ - - {SVGA3DBLOCKDESC_LA, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } }, - {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */ - - {SVGA3DBLOCKDESC_LUMINANCE, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */ - - {SVGA3DBLOCKDESC_LA, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } }, - {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } }, - {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 2, 2, {16, {{5}, 
{5}, {6}, {0} } }, - {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */ - - {SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */ - - {SVGA3DBLOCKDESC_RGBA_FP, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */ - - {SVGA3DBLOCKDESC_UVL, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */ - - {SVGA3DBLOCKDESC_UVWA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */ - - {SVGA3DBLOCKDESC_ALPHA, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */ - - {SVGA3DBLOCKDESC_BUFFER, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } }, - {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */ - - {SVGA3DBLOCKDESC_YUV, - {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } }, - {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */ - - {SVGA3DBLOCKDESC_YUV, - {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } }, - {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */ - - {SVGA3DBLOCKDESC_NV12, - {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */ - - {SVGA3DBLOCKDESC_AYUV, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 16, 16, {128, {{32}, {32}, 
{32}, {32} } }, - {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */ - - {SVGA3DBLOCKDESC_RGB_FP, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */ - - {SVGA3DBLOCKDESC_UVW, - {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } }, - {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */ - - {SVGA3DBLOCKDESC_UVWQ, - {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } }, - {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */ - - {SVGA3DBLOCKDESC_R_FP, - {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */ - - {SVGA3DBLOCKDESC_GREEN, - {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } }, - {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */ - - {SVGA3DBLOCKDESC_RGB_FP, - {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } }, - {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */ - - {SVGA3DBLOCKDESC_RGBA_SRGB, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */ - - {SVGA3DBLOCKDESC_RG_FP, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, 
{0} } } }, /* SVGA3D_R16G16_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } }, - {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */ - - {SVGA3DBLOCKDESC_GREEN, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */ - - {SVGA3DBLOCKDESC_UV, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */ - - {SVGA3DBLOCKDESC_RED, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */ - - {SVGA3DBLOCKDESC_U, - {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */ - - {SVGA3DBLOCKDESC_RED, - {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */ - - {SVGA3DBLOCKDESC_RGBE, - {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } }, - {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */ - - {SVGA3DBLOCKDESC_RG, - {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } }, - {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */ 
- - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED_SRGB, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */ - - {SVGA3DBLOCKDESC_COMPRESSED, - {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } }, - {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */ - - {SVGA3DBLOCKDESC_RGBA, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGBA_SRGB, - {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_RGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */ - - {SVGA3DBLOCKDESC_RGB_SRGB, - {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } }, - {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */ - - {SVGA3DBLOCKDESC_DEPTH, - {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } }, - {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */ - - {SVGA3DBLOCKDESC_DS, - {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } }, - {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */ -}; - -static inline u32 clamped_umul32(u32 a, u32 b) -{ - uint64_t tmp = (uint64_t) a*b; - return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; -} - -static inline const struct svga3d_surface_desc * -svga3dsurface_get_desc(SVGA3dSurfaceFormat format) -{ - if (format < ARRAY_SIZE(svga3d_surface_descs)) - return &svga3d_surface_descs[format]; - - return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID]; -} - -/* - *---------------------------------------------------------------------- - * - * svga3dsurface_get_mip_size -- - * - * Given a base level size and the mip level, compute the size of - * the mip level. - * - * Results: - * See above. - * - * Side effects: - * None. 
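The planar branch of svga3dsurface_get_image_buffer_size(), defined just below, multiplies the block counts straight through. A quick stand-alone check using the SVGA3D_NV12 entry from the table above (2x2x1 blocks, 6 bytes per block) and an arbitrary 64x64 image:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

int main(void)
{
        const uint32_t width = 64, height = 64, depth = 1;
        const uint32_t bw = 2, bh = 2, bd = 1, bytes_per_block = 6;

        uint32_t blocks_w = DIV_ROUND_UP(width, bw);    /* 32 */
        uint32_t blocks_h = DIV_ROUND_UP(height, bh);   /* 32 */
        uint32_t blocks_d = DIV_ROUND_UP(depth, bd);    /*  1 */

        /* Planar formats multiply the block counts directly:
         * 32 * 32 * 1 * 6 = 6144 bytes, i.e. a full-resolution luma plane
         * plus a quarter-resolution interleaved chroma plane (1.5 B/px). */
        uint32_t total = blocks_w * blocks_h * blocks_d * bytes_per_block;

        printf("NV12 64x64 image: %u bytes\n", total);  /* 6144 */
        return 0;
}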
- * - *---------------------------------------------------------------------- - */ - -static inline surf_size_struct -svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level) -{ - surf_size_struct size; - - size.width = max_t(u32, base_level.width >> mip_level, 1); - size.height = max_t(u32, base_level.height >> mip_level, 1); - size.depth = max_t(u32, base_level.depth >> mip_level, 1); - return size; -} - -static inline void -svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc, - const surf_size_struct *pixel_size, - surf_size_struct *block_size) -{ - block_size->width = DIV_ROUND_UP(pixel_size->width, - desc->block_size.width); - block_size->height = DIV_ROUND_UP(pixel_size->height, - desc->block_size.height); - block_size->depth = DIV_ROUND_UP(pixel_size->depth, - desc->block_size.depth); -} - -static inline bool -svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc) -{ - return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0; -} - -static inline u32 -svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc, - const surf_size_struct *size) -{ - u32 pitch; - surf_size_struct blocks; - - svga3dsurface_get_size_in_blocks(desc, size, &blocks); - - pitch = blocks.width * desc->pitch_bytes_per_block; - - return pitch; -} - -/* - *----------------------------------------------------------------------------- - * - * svga3dsurface_get_image_buffer_size -- - * - * Return the number of bytes of buffer space required to store - * one image of a surface, optionally using the specified pitch. - * - * If pitch is zero, it is assumed that rows are tightly packed. - * - * This function is overflow-safe. If the result would have - * overflowed, instead we return MAX_UINT32. - * - * Results: - * Byte count. - * - * Side effects: - * None. - * - *----------------------------------------------------------------------------- - */ - -static inline u32 -svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc, - const surf_size_struct *size, - u32 pitch) -{ - surf_size_struct image_blocks; - u32 slice_size, total_size; - - svga3dsurface_get_size_in_blocks(desc, size, &image_blocks); - - if (svga3dsurface_is_planar_surface(desc)) { - total_size = clamped_umul32(image_blocks.width, - image_blocks.height); - total_size = clamped_umul32(total_size, image_blocks.depth); - total_size = clamped_umul32(total_size, desc->bytes_per_block); - return total_size; - } - - if (pitch == 0) - pitch = svga3dsurface_calculate_pitch(desc, size); - - slice_size = clamped_umul32(image_blocks.height, pitch); - total_size = clamped_umul32(slice_size, image_blocks.depth); - - return total_size; -} - -static inline u32 -svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, - surf_size_struct base_level_size, - u32 num_mip_levels, - bool cubemap) -{ - const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - u32 total_size = 0; - u32 mip; - - for (mip = 0; mip < num_mip_levels; mip++) { - surf_size_struct size = - svga3dsurface_get_mip_size(base_level_size, mip); - total_size += svga3dsurface_get_image_buffer_size(desc, - &size, 0); - } - - if (cubemap) - total_size *= SVGA3D_MAX_SURFACE_FACES; - - return total_size; -} - - -/** - * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel - * in an image (or volume). - * - * @width: The image width in pixels. 
- * @height: The image height in pixels - */ -static inline u32 -svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format, - u32 width, u32 height, - u32 x, u32 y, u32 z) -{ - const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - const u32 bw = desc->block_size.width, bh = desc->block_size.height; - const u32 bd = desc->block_size.depth; - const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block; - const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride; - const u32 offset = (z / bd * imgstride + - y / bh * rowstride + - x / bw * desc->bytes_per_block); - return offset; -} - - -static inline u32 -svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format, - surf_size_struct baseLevelSize, - u32 numMipLevels, - u32 face, - u32 mip) - -{ - u32 offset; - u32 mipChainBytes; - u32 mipChainBytesToLevel; - u32 i; - const struct svga3d_surface_desc *desc; - surf_size_struct mipSize; - u32 bytes; - - desc = svga3dsurface_get_desc(format); - - mipChainBytes = 0; - mipChainBytesToLevel = 0; - for (i = 0; i < numMipLevels; i++) { - mipSize = svga3dsurface_get_mip_size(baseLevelSize, i); - bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0); - mipChainBytes += bytes; - if (i < mip) - mipChainBytesToLevel += bytes; - } - - offset = mipChainBytes * face + mipChainBytesToLevel; - - return offset; -} diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 96dc84dc34d0..9826fbc88154 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -248,12 +248,13 @@ void vmw_evict_flags(struct ttm_buffer_object *bo, *placement = vmw_sys_placement; } +/** + * FIXME: Proper access checks on buffers. + */ + static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - struct ttm_object_file *tfile = - vmw_fpriv((struct drm_file *)filp->private_data)->tfile; - - return vmw_user_dmabuf_verify_access(bo, tfile); + return 0; } static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) @@ -309,23 +310,27 @@ static void vmw_sync_obj_unref(void **sync_obj) vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj); } -static int vmw_sync_obj_flush(void *sync_obj) +static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) { vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj); return 0; } -static bool vmw_sync_obj_signaled(void *sync_obj) +static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) { + unsigned long flags = (unsigned long) sync_arg; return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj, - DRM_VMW_FENCE_FLAG_EXEC); + (uint32_t) flags); } -static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) +static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, + bool lazy, bool interruptible) { + unsigned long flags = (unsigned long) sync_arg; + return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj, - DRM_VMW_FENCE_FLAG_EXEC, + (uint32_t) flags, lazy, interruptible, VMW_FENCE_WAIT_TIMEOUT); } diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c deleted file mode 100644 index 00ae0925aca8..000000000000 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ /dev/null @@ -1,274 +0,0 @@ -/************************************************************************** - * - * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
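svga3dsurface_get_pixel_offset() above reduces to row/image stride arithmetic on block units. A stand-alone check using the SVGA3D_DXT1 entry from the table (4x4x1 blocks, 8 bytes per block); the helper here is a restatement for illustration, not new driver code:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

static uint32_t pixel_offset(uint32_t width, uint32_t height,
                             uint32_t x, uint32_t y, uint32_t z,
                             uint32_t bw, uint32_t bh, uint32_t bd,
                             uint32_t bytes_per_block)
{
        const uint32_t rowstride = DIV_ROUND_UP(width, bw) * bytes_per_block;
        const uint32_t imgstride = DIV_ROUND_UP(height, bh) * rowstride;

        return z / bd * imgstride + y / bh * rowstride +
               x / bw * bytes_per_block;
}

int main(void)
{
        /* 256x256 DXT1 surface: 64 blocks per row, 8 bytes each. */
        uint32_t off = pixel_offset(256, 256, 16, 8, 0, 4, 4, 1, 8);

        /* x=16 -> block column 4, y=8 -> block row 2:
         * 2 * (64 * 8) + 4 * 8 = 1056 bytes. */
        printf("offset = %u\n", off);   /* prints 1056 */
        return 0;
}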
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#include "vmwgfx_drv.h" -#include "vmwgfx_resource_priv.h" -#include "ttm/ttm_placement.h" - -struct vmw_user_context { - struct ttm_base_object base; - struct vmw_resource res; -}; - -static void vmw_user_context_free(struct vmw_resource *res); -static struct vmw_resource * -vmw_user_context_base_to_res(struct ttm_base_object *base); - -static uint64_t vmw_user_context_size; - -static const struct vmw_user_resource_conv user_context_conv = { - .object_type = VMW_RES_CONTEXT, - .base_obj_to_res = vmw_user_context_base_to_res, - .res_free = vmw_user_context_free -}; - -const struct vmw_user_resource_conv *user_context_converter = - &user_context_conv; - - -static const struct vmw_res_func vmw_legacy_context_func = { - .res_type = vmw_res_context, - .needs_backup = false, - .may_evict = false, - .type_name = "legacy contexts", - .backup_placement = NULL, - .create = NULL, - .destroy = NULL, - .bind = NULL, - .unbind = NULL -}; - -/** - * Context management: - */ - -static void vmw_hw_context_destroy(struct vmw_resource *res) -{ - - struct vmw_private *dev_priv = res->dev_priv; - struct { - SVGA3dCmdHeader header; - SVGA3dCmdDestroyContext body; - } *cmd; - - - vmw_execbuf_release_pinned_bo(dev_priv); - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "destruction.\n"); - return; - } - - cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); - cmd->header.size = cpu_to_le32(sizeof(cmd->body)); - cmd->body.cid = cpu_to_le32(res->id); - - vmw_fifo_commit(dev_priv, sizeof(*cmd)); - vmw_3d_resource_dec(dev_priv, false); -} - -static int vmw_context_init(struct vmw_private *dev_priv, - struct vmw_resource *res, - void (*res_free) (struct vmw_resource *res)) -{ - int ret; - - struct { - SVGA3dCmdHeader header; - SVGA3dCmdDefineContext body; - } *cmd; - - ret = vmw_resource_init(dev_priv, res, false, - res_free, &vmw_legacy_context_func); - - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to allocate a resource id.\n"); - goto out_early; - } - - if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { - DRM_ERROR("Out of hw context ids.\n"); - vmw_resource_unreference(&res); - return -ENOMEM; - } - - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Fifo reserve 
failed.\n"); - vmw_resource_unreference(&res); - return -ENOMEM; - } - - cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); - cmd->header.size = cpu_to_le32(sizeof(cmd->body)); - cmd->body.cid = cpu_to_le32(res->id); - - vmw_fifo_commit(dev_priv, sizeof(*cmd)); - (void) vmw_3d_resource_inc(dev_priv, false); - vmw_resource_activate(res, vmw_hw_context_destroy); - return 0; - -out_early: - if (res_free == NULL) - kfree(res); - else - res_free(res); - return ret; -} - -struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) -{ - struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); - int ret; - - if (unlikely(res == NULL)) - return NULL; - - ret = vmw_context_init(dev_priv, res, NULL); - - return (ret == 0) ? res : NULL; -} - -/** - * User-space context management: - */ - -static struct vmw_resource * -vmw_user_context_base_to_res(struct ttm_base_object *base) -{ - return &(container_of(base, struct vmw_user_context, base)->res); -} - -static void vmw_user_context_free(struct vmw_resource *res) -{ - struct vmw_user_context *ctx = - container_of(res, struct vmw_user_context, res); - struct vmw_private *dev_priv = res->dev_priv; - - ttm_base_object_kfree(ctx, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); -} - -/** - * This function is called when user space has no more references on the - * base object. It releases the base-object's reference on the resource object. - */ - -static void vmw_user_context_base_release(struct ttm_base_object **p_base) -{ - struct ttm_base_object *base = *p_base; - struct vmw_user_context *ctx = - container_of(base, struct vmw_user_context, base); - struct vmw_resource *res = &ctx->res; - - *p_base = NULL; - vmw_resource_unreference(&res); -} - -int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - - return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); -} - -int vmw_context_define_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_user_context *ctx; - struct vmw_resource *res; - struct vmw_resource *tmp; - struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_master *vmaster = vmw_master(file_priv->master); - int ret; - - - /* - * Approximate idr memory usage with 128 bytes. It will be limited - * by maximum number_of contexts anyway. - */ - - if (unlikely(vmw_user_context_size == 0)) - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; - - ret = ttm_read_lock(&vmaster->lock, true); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_context_size, - false, true); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for context" - " creation.\n"); - goto out_unlock; - } - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (unlikely(ctx == NULL)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); - ret = -ENOMEM; - goto out_unlock; - } - - res = &ctx->res; - ctx->base.shareable = false; - ctx->base.tfile = NULL; - - /* - * From here on, the destructor takes over resource freeing. 
- */ - - ret = vmw_context_init(dev_priv, res, vmw_user_context_free); - if (unlikely(ret != 0)) - goto out_unlock; - - tmp = vmw_resource_reference(&ctx->res); - ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, - &vmw_user_context_base_release, NULL); - - if (unlikely(ret != 0)) { - vmw_resource_unreference(&tmp); - goto out_err; - } - - arg->cid = ctx->base.hash.key; -out_err: - vmw_resource_unreference(&res); -out_unlock: - ttm_read_unlock(&vmaster->lock); - return ret; - -} diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index 5fae06ad7e25..d1498bfd7873 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - vmw_execbuf_release_pinned_bo(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); ret = ttm_bo_reserve(bo, interruptible, false, false, 0); if (unlikely(ret != 0)) goto err; - ret = ttm_bo_validate(bo, placement, interruptible, false); + ret = ttm_bo_validate(bo, placement, interruptible, false, false); ttm_bo_unreserve(bo); @@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, return ret; if (pin) - vmw_execbuf_release_pinned_bo(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); ret = ttm_bo_reserve(bo, interruptible, false, false, 0); if (unlikely(ret != 0)) @@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, else placement = &vmw_vram_gmr_placement; - ret = ttm_bo_validate(bo, placement, interruptible, false); + ret = ttm_bo_validate(bo, placement, interruptible, false, false); if (likely(ret == 0) || ret == -ERESTARTSYS) goto err_unreserve; @@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, else placement = &vmw_vram_placement; - ret = ttm_bo_validate(bo, placement, interruptible, false); + ret = ttm_bo_validate(bo, placement, interruptible, false, false); err_unreserve: ttm_bo_unreserve(bo); @@ -214,7 +214,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, return ret; if (pin) - vmw_execbuf_release_pinned_bo(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); + ret = ttm_bo_reserve(bo, interruptible, false, false, 0); if (unlikely(ret != 0)) goto err_unlock; @@ -223,9 +224,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, if (bo->mem.mem_type == TTM_PL_VRAM && bo->mem.start < bo->num_pages && bo->mem.start > 0) - (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false); + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, + false, false); - ret = ttm_bo_validate(bo, &placement, interruptible, false); + ret = ttm_bo_validate(bo, &placement, interruptible, false, false); /* For some reason we didn't up at the start of vram */ WARN_ON(ret == 0 && bo->offset != 0); @@ -302,7 +304,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) uint32_t old_mem_type = bo->mem.mem_type; int ret; - BUG_ON(!ttm_bo_is_reserved(bo)); + BUG_ON(!atomic_read(&bo->reserved)); BUG_ON(old_mem_type != TTM_PL_VRAM && old_mem_type != VMW_PL_GMR); @@ -314,7 +316,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) placement.num_placement = 1; placement.placement = &pl_flags; - ret = ttm_bo_validate(bo, &placement, false, true); + ret = ttm_bo_validate(bo, &placement, false, true, true); BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); } diff --git 
a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 161f8b2549aa..2dd185e42f21 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) PAGE_SIZE, ttm_bo_type_device, &vmw_vram_sys_placement, - 0, false, NULL, + 0, 0, false, NULL, &dev_priv->dummy_query_bo); } @@ -432,7 +432,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) struct vmw_private *dev_priv; int ret; uint32_t svga_id; - enum vmw_res_type i; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (unlikely(dev_priv == NULL)) { @@ -449,18 +448,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); rwlock_init(&dev_priv->resource_lock); - - for (i = vmw_res_context; i < vmw_res_max; ++i) { - idr_init(&dev_priv->res_idr[i]); - INIT_LIST_HEAD(&dev_priv->res_lru[i]); - } - + idr_init(&dev_priv->context_idr); + idr_init(&dev_priv->surface_idr); + idr_init(&dev_priv->stream_idr); mutex_init(&dev_priv->init_mutex); init_waitqueue_head(&dev_priv->fence_queue); init_waitqueue_head(&dev_priv->fifo_queue); dev_priv->fence_queue_waiters = 0; atomic_set(&dev_priv->fifo_queue_waiters, 0); - + INIT_LIST_HEAD(&dev_priv->surface_lru); dev_priv->used_memory_size = 0; dev_priv->io_start = pci_resource_start(dev->pdev, 0); @@ -613,18 +609,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) } } - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { - ret = drm_irq_install(dev); - if (ret != 0) { - DRM_ERROR("Failed installing irq: %d\n", ret); - goto out_no_irq; - } - } - dev_priv->fman = vmw_fence_manager_init(dev_priv); if (unlikely(dev_priv->fman == NULL)) goto out_no_fman; + /* Need to start the fifo to check if we can do screen objects */ + ret = vmw_3d_resource_inc(dev_priv, true); + if (unlikely(ret != 0)) + goto out_no_fifo; vmw_kms_save_vga(dev_priv); /* Start kms and overlay systems, needs fifo. */ @@ -633,11 +625,25 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_kms; vmw_overlay_init(dev_priv); + /* 3D Depends on Screen Objects being used. */ + DRM_INFO("Detected %sdevice 3D availability.\n", + vmw_fifo_have_3d(dev_priv) ? 
+ "" : "no "); + + /* We might be done with the fifo now */ if (dev_priv->enable_fb) { - ret = vmw_3d_resource_inc(dev_priv, true); - if (unlikely(ret != 0)) - goto out_no_fifo; vmw_fb_init(dev_priv); + } else { + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, true); + } + + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { + ret = drm_irq_install(dev); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed installing irq: %d\n", ret); + goto out_no_irq; + } } dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; @@ -645,16 +651,20 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) return 0; -out_no_fifo: +out_no_irq: + if (dev_priv->enable_fb) + vmw_fb_close(dev_priv); vmw_overlay_close(dev_priv); vmw_kms_close(dev_priv); out_no_kms: - vmw_kms_restore_vga(dev_priv); + /* We still have a 3D resource reference held */ + if (dev_priv->enable_fb) { + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, false); + } +out_no_fifo: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - drm_irq_uninstall(dev_priv->dev); -out_no_irq: if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else @@ -674,9 +684,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) out_err1: vmw_ttm_global_release(dev_priv); out_err0: - for (i = vmw_res_context; i < vmw_res_max; ++i) - idr_destroy(&dev_priv->res_idr[i]); - + idr_destroy(&dev_priv->surface_idr); + idr_destroy(&dev_priv->context_idr); + idr_destroy(&dev_priv->stream_idr); kfree(dev_priv); return ret; } @@ -684,14 +694,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) static int vmw_driver_unload(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); - enum vmw_res_type i; unregister_pm_notifier(&dev_priv->pm_nb); - if (dev_priv->ctx.res_ht_initialized) - drm_ht_remove(&dev_priv->ctx.res_ht); if (dev_priv->ctx.cmd_bounce) vfree(dev_priv->ctx.cmd_bounce); + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) + drm_irq_uninstall(dev_priv->dev); if (dev_priv->enable_fb) { vmw_fb_close(dev_priv); vmw_kms_restore_vga(dev_priv); @@ -700,8 +709,6 @@ static int vmw_driver_unload(struct drm_device *dev) vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - drm_irq_uninstall(dev_priv->dev); if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else @@ -716,9 +723,9 @@ static int vmw_driver_unload(struct drm_device *dev) (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); - - for (i = vmw_res_context; i < vmw_res_max; ++i) - idr_destroy(&dev_priv->res_idr[i]); + idr_destroy(&dev_priv->surface_idr); + idr_destroy(&dev_priv->context_idr); + idr_destroy(&dev_priv->stream_idr); kfree(dev_priv); @@ -917,11 +924,11 @@ static int vmw_master_set(struct drm_device *dev, out_no_active_lock: if (!dev_priv->enable_fb) { - vmw_kms_restore_vga(dev_priv); - vmw_3d_resource_dec(dev_priv, true); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); mutex_unlock(&dev_priv->hw_mutex); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, true); } return ret; } @@ -942,7 +949,7 @@ static void vmw_master_drop(struct drm_device *dev, vmw_fp->locked_master = drm_master_get(file_priv->master); ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); - vmw_execbuf_release_pinned_bo(dev_priv); + 
vmw_execbuf_release_pinned_bo(dev_priv, false, 0); if (unlikely((ret != 0))) { DRM_ERROR("Unable to lock TTM at VT switch.\n"); @@ -955,11 +962,11 @@ static void vmw_master_drop(struct drm_device *dev, ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); if (unlikely(ret != 0)) DRM_ERROR("Unable to clean VRAM on master drop.\n"); - vmw_kms_restore_vga(dev_priv); - vmw_3d_resource_dec(dev_priv, true); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); mutex_unlock(&dev_priv->hw_mutex); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv, true); } dev_priv->active_master = &dev_priv->fbdev_master; @@ -994,8 +1001,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, * This empties VRAM and unbinds all GMR bindings. * Buffer contents is moved to swappable memory. */ - vmw_execbuf_release_pinned_bo(dev_priv); - vmw_resource_evict_all(dev_priv); + vmw_execbuf_release_pinned_bo(dev_priv, false, 0); ttm_bo_swapout_all(&dev_priv->bdev); break; diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 13aeda71280e..88a179e26de9 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -67,46 +67,31 @@ struct vmw_fpriv { struct vmw_dma_buffer { struct ttm_buffer_object base; - struct list_head res_list; + struct list_head validate_list; + bool gmr_bound; + uint32_t cur_validate_node; + bool on_validate_list; }; -/** - * struct vmw_validate_buffer - Carries validation info about buffers. - * - * @base: Validation info for TTM. - * @hash: Hash entry for quick lookup of the TTM buffer object. - * - * This structure contains also driver private validation info - * on top of the info needed by TTM. - */ -struct vmw_validate_buffer { - struct ttm_validate_buffer base; - struct drm_hash_item hash; -}; - -struct vmw_res_func; struct vmw_resource { struct kref kref; struct vmw_private *dev_priv; + struct idr *idr; int id; + enum ttm_object_type res_type; bool avail; - unsigned long backup_size; - bool res_dirty; /* Protected by backup buffer reserved */ - bool backup_dirty; /* Protected by backup buffer reserved */ - struct vmw_dma_buffer *backup; - unsigned long backup_offset; - const struct vmw_res_func *func; - struct list_head lru_head; /* Protected by the resource lock */ - struct list_head mob_head; /* Protected by @backup reserved */ - void (*res_free) (struct vmw_resource *res); + void (*remove_from_lists) (struct vmw_resource *res); void (*hw_destroy) (struct vmw_resource *res); -}; - -enum vmw_res_type { - vmw_res_context, - vmw_res_surface, - vmw_res_stream, - vmw_res_max + void (*res_free) (struct vmw_resource *res); + struct list_head validate_head; + struct list_head query_head; /* Protected by the cmdbuf mutex */ + /* TODO is a generic snooper needed? 
*/ +#if 0 + void (*snoop)(struct vmw_resource *res, + struct ttm_object_file *tfile, + SVGA3dCmdHeader *header); + void *snoop_priv; +#endif }; struct vmw_cursor_snooper { @@ -120,18 +105,20 @@ struct vmw_surface_offset; struct vmw_surface { struct vmw_resource res; + struct list_head lru_head; /* Protected by the resource lock */ uint32_t flags; uint32_t format; uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; - struct drm_vmw_size base_size; struct drm_vmw_size *sizes; uint32_t num_sizes; + bool scanout; + /* TODO so far just a extra pointer */ struct vmw_cursor_snooper snooper; + struct ttm_buffer_object *backup; struct vmw_surface_offset *offsets; - SVGA3dTextureFilter autogen_filter; - uint32_t multisample_count; + uint32_t backup_size; }; struct vmw_marker_queue { @@ -158,46 +145,29 @@ struct vmw_relocation { uint32_t index; }; -/** - * struct vmw_res_cache_entry - resource information cache entry - * - * @valid: Whether the entry is valid, which also implies that the execbuf - * code holds a reference to the resource, and it's placed on the - * validation list. - * @handle: User-space handle of a resource. - * @res: Non-ref-counted pointer to the resource. - * - * Used to avoid frequent repeated user-space handle lookups of the - * same resource. - */ -struct vmw_res_cache_entry { - bool valid; - uint32_t handle; - struct vmw_resource *res; - struct vmw_resource_val_node *node; -}; - struct vmw_sw_context{ - struct drm_open_hash res_ht; - bool res_ht_initialized; + struct ida bo_list; + uint32_t last_cid; + bool cid_valid; bool kernel; /**< is the called made from the kernel */ + struct vmw_resource *cur_ctx; + uint32_t last_sid; + uint32_t sid_translation; + bool sid_valid; struct ttm_object_file *tfile; struct list_head validate_nodes; struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; uint32_t cur_reloc; - struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; + struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; uint32_t cur_val_buf; uint32_t *cmd_bounce; uint32_t cmd_bounce_size; struct list_head resource_list; uint32_t fence_flags; + struct list_head query_list; struct ttm_buffer_object *cur_query_bo; - struct list_head res_relocations; - uint32_t *buf_start; - struct vmw_res_cache_entry res_cache[vmw_res_max]; - struct vmw_resource *last_query_ctx; - bool needs_post_query_barrier; - struct vmw_resource *error_resource; + uint32_t cur_query_cid; + bool query_cid_valid; }; struct vmw_legacy_display; @@ -272,7 +242,10 @@ struct vmw_private { */ rwlock_t resource_lock; - struct idr res_idr[vmw_res_max]; + struct idr context_idr; + struct idr surface_idr; + struct idr stream_idr; + /* * Block lastclose from racing with firstopen. */ @@ -347,7 +320,6 @@ struct vmw_private { struct ttm_buffer_object *dummy_query_bo; struct ttm_buffer_object *pinned_bo; uint32_t query_cid; - uint32_t query_cid_valid; bool dummy_query_bo_pinned; /* @@ -357,15 +329,10 @@ struct vmw_private { * protected by the cmdbuf mutex for simplicity. 
*/ - struct list_head res_lru[vmw_res_max]; + struct list_head surface_lru; uint32_t used_memory_size; }; -static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) -{ - return container_of(res, struct vmw_surface, res); -} - static inline struct vmw_private *vmw_priv(struct drm_device *dev) { return (struct vmw_private *)dev->dev_private; @@ -414,16 +381,10 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); /** * Resource utilities - vmwgfx_resource.c */ -struct vmw_user_resource_conv; -extern const struct vmw_user_resource_conv *user_surface_converter; -extern const struct vmw_user_resource_conv *user_context_converter; extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); extern void vmw_resource_unreference(struct vmw_resource **p_res); extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); -extern int vmw_resource_validate(struct vmw_resource *res); -extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); -extern bool vmw_resource_needs_backup(const struct vmw_resource *res); extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, @@ -437,13 +398,14 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, uint32_t handle, struct vmw_surface **out_surf, struct vmw_dma_buffer **out_buf); -extern int vmw_user_resource_lookup_handle( - struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t handle, - const struct vmw_user_resource_conv *converter, - struct vmw_resource **p_res); extern void vmw_surface_res_free(struct vmw_resource *res); +extern int vmw_surface_init(struct vmw_private *dev_priv, + struct vmw_surface *srf, + void (*res_free) (struct vmw_resource *res)); +extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, + struct vmw_surface **out); extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, @@ -461,8 +423,6 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv, size_t size, struct ttm_placement *placement, bool interuptable, void (*bo_free) (struct ttm_buffer_object *bo)); -extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile); extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, @@ -480,14 +440,7 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, struct ttm_object_file *tfile, uint32_t *inout_id, struct vmw_resource **out); -extern void vmw_resource_unreserve(struct vmw_resource *res, - struct vmw_dma_buffer *new_backup, - unsigned long new_backup_offset); -extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); -extern void vmw_fence_single_bo(struct ttm_buffer_object *bo, - struct vmw_fence_obj *fence); -extern void vmw_resource_evict_all(struct vmw_private *dev_priv); +extern void vmw_resource_unreserve(struct list_head *list); /** * DMA buffer helper routines - vmwgfx_dmabuf.c @@ -585,9 +538,10 @@ extern int vmw_execbuf_process(struct drm_file *file_priv, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj **out_fence); -extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, - 
struct vmw_fence_obj *fence); -extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv); + +extern void +vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, + bool only_on_cid_match, uint32_t cid); extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, struct vmw_private *dev_priv, @@ -745,13 +699,10 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) { struct vmw_dma_buffer *tmp_buf = *buf; - + struct ttm_buffer_object *bo = &tmp_buf->base; *buf = NULL; - if (tmp_buf != NULL) { - struct ttm_buffer_object *bo = &tmp_buf->base; - ttm_bo_unref(&bo); - } + ttm_bo_unref(&bo); } static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 394e6476105b..30654b4cc972 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -30,181 +30,6 @@ #include #include -#define VMW_RES_HT_ORDER 12 - -/** - * struct vmw_resource_relocation - Relocation info for resources - * - * @head: List head for the software context's relocation list. - * @res: Non-ref-counted pointer to the resource. - * @offset: Offset of 4 byte entries into the command buffer where the - * id that needs fixup is located. - */ -struct vmw_resource_relocation { - struct list_head head; - const struct vmw_resource *res; - unsigned long offset; -}; - -/** - * struct vmw_resource_val_node - Validation info for resources - * - * @head: List head for the software context's resource list. - * @hash: Hash entry for quick resouce to val_node lookup. - * @res: Ref-counted pointer to the resource. - * @switch_backup: Boolean whether to switch backup buffer on unreserve. - * @new_backup: Refcounted pointer to the new backup buffer. - * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll. - * @first_usage: Set to true the first time the resource is referenced in - * the command stream. - * @no_buffer_needed: Resources do not need to allocate buffer backup on - * reservation. The command stream will provide one. - */ -struct vmw_resource_val_node { - struct list_head head; - struct drm_hash_item hash; - struct vmw_resource *res; - struct vmw_dma_buffer *new_backup; - unsigned long new_backup_offset; - bool first_usage; - bool no_buffer_needed; -}; - -/** - * vmw_resource_unreserve - unreserve resources previously reserved for - * command submission. - * - * @list_head: list of resources to unreserve. - * @backoff: Whether command submission failed. - */ -static void vmw_resource_list_unreserve(struct list_head *list, - bool backoff) -{ - struct vmw_resource_val_node *val; - - list_for_each_entry(val, list, head) { - struct vmw_resource *res = val->res; - struct vmw_dma_buffer *new_backup = - backoff ? NULL : val->new_backup; - - vmw_resource_unreserve(res, new_backup, - val->new_backup_offset); - vmw_dmabuf_unreference(&val->new_backup); - } -} - - -/** - * vmw_resource_val_add - Add a resource to the software context's - * resource list if it's not already on it. - * - * @sw_context: Pointer to the software context. - * @res: Pointer to the resource. - * @p_node On successful return points to a valid pointer to a - * struct vmw_resource_val_node, if non-NULL on entry. 
- */ -static int vmw_resource_val_add(struct vmw_sw_context *sw_context, - struct vmw_resource *res, - struct vmw_resource_val_node **p_node) -{ - struct vmw_resource_val_node *node; - struct drm_hash_item *hash; - int ret; - - if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res, - &hash) == 0)) { - node = container_of(hash, struct vmw_resource_val_node, hash); - node->first_usage = false; - if (unlikely(p_node != NULL)) - *p_node = node; - return 0; - } - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (unlikely(node == NULL)) { - DRM_ERROR("Failed to allocate a resource validation " - "entry.\n"); - return -ENOMEM; - } - - node->hash.key = (unsigned long) res; - ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to initialize a resource validation " - "entry.\n"); - kfree(node); - return ret; - } - list_add_tail(&node->head, &sw_context->resource_list); - node->res = vmw_resource_reference(res); - node->first_usage = true; - - if (unlikely(p_node != NULL)) - *p_node = node; - - return 0; -} - -/** - * vmw_resource_relocation_add - Add a relocation to the relocation list - * - * @list: Pointer to head of relocation list. - * @res: The resource. - * @offset: Offset into the command buffer currently being parsed where the - * id that needs fixup is located. Granularity is 4 bytes. - */ -static int vmw_resource_relocation_add(struct list_head *list, - const struct vmw_resource *res, - unsigned long offset) -{ - struct vmw_resource_relocation *rel; - - rel = kmalloc(sizeof(*rel), GFP_KERNEL); - if (unlikely(rel == NULL)) { - DRM_ERROR("Failed to allocate a resource relocation.\n"); - return -ENOMEM; - } - - rel->res = res; - rel->offset = offset; - list_add_tail(&rel->head, list); - - return 0; -} - -/** - * vmw_resource_relocations_free - Free all relocations on a list - * - * @list: Pointer to the head of the relocation list. - */ -static void vmw_resource_relocations_free(struct list_head *list) -{ - struct vmw_resource_relocation *rel, *n; - - list_for_each_entry_safe(rel, n, list, head) { - list_del(&rel->head); - kfree(rel); - } -} - -/** - * vmw_resource_relocations_apply - Apply all relocations on a list - * - * @cb: Pointer to the start of the command buffer bein patch. This need - * not be the same buffer as the one being parsed when the relocation - * list was built, but the contents must be the same modulo the - * resource ids. - * @list: Pointer to the head of the relocation list. - */ -static void vmw_resource_relocations_apply(uint32_t *cb, - struct list_head *list) -{ - struct vmw_resource_relocation *rel; - - list_for_each_entry(rel, list, head) - cb[rel->offset] = rel->res->id; -} - static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) @@ -219,11 +44,25 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, return 0; } +static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context, + struct vmw_resource **p_res) +{ + struct vmw_resource *res = *p_res; + + if (list_empty(&res->validate_head)) { + list_add_tail(&res->validate_head, &sw_context->resource_list); + *p_res = NULL; + } else + vmw_resource_unreference(p_res); +} + /** * vmw_bo_to_validate_list - add a bo to a validate list * * @sw_context: The software context used for this command submission batch. * @bo: The buffer object to add. + * @fence_flags: Fence flags to be or'ed with any other fence flags for + * this buffer on this submission batch. 
* @p_val_node: If non-NULL Will be updated with the validate node number * on return. * @@ -232,43 +71,31 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, */ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, struct ttm_buffer_object *bo, + uint32_t fence_flags, uint32_t *p_val_node) { uint32_t val_node; - struct vmw_validate_buffer *vval_buf; struct ttm_validate_buffer *val_buf; - struct drm_hash_item *hash; - int ret; - if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, - &hash) == 0)) { - vval_buf = container_of(hash, struct vmw_validate_buffer, - hash); - val_buf = &vval_buf->base; - val_node = vval_buf - sw_context->val_bufs; - } else { - val_node = sw_context->cur_val_buf; - if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { - DRM_ERROR("Max number of DMA buffers per submission " - "exceeded.\n"); - return -EINVAL; - } - vval_buf = &sw_context->val_bufs[val_node]; - vval_buf->hash.key = (unsigned long) bo; - ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to initialize a buffer validation " - "entry.\n"); - return ret; - } - ++sw_context->cur_val_buf; - val_buf = &vval_buf->base; + val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); + + if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { + DRM_ERROR("Max number of DMA buffers per submission" + " exceeded.\n"); + return -EINVAL; + } + + val_buf = &sw_context->val_bufs[val_node]; + if (unlikely(val_node == sw_context->cur_val_buf)) { + val_buf->new_sync_obj_arg = NULL; val_buf->bo = ttm_bo_reference(bo); - val_buf->reserved = false; list_add_tail(&val_buf->head, &sw_context->validate_nodes); + ++sw_context->cur_val_buf; } - sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; + val_buf->new_sync_obj_arg = (void *) + ((unsigned long) val_buf->new_sync_obj_arg | fence_flags); + sw_context->fence_flags |= fence_flags; if (p_val_node) *p_val_node = val_node; @@ -276,174 +103,85 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, return 0; } -/** - * vmw_resources_reserve - Reserve all resources on the sw_context's - * resource list. - * - * @sw_context: Pointer to the software context. - * - * Note that since vmware's command submission currently is protected by - * the cmdbuf mutex, no fancy deadlock avoidance is required for resources, - * since only a single thread at once will attempt this. 
- */ -static int vmw_resources_reserve(struct vmw_sw_context *sw_context) +static int vmw_cmd_cid_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) { - struct vmw_resource_val_node *val; - int ret; - - list_for_each_entry(val, &sw_context->resource_list, head) { - struct vmw_resource *res = val->res; - - ret = vmw_resource_reserve(res, val->no_buffer_needed); - if (unlikely(ret != 0)) - return ret; + struct vmw_resource *ctx; - if (res->backup) { - struct ttm_buffer_object *bo = &res->backup->base; + struct vmw_cid_cmd { + SVGA3dCmdHeader header; + __le32 cid; + } *cmd; + int ret; - ret = vmw_bo_to_validate_list - (sw_context, bo, NULL); + cmd = container_of(header, struct vmw_cid_cmd, header); + if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) + return 0; - if (unlikely(ret != 0)) - return ret; - } + ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid, + &ctx); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not find or use context %u\n", + (unsigned) cmd->cid); + return ret; } - return 0; -} -/** - * vmw_resources_validate - Validate all resources on the sw_context's - * resource list. - * - * @sw_context: Pointer to the software context. - * - * Before this function is called, all resource backup buffers must have - * been validated. - */ -static int vmw_resources_validate(struct vmw_sw_context *sw_context) -{ - struct vmw_resource_val_node *val; - int ret; - - list_for_each_entry(val, &sw_context->resource_list, head) { - struct vmw_resource *res = val->res; + sw_context->last_cid = cmd->cid; + sw_context->cid_valid = true; + sw_context->cur_ctx = ctx; + vmw_resource_to_validate_list(sw_context, &ctx); - ret = vmw_resource_validate(res); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Failed to validate resource.\n"); - return ret; - } - } return 0; } -/** - * vmw_cmd_res_check - Check that a resource is present and if so, put it - * on the resource validate list unless it's already there. - * - * @dev_priv: Pointer to a device private structure. - * @sw_context: Pointer to the software context. - * @res_type: Resource type. - * @converter: User-space visisble type specific information. - * @id: Pointer to the location in the command buffer currently being - * parsed from where the user-space resource id handle is located. 
- */ -static int vmw_cmd_res_check(struct vmw_private *dev_priv, +static int vmw_cmd_sid_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t *id, - struct vmw_resource_val_node **p_val) + uint32_t *sid) { - struct vmw_res_cache_entry *rcache = - &sw_context->res_cache[res_type]; - struct vmw_resource *res; - struct vmw_resource_val_node *node; + struct vmw_surface *srf; int ret; + struct vmw_resource *res; - if (*id == SVGA3D_INVALID_ID) + if (*sid == SVGA3D_INVALID_ID) return 0; - /* - * Fastpath in case of repeated commands referencing the same - * resource - */ - - if (likely(rcache->valid && *id == rcache->handle)) { - const struct vmw_resource *res = rcache->res; - - rcache->node->first_usage = false; - if (p_val) - *p_val = rcache->node; - - return vmw_resource_relocation_add - (&sw_context->res_relocations, res, - id - sw_context->buf_start); + if (likely((sw_context->sid_valid && + *sid == sw_context->last_sid))) { + *sid = sw_context->sid_translation; + return 0; } - ret = vmw_user_resource_lookup_handle(dev_priv, - sw_context->tfile, - *id, - converter, - &res); + ret = vmw_user_surface_lookup_handle(dev_priv, + sw_context->tfile, + *sid, &srf); if (unlikely(ret != 0)) { - DRM_ERROR("Could not find or use resource 0x%08x.\n", - (unsigned) *id); - dump_stack(); + DRM_ERROR("Could ot find or use surface 0x%08x " + "address 0x%08lx\n", + (unsigned int) *sid, + (unsigned long) sid); return ret; } - rcache->valid = true; - rcache->res = res; - rcache->handle = *id; + ret = vmw_surface_validate(dev_priv, srf); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Could not validate surface.\n"); + vmw_surface_unreference(&srf); + return ret; + } - ret = vmw_resource_relocation_add(&sw_context->res_relocations, - res, - id - sw_context->buf_start); - if (unlikely(ret != 0)) - goto out_no_reloc; + sw_context->last_sid = *sid; + sw_context->sid_valid = true; + sw_context->sid_translation = srf->res.id; + *sid = sw_context->sid_translation; - ret = vmw_resource_val_add(sw_context, res, &node); - if (unlikely(ret != 0)) - goto out_no_reloc; + res = &srf->res; + vmw_resource_to_validate_list(sw_context, &res); - rcache->node = node; - if (p_val) - *p_val = node; - vmw_resource_unreference(&res); return 0; - -out_no_reloc: - BUG_ON(sw_context->error_resource != NULL); - sw_context->error_resource = res; - - return ret; } -/** - * vmw_cmd_cid_check - Check a command header for valid context information. - * - * @dev_priv: Pointer to a device private structure. - * @sw_context: Pointer to the software context. - * @header: A command header with an embedded user-space context handle. - * - * Convenience function: Call vmw_cmd_res_check with the user-space context - * handle embedded in @header. 
- */ -static int vmw_cmd_cid_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - SVGA3dCmdHeader *header) -{ - struct vmw_cid_cmd { - SVGA3dCmdHeader header; - __le32 cid; - } *cmd; - - cmd = container_of(header, struct vmw_cid_cmd, header); - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, - user_context_converter, &cmd->cid, NULL); -} static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, @@ -460,9 +198,7 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, return ret; cmd = container_of(header, struct vmw_sid_cmd, header); - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.target.sid, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid); return ret; } @@ -477,14 +213,10 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, int ret; cmd = container_of(header, struct vmw_sid_cmd, header); - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.src.sid, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); if (unlikely(ret != 0)) return ret; - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.dest.sid, NULL); + return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); } static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, @@ -498,14 +230,10 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, int ret; cmd = container_of(header, struct vmw_sid_cmd, header); - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.src.sid, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); if (unlikely(ret != 0)) return ret; - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.dest.sid, NULL); + return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); } static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, @@ -524,9 +252,7 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, return -EPERM; } - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cmd->body.srcImage.sid, NULL); + return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); } static int vmw_cmd_present_check(struct vmw_private *dev_priv, @@ -546,15 +272,14 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, return -EPERM; } - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, &cmd->body.sid, - NULL); + return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); } /** * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. * * @dev_priv: The device private structure. + * @cid: The hardware context for the next query. * @new_query_bo: The new buffer holding query results. * @sw_context: The software context used for this command submission. * @@ -562,18 +287,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, * query results, and if another buffer currently is pinned for query * results. If so, the function prepares the state of @sw_context for * switching pinned buffers after successful submission of the current - * command batch. + * command batch. It also checks whether we're using a new query context. 
+ * In that case, it makes sure we emit a query barrier for the old + * context before the current query buffer is fenced. */ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, + uint32_t cid, struct ttm_buffer_object *new_query_bo, struct vmw_sw_context *sw_context) { - struct vmw_res_cache_entry *ctx_entry = - &sw_context->res_cache[vmw_res_context]; int ret; - - BUG_ON(!ctx_entry->valid); - sw_context->last_query_ctx = ctx_entry->res; + bool add_cid = false; + uint32_t cid_to_add; if (unlikely(new_query_bo != sw_context->cur_query_bo)) { @@ -583,9 +308,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, } if (unlikely(sw_context->cur_query_bo != NULL)) { - sw_context->needs_post_query_barrier = true; + BUG_ON(!sw_context->query_cid_valid); + add_cid = true; + cid_to_add = sw_context->cur_query_cid; ret = vmw_bo_to_validate_list(sw_context, sw_context->cur_query_bo, + DRM_VMW_FENCE_FLAG_EXEC, NULL); if (unlikely(ret != 0)) return ret; @@ -594,12 +322,35 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, ret = vmw_bo_to_validate_list(sw_context, dev_priv->dummy_query_bo, + DRM_VMW_FENCE_FLAG_EXEC, NULL); if (unlikely(ret != 0)) return ret; } + if (unlikely(cid != sw_context->cur_query_cid && + sw_context->query_cid_valid)) { + add_cid = true; + cid_to_add = sw_context->cur_query_cid; + } + + sw_context->cur_query_cid = cid; + sw_context->query_cid_valid = true; + + if (add_cid) { + struct vmw_resource *ctx = sw_context->cur_ctx; + + if (list_empty(&ctx->query_head)) + list_add_tail(&ctx->query_head, + &sw_context->query_list); + ret = vmw_bo_to_validate_list(sw_context, + dev_priv->dummy_query_bo, + DRM_VMW_FENCE_FLAG_EXEC, + NULL); + if (unlikely(ret != 0)) + return ret; + } return 0; } @@ -611,9 +362,10 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, * @sw_context: The software context used for this command submission batch. * * This function will check if we're switching query buffers, and will then, + * if no other query waits are issued this command submission batch, * issue a dummy occlusion query wait used as a query barrier. When the fence * object following that query wait has signaled, we are sure that all - * preceding queries have finished, and the old query buffer can be unpinned. + * preseding queries have finished, and the old query buffer can be unpinned. * However, since both the new query buffer and the old one are fenced with * that fence, we can do an asynchronus unpin now, and be sure that the * old query buffer won't be moved until the fence has signaled. @@ -624,19 +376,20 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context) { + + struct vmw_resource *ctx, *next_ctx; + int ret; + /* * The validate list should still hold references to all * contexts here. 
*/ - if (sw_context->needs_post_query_barrier) { - struct vmw_res_cache_entry *ctx_entry = - &sw_context->res_cache[vmw_res_context]; - struct vmw_resource *ctx; - int ret; + list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list, + query_head) { + list_del_init(&ctx->query_head); - BUG_ON(!ctx_entry->valid); - ctx = ctx_entry->res; + BUG_ON(list_empty(&ctx->validate_head)); ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); @@ -650,46 +403,40 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ttm_bo_unref(&dev_priv->pinned_bo); } - if (!sw_context->needs_post_query_barrier) { - vmw_bo_pin(sw_context->cur_query_bo, true); + vmw_bo_pin(sw_context->cur_query_bo, true); - /* - * We pin also the dummy_query_bo buffer so that we - * don't need to validate it when emitting - * dummy queries in context destroy paths. - */ + /* + * We pin also the dummy_query_bo buffer so that we + * don't need to validate it when emitting + * dummy queries in context destroy paths. + */ - vmw_bo_pin(dev_priv->dummy_query_bo, true); - dev_priv->dummy_query_bo_pinned = true; + vmw_bo_pin(dev_priv->dummy_query_bo, true); + dev_priv->dummy_query_bo_pinned = true; - BUG_ON(sw_context->last_query_ctx == NULL); - dev_priv->query_cid = sw_context->last_query_ctx->id; - dev_priv->query_cid_valid = true; - dev_priv->pinned_bo = - ttm_bo_reference(sw_context->cur_query_bo); - } + dev_priv->query_cid = sw_context->cur_query_cid; + dev_priv->pinned_bo = + ttm_bo_reference(sw_context->cur_query_bo); } } /** - * vmw_translate_guest_pointer - Prepare to translate a user-space buffer - * handle to a valid SVGAGuestPtr + * vmw_query_switch_backoff - clear query barrier list + * @sw_context: The sw context used for this submission batch. * - * @dev_priv: Pointer to a device private structure. - * @sw_context: The software context used for this command batch validation. - * @ptr: Pointer to the user-space handle to be translated. - * @vmw_bo_p: Points to a location that, on successful return will carry - * a reference-counted pointer to the DMA buffer identified by the - * user-space handle in @id. + * This function is used as part of an error path, where a previously + * set up list of query barriers needs to be cleared. * - * This function saves information needed to translate a user-space buffer - * handle to a valid SVGAGuestPtr. The translation does not take place - * immediately, but during a call to vmw_apply_relocations(). - * This function builds a relocation list and a list of buffers to validate. - * The former needs to be freed using either vmw_apply_relocations() or - * vmw_free_relocations(). The latter needs to be freed using - * vmw_clear_validations. 
*/ +static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context) +{ + struct list_head *list, *next; + + list_for_each_safe(list, next, &sw_context->query_list) { + list_del_init(list); + } +} + static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAGuestPtr *ptr, @@ -718,7 +465,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, reloc = &sw_context->relocs[sw_context->cur_reloc++]; reloc->location = ptr; - ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index); + ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC, + &reloc->index); if (unlikely(ret != 0)) goto out_no_reloc; @@ -731,37 +479,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, return ret; } -/** - * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. - * - * @dev_priv: Pointer to a device private struct. - * @sw_context: The software context used for this command submission. - * @header: Pointer to the command header in the command stream. - */ -static int vmw_cmd_begin_query(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - SVGA3dCmdHeader *header) -{ - struct vmw_begin_query_cmd { - SVGA3dCmdHeader header; - SVGA3dCmdBeginQuery q; - } *cmd; - - cmd = container_of(header, struct vmw_begin_query_cmd, - header); - - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, - user_context_converter, &cmd->q.cid, - NULL); -} - -/** - * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. - * - * @dev_priv: Pointer to a device private struct. - * @sw_context: The software context used for this command submission. - * @header: Pointer to the command header in the command stream. - */ static int vmw_cmd_end_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) @@ -784,19 +501,13 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); + ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid, + &vmw_bo->base, sw_context); vmw_dmabuf_unreference(&vmw_bo); return ret; } -/* - * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. - * - * @dev_priv: Pointer to a device private struct. - * @sw_context: The software context used for this command submission. - * @header: Pointer to the command header in the command stream. - */ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) @@ -807,6 +518,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, SVGA3dCmdWaitForQuery q; } *cmd; int ret; + struct vmw_resource *ctx; cmd = container_of(header, struct vmw_query_cmd, header); ret = vmw_cmd_cid_check(dev_priv, sw_context, header); @@ -820,6 +532,16 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, return ret; vmw_dmabuf_unreference(&vmw_bo); + + /* + * This wait will act as a barrier for previous waits for this + * context. 
+ */ + + ctx = sw_context->cur_ctx; + if (!list_empty(&ctx->query_head)) + list_del_init(&ctx->query_head); + return 0; } @@ -828,12 +550,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { struct vmw_dma_buffer *vmw_bo = NULL; + struct ttm_buffer_object *bo; struct vmw_surface *srf = NULL; struct vmw_dma_cmd { SVGA3dCmdHeader header; SVGA3dCmdSurfaceDMA dma; } *cmd; int ret; + struct vmw_resource *res; cmd = container_of(header, struct vmw_dma_cmd, header); ret = vmw_translate_guest_ptr(dev_priv, sw_context, @@ -842,20 +566,37 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, &cmd->dma.host.sid, - NULL); + bo = &vmw_bo->base; + ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, + cmd->dma.host.sid, &srf); + if (ret) { + DRM_ERROR("could not find surface\n"); + goto out_no_reloc; + } + + ret = vmw_surface_validate(dev_priv, srf); if (unlikely(ret != 0)) { - if (unlikely(ret != -ERESTARTSYS)) - DRM_ERROR("could not find surface for DMA.\n"); - goto out_no_surface; + if (ret != -ERESTARTSYS) + DRM_ERROR("Culd not validate surface.\n"); + goto out_no_validate; } - srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); + /* + * Patch command stream with device SID. + */ + cmd->dma.host.sid = srf->res.id; + vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); + + vmw_dmabuf_unreference(&vmw_bo); + + res = &srf->res; + vmw_resource_to_validate_list(sw_context, &res); - vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); + return 0; -out_no_surface: +out_no_validate: + vmw_surface_unreference(&srf); +out_no_reloc: vmw_dmabuf_unreference(&vmw_bo); return ret; } @@ -888,9 +629,8 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, } for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &decl->array.surfaceId, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, + &decl->array.surfaceId); if (unlikely(ret != 0)) return ret; } @@ -904,9 +644,8 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, range = (SVGA3dPrimitiveRange *) decl; for (i = 0; i < cmd->body.numRanges; ++i, ++range) { - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &range->indexArray.surfaceId, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, + &range->indexArray.surfaceId); if (unlikely(ret != 0)) return ret; } @@ -937,9 +676,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv, if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) continue; - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, - user_surface_converter, - &cur_state->value, NULL); + ret = vmw_cmd_sid_check(dev_priv, sw_context, + &cur_state->value); if (unlikely(ret != 0)) return ret; } @@ -970,34 +708,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, return ret; } -/** - * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER - * command - * - * @dev_priv: Pointer to a device private struct. - * @sw_context: The software context being used for this batch. - * @header: Pointer to the command header in the command stream. 
- */ -static int vmw_cmd_set_shader(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - SVGA3dCmdHeader *header) -{ - struct vmw_set_shader_cmd { - SVGA3dCmdHeader header; - SVGA3dCmdSetShader body; - } *cmd; - int ret; - - cmd = container_of(header, struct vmw_set_shader_cmd, - header); - - ret = vmw_cmd_cid_check(dev_priv, sw_context, header); - if (unlikely(ret != 0)) - return ret; - - return 0; -} - static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) @@ -1071,20 +781,16 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), + VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), + VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, - &vmw_cmd_blt_surf_screen_check), - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), + &vmw_cmd_blt_surf_screen_check) }; static int vmw_cmd_check(struct vmw_private *dev_priv, @@ -1131,8 +837,6 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv, int32_t cur_size = size; int ret; - sw_context->buf_start = buf; - while (cur_size > 0) { size = cur_size; ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); @@ -1164,63 +868,43 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) for (i = 0; i < sw_context->cur_reloc; ++i) { reloc = &sw_context->relocs[i]; - validate = &sw_context->val_bufs[reloc->index].base; + validate = &sw_context->val_bufs[reloc->index]; bo = validate->bo; - switch (bo->mem.mem_type) { - case TTM_PL_VRAM: + if (bo->mem.mem_type == TTM_PL_VRAM) { reloc->location->offset += bo->offset; reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; - break; - case VMW_PL_GMR: + } else reloc->location->gmrId = bo->mem.start; - break; - default: - BUG(); - } } vmw_free_relocations(sw_context); } -/** - * vmw_resource_list_unrefererence - Free up a resource list and unreference - * all resources referenced by it. - * - * @list: The resource list. - */ -static void vmw_resource_list_unreference(struct list_head *list) -{ - struct vmw_resource_val_node *val, *val_next; - - /* - * Drop references to resources held during command submission. - */ - - list_for_each_entry_safe(val, val_next, list, head) { - list_del_init(&val->head); - vmw_resource_unreference(&val->res); - kfree(val); - } -} - static void vmw_clear_validations(struct vmw_sw_context *sw_context) { - struct vmw_validate_buffer *entry, *next; - struct vmw_resource_val_node *val; + struct ttm_validate_buffer *entry, *next; + struct vmw_resource *res, *res_next; /* * Drop references to DMA buffers held during command submission. 
*/ list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, - base.head) { - list_del(&entry->base.head); - ttm_bo_unref(&entry->base.bo); - (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash); + head) { + list_del(&entry->head); + vmw_dmabuf_validate_clear(entry->bo); + ttm_bo_unref(&entry->bo); sw_context->cur_val_buf--; } BUG_ON(sw_context->cur_val_buf != 0); - list_for_each_entry(val, &sw_context->resource_list, head) - (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash); + /* + * Drop references to resources held during command submission. + */ + vmw_resource_unreserve(&sw_context->resource_list); + list_for_each_entry_safe(res, res_next, &sw_context->resource_list, + validate_head) { + list_del_init(&res->validate_head); + vmw_resource_unreference(&res); + } } static int vmw_validate_single_buffer(struct vmw_private *dev_priv, @@ -1245,7 +929,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, * used as a GMR, this will return -ENOMEM. */ - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false); + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false); if (likely(ret == 0 || ret == -ERESTARTSYS)) return ret; @@ -1255,7 +939,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, */ DRM_INFO("Falling through to VRAM.\n"); - ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); + ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); return ret; } @@ -1263,11 +947,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, static int vmw_validate_buffers(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context) { - struct vmw_validate_buffer *entry; + struct ttm_validate_buffer *entry; int ret; - list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { - ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); + list_for_each_entry(entry, &sw_context->validate_nodes, head) { + ret = vmw_validate_single_buffer(dev_priv, entry->bo); if (unlikely(ret != 0)) return ret; } @@ -1430,8 +1114,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, { struct vmw_sw_context *sw_context = &dev_priv->ctx; struct vmw_fence_obj *fence = NULL; - struct vmw_resource *error_resource; - struct list_head resource_list; uint32_t handle; void *cmd; int ret; @@ -1461,33 +1143,24 @@ int vmw_execbuf_process(struct drm_file *file_priv, sw_context->kernel = true; sw_context->tfile = vmw_fpriv(file_priv)->tfile; + sw_context->cid_valid = false; + sw_context->sid_valid = false; sw_context->cur_reloc = 0; sw_context->cur_val_buf = 0; sw_context->fence_flags = 0; + INIT_LIST_HEAD(&sw_context->query_list); INIT_LIST_HEAD(&sw_context->resource_list); sw_context->cur_query_bo = dev_priv->pinned_bo; - sw_context->last_query_ctx = NULL; - sw_context->needs_post_query_barrier = false; - memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); + sw_context->cur_query_cid = dev_priv->query_cid; + sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL); + INIT_LIST_HEAD(&sw_context->validate_nodes); - INIT_LIST_HEAD(&sw_context->res_relocations); - if (!sw_context->res_ht_initialized) { - ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); - if (unlikely(ret != 0)) - goto out_unlock; - sw_context->res_ht_initialized = true; - } - INIT_LIST_HEAD(&resource_list); ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, command_size); if (unlikely(ret != 0)) goto out_err; - ret = vmw_resources_reserve(sw_context); - if (unlikely(ret != 0)) - goto out_err; - 
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); if (unlikely(ret != 0)) goto out_err; @@ -1496,31 +1169,24 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (unlikely(ret != 0)) goto out_err; - ret = vmw_resources_validate(sw_context); - if (unlikely(ret != 0)) - goto out_err; + vmw_apply_relocations(sw_context); if (throttle_us) { ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, throttle_us); if (unlikely(ret != 0)) - goto out_err; + goto out_throttle; } cmd = vmw_fifo_reserve(dev_priv, command_size); if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving fifo space for commands.\n"); ret = -ENOMEM; - goto out_err; + goto out_throttle; } - vmw_apply_relocations(sw_context); memcpy(cmd, kernel_commands, command_size); - - vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); - vmw_resource_relocations_free(&sw_context->res_relocations); - vmw_fifo_commit(dev_priv, command_size); vmw_query_bo_switch_commit(dev_priv, sw_context); @@ -1536,14 +1202,9 @@ int vmw_execbuf_process(struct drm_file *file_priv, if (ret != 0) DRM_ERROR("Fence submission error. Syncing.\n"); - vmw_resource_list_unreserve(&sw_context->resource_list, false); ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, (void *) fence); - if (unlikely(dev_priv->pinned_bo != NULL && - !dev_priv->query_cid_valid)) - __vmw_execbuf_release_pinned_bo(dev_priv, fence); - vmw_clear_validations(sw_context); vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, handle); @@ -1556,40 +1217,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, vmw_fence_obj_unreference(&fence); } - list_splice_init(&sw_context->resource_list, &resource_list); mutex_unlock(&dev_priv->cmdbuf_mutex); - - /* - * Unreference resources outside of the cmdbuf_mutex to - * avoid deadlocks in resource destruction paths. - */ - vmw_resource_list_unreference(&resource_list); - return 0; out_err: - vmw_resource_relocations_free(&sw_context->res_relocations); vmw_free_relocations(sw_context); +out_throttle: + vmw_query_switch_backoff(sw_context); ttm_eu_backoff_reservation(&sw_context->validate_nodes); - vmw_resource_list_unreserve(&sw_context->resource_list, true); vmw_clear_validations(sw_context); - if (unlikely(dev_priv->pinned_bo != NULL && - !dev_priv->query_cid_valid)) - __vmw_execbuf_release_pinned_bo(dev_priv, NULL); out_unlock: - list_splice_init(&sw_context->resource_list, &resource_list); - error_resource = sw_context->error_resource; - sw_context->error_resource = NULL; mutex_unlock(&dev_priv->cmdbuf_mutex); - - /* - * Unreference resources outside of the cmdbuf_mutex to - * avoid deadlocks in resource destruction paths. - */ - vmw_resource_list_unreference(&resource_list); - if (unlikely(error_resource != NULL)) - vmw_resource_unreference(&error_resource); - return ret; } @@ -1614,13 +1252,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) /** - * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned + * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned * query bo. * * @dev_priv: The device private structure. - * @fence: If non-NULL should point to a struct vmw_fence_obj issued - * _after_ a query barrier that flushes all queries touching the current - * buffer pointed to by @dev_priv->pinned_bo + * @only_on_cid_match: Only flush and unpin if the current active query cid + * matches @cid. + * @cid: Optional context id to match. 
* * This function should be used to unpin the pinned query bo, or * as a query barrier when we need to make sure that all queries have @@ -1633,26 +1271,31 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) * * The function will synchronize on the previous query barrier, and will * thus not finish until that barrier has executed. - * - * the @dev_priv->cmdbuf_mutex needs to be held by the current thread - * before calling this function. */ -void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, - struct vmw_fence_obj *fence) +void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, + bool only_on_cid_match, uint32_t cid) { int ret = 0; struct list_head validate_list; struct ttm_validate_buffer pinned_val, query_val; - struct vmw_fence_obj *lfence = NULL; + struct vmw_fence_obj *fence; + + mutex_lock(&dev_priv->cmdbuf_mutex); if (dev_priv->pinned_bo == NULL) goto out_unlock; + if (only_on_cid_match && cid != dev_priv->query_cid) + goto out_unlock; + INIT_LIST_HEAD(&validate_list); + pinned_val.new_sync_obj_arg = (void *)(unsigned long) + DRM_VMW_FENCE_FLAG_EXEC; pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); list_add_tail(&pinned_val.head, &validate_list); + query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg; query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); list_add_tail(&query_val.head, &validate_list); @@ -1665,34 +1308,25 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, goto out_no_reserve; } - if (dev_priv->query_cid_valid) { - BUG_ON(fence != NULL); - ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); - if (unlikely(ret != 0)) { - vmw_execbuf_unpin_panic(dev_priv); - goto out_no_emit; - } - dev_priv->query_cid_valid = false; + ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); + if (unlikely(ret != 0)) { + vmw_execbuf_unpin_panic(dev_priv); + goto out_no_emit; } vmw_bo_pin(dev_priv->pinned_bo, false); vmw_bo_pin(dev_priv->dummy_query_bo, false); dev_priv->dummy_query_bo_pinned = false; - if (fence == NULL) { - (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence, - NULL); - fence = lfence; - } + (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); - if (lfence != NULL) - vmw_fence_obj_unreference(&lfence); ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); ttm_bo_unref(&dev_priv->pinned_bo); out_unlock: + mutex_unlock(&dev_priv->cmdbuf_mutex); return; out_no_emit: @@ -1701,31 +1335,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ttm_bo_unref(&query_val.bo); ttm_bo_unref(&pinned_val.bo); ttm_bo_unref(&dev_priv->pinned_bo); -} - -/** - * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned - * query bo. - * - * @dev_priv: The device private structure. - * - * This function should be used to unpin the pinned query bo, or - * as a query barrier when we need to make sure that all queries have - * finished before the next fifo command. (For example on hardware - * context destructions where the hardware may otherwise leak unfinished - * queries). - * - * This function does not return any failure codes, but make attempts - * to do safe unpinning in case of errors. - * - * The function will synchronize on the previous query barrier, and will - * thus not finish until that barrier has executed. 
- */ -void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) -{ - mutex_lock(&dev_priv->cmdbuf_mutex); - if (dev_priv->query_cid_valid) - __vmw_execbuf_release_pinned_bo(dev_priv, NULL); mutex_unlock(&dev_priv->cmdbuf_mutex); } diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index c62d20e8a6f1..bc187fafd58c 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence) container_of(fence, struct vmw_user_fence, fence); struct vmw_fence_manager *fman = fence->fman; - ttm_base_object_kfree(ufence, base); + kfree(ufence); /* * Free kernel space accounting. */ diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index d9fbbe191071..7290811f89be 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -133,7 +133,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, struct drm_vmw_rect *clips = NULL; struct drm_mode_object *obj; struct vmw_framebuffer *vfb; - struct vmw_resource *res; uint32_t num_clips; int ret; @@ -181,13 +180,11 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_no_ttm_lock; - ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid, - user_surface_converter, - &res); + ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid, + &surface); if (ret) goto out_no_surface; - surface = vmw_res_to_srf(res); ret = vmw_kms_present(dev_priv, file_priv, vfb, surface, arg->sid, arg->dest_x, arg->dest_y, diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 79f7e8e60529..070fb239c5af 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) drm_mode_crtc_set_gamma_size(crtc, 256); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.dirty_info_property, 1); diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 87e39f68e9d0..cb55b7b66377 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -35,7 +35,6 @@ #include "svga_escape.h" #define VMW_MAX_NUM_STREAMS 1 -#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) struct vmw_stream { struct vmw_dma_buffer *buf; @@ -450,14 +449,6 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv) return 0; } - -static bool vmw_overlay_available(const struct vmw_private *dev_priv) -{ - return (dev_priv->overlay_priv != NULL && - ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) == - VMW_OVERLAY_CAP_MASK)); -} - int vmw_overlay_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -470,7 +461,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, struct vmw_resource *res; int ret; - if (!vmw_overlay_available(dev_priv)) + if (!overlay) return -ENOSYS; ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res); @@ -501,7 +492,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, int vmw_overlay_num_overlays(struct vmw_private *dev_priv) { - if (!vmw_overlay_available(dev_priv)) + if (!dev_priv->overlay_priv) return 0; return VMW_MAX_NUM_STREAMS; @@ -512,7 +503,7 @@ int 
vmw_overlay_num_free_overlays(struct vmw_private *dev_priv) struct vmw_overlay *overlay = dev_priv->overlay_priv; int i, k; - if (!vmw_overlay_available(dev_priv)) + if (!overlay) return 0; mutex_lock(&overlay->mutex); @@ -578,6 +569,12 @@ int vmw_overlay_init(struct vmw_private *dev_priv) if (dev_priv->overlay_priv) return -EINVAL; + if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) && + (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) { + DRM_INFO("hardware doesn't support overlays\n"); + return -ENOSYS; + } + overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return -ENOMEM; diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index e01a17b407b2..da3c6b5b98a1 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -30,7 +30,17 @@ #include #include #include -#include "vmwgfx_resource_priv.h" + +struct vmw_user_context { + struct ttm_base_object base; + struct vmw_resource res; +}; + +struct vmw_user_surface { + struct ttm_base_object base; + struct vmw_surface srf; + uint32_t size; +}; struct vmw_user_dma_buffer { struct ttm_base_object base; @@ -52,21 +62,17 @@ struct vmw_user_stream { struct vmw_stream stream; }; +struct vmw_surface_offset { + uint32_t face; + uint32_t mip; + uint32_t bo_offset; +}; + +static uint64_t vmw_user_context_size; +static uint64_t vmw_user_surface_size; static uint64_t vmw_user_stream_size; -static const struct vmw_res_func vmw_stream_func = { - .res_type = vmw_res_stream, - .needs_backup = false, - .may_evict = false, - .type_name = "video streams", - .backup_placement = NULL, - .create = NULL, - .destroy = NULL, - .bind = NULL, - .unbind = NULL -}; - static inline struct vmw_dma_buffer * vmw_dma_buffer(struct ttm_buffer_object *bo) { @@ -94,14 +100,13 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) * * Release the resource id to the resource id manager and set it to -1 */ -void vmw_resource_release_id(struct vmw_resource *res) +static void vmw_resource_release_id(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; - struct idr *idr = &dev_priv->res_idr[res->func->res_type]; write_lock(&dev_priv->resource_lock); if (res->id != -1) - idr_remove(idr, res->id); + idr_remove(res->idr, res->id); res->id = -1; write_unlock(&dev_priv->resource_lock); } @@ -111,33 +116,17 @@ static void vmw_resource_release(struct kref *kref) struct vmw_resource *res = container_of(kref, struct vmw_resource, kref); struct vmw_private *dev_priv = res->dev_priv; - int id; - struct idr *idr = &dev_priv->res_idr[res->func->res_type]; + int id = res->id; + struct idr *idr = res->idr; res->avail = false; - list_del_init(&res->lru_head); + if (res->remove_from_lists != NULL) + res->remove_from_lists(res); write_unlock(&dev_priv->resource_lock); - if (res->backup) { - struct ttm_buffer_object *bo = &res->backup->base; - - ttm_bo_reserve(bo, false, false, false, 0); - if (!list_empty(&res->mob_head) && - res->func->unbind != NULL) { - struct ttm_validate_buffer val_buf; - - val_buf.bo = bo; - res->func->unbind(res, false, &val_buf); - } - res->backup_dirty = false; - list_del_init(&res->mob_head); - ttm_bo_unreserve(bo); - vmw_dmabuf_unreference(&res->backup); - } if (likely(res->hw_destroy != NULL)) res->hw_destroy(res); - id = res->id; if (res->res_free != NULL) res->res_free(res); else @@ -164,25 +153,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res) /** * vmw_resource_alloc_id - release a 
resource id to the id manager. * + * @dev_priv: Pointer to the device private structure. * @res: Pointer to the resource. * * Allocate the lowest free resource from the resource manager, and set * @res->id to that id. Returns 0 on success and -ENOMEM on failure. */ -int vmw_resource_alloc_id(struct vmw_resource *res) +static int vmw_resource_alloc_id(struct vmw_private *dev_priv, + struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; int ret; - struct idr *idr = &dev_priv->res_idr[res->func->res_type]; BUG_ON(res->id != -1); do { - if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) + if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0)) return -ENOMEM; write_lock(&dev_priv->resource_lock); - ret = idr_get_new_above(idr, res, 1, &res->id); + ret = idr_get_new_above(res->idr, res, 1, &res->id); write_unlock(&dev_priv->resource_lock); } while (ret == -EAGAIN); @@ -190,39 +179,31 @@ int vmw_resource_alloc_id(struct vmw_resource *res) return ret; } -/** - * vmw_resource_init - initialize a struct vmw_resource - * - * @dev_priv: Pointer to a device private struct. - * @res: The struct vmw_resource to initialize. - * @obj_type: Resource object type. - * @delay_id: Boolean whether to defer device id allocation until - * the first validation. - * @res_free: Resource destructor. - * @func: Resource function table. - */ -int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, - bool delay_id, - void (*res_free) (struct vmw_resource *res), - const struct vmw_res_func *func) + +static int vmw_resource_init(struct vmw_private *dev_priv, + struct vmw_resource *res, + struct idr *idr, + enum ttm_object_type obj_type, + bool delay_id, + void (*res_free) (struct vmw_resource *res), + void (*remove_from_lists) + (struct vmw_resource *res)) { kref_init(&res->kref); res->hw_destroy = NULL; res->res_free = res_free; + res->remove_from_lists = remove_from_lists; + res->res_type = obj_type; + res->idr = idr; res->avail = false; res->dev_priv = dev_priv; - res->func = func; - INIT_LIST_HEAD(&res->lru_head); - INIT_LIST_HEAD(&res->mob_head); + INIT_LIST_HEAD(&res->query_head); + INIT_LIST_HEAD(&res->validate_head); res->id = -1; - res->backup = NULL; - res->backup_offset = 0; - res->backup_dirty = false; - res->res_dirty = false; if (delay_id) return 0; else - return vmw_resource_alloc_id(res); + return vmw_resource_alloc_id(dev_priv, res); } /** @@ -237,8 +218,9 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, * Activate basically means that the function vmw_resource_lookup will * find it. */ -void vmw_resource_activate(struct vmw_resource *res, - void (*hw_destroy) (struct vmw_resource *)) + +static void vmw_resource_activate(struct vmw_resource *res, + void (*hw_destroy) (struct vmw_resource *)) { struct vmw_private *dev_priv = res->dev_priv; @@ -268,245 +250,1396 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, } /** - * vmw_user_resource_lookup_handle - lookup a struct resource from a - * TTM user-space handle and perform basic type checks - * - * @dev_priv: Pointer to a device private struct - * @tfile: Pointer to a struct ttm_object_file identifying the caller - * @handle: The TTM user-space handle - * @converter: Pointer to an object describing the resource type - * @p_res: On successful return the location pointed to will contain - * a pointer to a refcounted struct vmw_resource. - * - * If the handle can't be found or is associated with an incorrect resource - * type, -EINVAL will be returned. 
+ * Context management: */ -int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t handle, - const struct vmw_user_resource_conv - *converter, - struct vmw_resource **p_res) + +static void vmw_hw_context_destroy(struct vmw_resource *res) { - struct ttm_base_object *base; - struct vmw_resource *res; - int ret = -EINVAL; - base = ttm_base_object_lookup(tfile, handle); - if (unlikely(base == NULL)) - return -EINVAL; + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyContext body; + } *cmd; - if (unlikely(base->object_type != converter->object_type)) - goto out_bad_resource; - res = converter->base_obj_to_res(base); + vmw_execbuf_release_pinned_bo(dev_priv, true, res->id); - read_lock(&dev_priv->resource_lock); - if (!res->avail || res->res_free != converter->res_free) { - read_unlock(&dev_priv->resource_lock); - goto out_bad_resource; + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "destruction.\n"); + return; } - kref_get(&res->kref); - read_unlock(&dev_priv->resource_lock); - - *p_res = res; - ret = 0; - -out_bad_resource: - ttm_base_object_unref(&base); + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); + cmd->header.size = cpu_to_le32(sizeof(cmd->body)); + cmd->body.cid = cpu_to_le32(res->id); - return ret; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_3d_resource_dec(dev_priv, false); } -/** - * Helper function that looks either a surface or dmabuf. - * - * The pointer this pointed at by out_surf and out_buf needs to be null. - */ -int vmw_user_lookup_handle(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t handle, - struct vmw_surface **out_surf, - struct vmw_dma_buffer **out_buf) +static int vmw_context_init(struct vmw_private *dev_priv, + struct vmw_resource *res, + void (*res_free) (struct vmw_resource *res)) { - struct vmw_resource *res; int ret; - BUG_ON(*out_surf || *out_buf); + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineContext body; + } *cmd; - ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle, - user_surface_converter, - &res); - if (!ret) { - *out_surf = vmw_res_to_srf(res); - return 0; + ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, + VMW_RES_CONTEXT, false, res_free, NULL); + + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate a resource id.\n"); + goto out_early; } - *out_surf = NULL; - ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); - return ret; -} + if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) { + DRM_ERROR("Out of hw context ids.\n"); + vmw_resource_unreference(&res); + return -ENOMEM; + } -/** - * Buffer management. 
- */ -void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) -{ - struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Fifo reserve failed.\n"); + vmw_resource_unreference(&res); + return -ENOMEM; + } - kfree(vmw_bo); + cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); + cmd->header.size = cpu_to_le32(sizeof(cmd->body)); + cmd->body.cid = cpu_to_le32(res->id); + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + (void) vmw_3d_resource_inc(dev_priv, false); + vmw_resource_activate(res, vmw_hw_context_destroy); + return 0; + +out_early: + if (res_free == NULL) + kfree(res); + else + res_free(res); + return ret; } -int vmw_dmabuf_init(struct vmw_private *dev_priv, - struct vmw_dma_buffer *vmw_bo, - size_t size, struct ttm_placement *placement, - bool interruptible, - void (*bo_free) (struct ttm_buffer_object *bo)) +struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) { - struct ttm_bo_device *bdev = &dev_priv->bdev; - size_t acc_size; + struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); int ret; - BUG_ON(!bo_free); - - acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); - memset(vmw_bo, 0, sizeof(*vmw_bo)); - - INIT_LIST_HEAD(&vmw_bo->res_list); + if (unlikely(res == NULL)) + return NULL; - ret = ttm_bo_init(bdev, &vmw_bo->base, size, - ttm_bo_type_device, placement, - 0, interruptible, - NULL, acc_size, NULL, bo_free); - return ret; + ret = vmw_context_init(dev_priv, res, NULL); + return (ret == 0) ? res : NULL; } -static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) +/** + * User-space context management: + */ + +static void vmw_user_context_free(struct vmw_resource *res) { - struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); + struct vmw_user_context *ctx = + container_of(res, struct vmw_user_context, res); + struct vmw_private *dev_priv = res->dev_priv; - ttm_base_object_kfree(vmw_user_bo, base); + kfree(ctx); + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_user_context_size); } -static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) +/** + * This function is called when user space has no more references on the + * base object. It releases the base-object's reference on the resource object. 
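The context define/destroy paths just above follow one FIFO convention: a header carrying the command id and the size of the body that follows, then the body itself. A small stand-alone sketch of that packet layout is given below; the struct names and the command id constant are invented for illustration, and the kernel code additionally converts each field to little-endian with cpu_to_le32(), which the sketch omits.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical equivalents of SVGA3dCmdHeader / SVGA3dCmdDefineContext. */
struct fifo_header {
	uint32_t id;
	uint32_t size;	/* size of the body only, not of the header */
};

struct define_context_body {
	uint32_t cid;
};

struct define_context_cmd {
	struct fifo_header header;
	struct define_context_body body;
};

#define CMD_CONTEXT_DEFINE 0x100	/* made-up id, for illustration only */

/* Fill a command packet the way vmw_context_init() fills the reserved
 * FIFO space: command id, body size, then the payload fields. */
static void encode_define_context(struct define_context_cmd *cmd, uint32_t cid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
}

int main(void)
{
	struct define_context_cmd cmd;

	encode_define_context(&cmd, 3);
	printf("id=0x%x body_size=%u cid=%u total=%zu bytes\n",
	       cmd.header.id, cmd.header.size, cmd.body.cid, sizeof(cmd));
	return 0;
}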
+ */ + +static void vmw_user_context_base_release(struct ttm_base_object **p_base) { - struct vmw_user_dma_buffer *vmw_user_bo; struct ttm_base_object *base = *p_base; - struct ttm_buffer_object *bo; + struct vmw_user_context *ctx = + container_of(base, struct vmw_user_context, base); + struct vmw_resource *res = &ctx->res; *p_base = NULL; + vmw_resource_unreference(&res); +} - if (unlikely(base == NULL)) - return; +int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_resource *res; + struct vmw_user_context *ctx; + struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret = 0; - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); - bo = &vmw_user_bo->dma.base; - ttm_bo_unref(&bo); + res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid); + if (unlikely(res == NULL)) + return -EINVAL; + + if (res->res_free != &vmw_user_context_free) { + ret = -EINVAL; + goto out; + } + + ctx = container_of(res, struct vmw_user_context, res); + if (ctx->base.tfile != tfile && !ctx->base.shareable) { + ret = -EPERM; + goto out; + } + + ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE); +out: + vmw_resource_unreference(&res); + return ret; } -/** - * vmw_user_dmabuf_alloc - Allocate a user dma buffer - * - * @dev_priv: Pointer to a struct device private. - * @tfile: Pointer to a struct ttm_object_file on which to register the user - * object. - * @size: Size of the dma buffer. - * @shareable: Boolean whether the buffer is shareable with other open files. - * @handle: Pointer to where the handle value should be assigned. - * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer - * should be assigned. - */ -int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t size, - bool shareable, - uint32_t *handle, - struct vmw_dma_buffer **p_dma_buf) +int vmw_context_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct vmw_user_dma_buffer *user_bo; - struct ttm_buffer_object *tmp; + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_context *ctx; + struct vmw_resource *res; + struct vmw_resource *tmp; + struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_master *vmaster = vmw_master(file_priv->master); int ret; - user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); - if (unlikely(user_bo == NULL)) { - DRM_ERROR("Failed to allocate a buffer.\n"); - return -ENOMEM; - } - ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, - &vmw_vram_sys_placement, true, - &vmw_user_dmabuf_destroy); + /* + * Approximate idr memory usage with 128 bytes. It will be limited + * by maximum number_of contexts anyway. 
+ */ + + if (unlikely(vmw_user_context_size == 0)) + vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; + + ret = ttm_read_lock(&vmaster->lock, true); if (unlikely(ret != 0)) return ret; - tmp = ttm_bo_reference(&user_bo->dma.base); - ret = ttm_base_object_init(tfile, - &user_bo->base, - shareable, - ttm_buffer_type, - &vmw_user_dmabuf_release, NULL); + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + vmw_user_context_size, + false, true); if (unlikely(ret != 0)) { - ttm_bo_unref(&tmp); - goto out_no_base_object; + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for context" + " creation.\n"); + goto out_unlock; + } + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (unlikely(ctx == NULL)) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_user_context_size); + ret = -ENOMEM; + goto out_unlock; } - *p_dma_buf = &user_bo->dma; - *handle = user_bo->base.hash.key; + res = &ctx->res; + ctx->base.shareable = false; + ctx->base.tfile = NULL; + + /* + * From here on, the destructor takes over resource freeing. + */ + + ret = vmw_context_init(dev_priv, res, vmw_user_context_free); + if (unlikely(ret != 0)) + goto out_unlock; + + tmp = vmw_resource_reference(&ctx->res); + ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, + &vmw_user_context_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + goto out_err; + } + + arg->cid = res->id; +out_err: + vmw_resource_unreference(&res); +out_unlock: + ttm_read_unlock(&vmaster->lock); + return ret; + +} + +int vmw_context_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int id, + struct vmw_resource **p_res) +{ + struct vmw_resource *res; + int ret = 0; + + read_lock(&dev_priv->resource_lock); + res = idr_find(&dev_priv->context_idr, id); + if (res && res->avail) { + struct vmw_user_context *ctx = + container_of(res, struct vmw_user_context, res); + if (ctx->base.tfile != tfile && !ctx->base.shareable) + ret = -EPERM; + if (p_res) + *p_res = vmw_resource_reference(res); + } else + ret = -EINVAL; + read_unlock(&dev_priv->resource_lock); -out_no_base_object: return ret; } +struct vmw_bpp { + uint8_t bpp; + uint8_t s_bpp; +}; + +/* + * Size table for the supported SVGA3D surface formats. It consists of + * two values. 
The bpp value and the s_bpp value which is short for + * "stride bits per pixel" The values are given in such a way that the + * minimum stride for the image is calculated using + * + * min_stride = w*s_bpp + * + * and the total memory requirement for the image is + * + * h*min_stride*bpp/s_bpp + * + */ +static const struct vmw_bpp vmw_sf_bpp[] = { + [SVGA3D_FORMAT_INVALID] = {0, 0}, + [SVGA3D_X8R8G8B8] = {32, 32}, + [SVGA3D_A8R8G8B8] = {32, 32}, + [SVGA3D_R5G6B5] = {16, 16}, + [SVGA3D_X1R5G5B5] = {16, 16}, + [SVGA3D_A1R5G5B5] = {16, 16}, + [SVGA3D_A4R4G4B4] = {16, 16}, + [SVGA3D_Z_D32] = {32, 32}, + [SVGA3D_Z_D16] = {16, 16}, + [SVGA3D_Z_D24S8] = {32, 32}, + [SVGA3D_Z_D15S1] = {16, 16}, + [SVGA3D_LUMINANCE8] = {8, 8}, + [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8}, + [SVGA3D_LUMINANCE16] = {16, 16}, + [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16}, + [SVGA3D_DXT1] = {4, 16}, + [SVGA3D_DXT2] = {8, 32}, + [SVGA3D_DXT3] = {8, 32}, + [SVGA3D_DXT4] = {8, 32}, + [SVGA3D_DXT5] = {8, 32}, + [SVGA3D_BUMPU8V8] = {16, 16}, + [SVGA3D_BUMPL6V5U5] = {16, 16}, + [SVGA3D_BUMPX8L8V8U8] = {32, 32}, + [SVGA3D_ARGB_S10E5] = {16, 16}, + [SVGA3D_ARGB_S23E8] = {32, 32}, + [SVGA3D_A2R10G10B10] = {32, 32}, + [SVGA3D_V8U8] = {16, 16}, + [SVGA3D_Q8W8V8U8] = {32, 32}, + [SVGA3D_CxV8U8] = {16, 16}, + [SVGA3D_X8L8V8U8] = {32, 32}, + [SVGA3D_A2W10V10U10] = {32, 32}, + [SVGA3D_ALPHA8] = {8, 8}, + [SVGA3D_R_S10E5] = {16, 16}, + [SVGA3D_R_S23E8] = {32, 32}, + [SVGA3D_RG_S10E5] = {16, 16}, + [SVGA3D_RG_S23E8] = {32, 32}, + [SVGA3D_BUFFER] = {8, 8}, + [SVGA3D_Z_D24X8] = {32, 32}, + [SVGA3D_V16U16] = {32, 32}, + [SVGA3D_G16R16] = {32, 32}, + [SVGA3D_A16B16G16R16] = {64, 64}, + [SVGA3D_UYVY] = {12, 12}, + [SVGA3D_YUY2] = {12, 12}, + [SVGA3D_NV12] = {12, 8}, + [SVGA3D_AYUV] = {32, 32}, + [SVGA3D_BC4_UNORM] = {4, 16}, + [SVGA3D_BC5_UNORM] = {8, 32}, + [SVGA3D_Z_DF16] = {16, 16}, + [SVGA3D_Z_DF24] = {24, 24}, + [SVGA3D_Z_D24S8_INT] = {32, 32} +}; + + +/** + * Surface management. + */ + +struct vmw_surface_dma { + SVGA3dCmdHeader header; + SVGA3dCmdSurfaceDMA body; + SVGA3dCopyBox cb; + SVGA3dCmdSurfaceDMASuffix suffix; +}; + +struct vmw_surface_define { + SVGA3dCmdHeader header; + SVGA3dCmdDefineSurface body; +}; + +struct vmw_surface_destroy { + SVGA3dCmdHeader header; + SVGA3dCmdDestroySurface body; +}; + + /** - * vmw_user_dmabuf_verify_access - verify access permissions on this - * buffer object. + * vmw_surface_dma_size - Compute fifo size for a dma command. + * + * @srf: Pointer to a struct vmw_surface * - * @bo: Pointer to the buffer object being accessed - * @tfile: Identifying the caller. + * Computes the required size for a surface dma command for backup or + * restoration of the surface represented by @srf. */ -int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile) +static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) { - struct vmw_user_dma_buffer *vmw_user_bo; + return srf->num_sizes * sizeof(struct vmw_surface_dma); +} - if (unlikely(bo->destroy != vmw_user_dmabuf_destroy)) - return -EPERM; - vmw_user_bo = vmw_user_dma_buffer(bo); - return (vmw_user_bo->base.tfile == tfile || - vmw_user_bo->base.shareable) ? 0 : -EPERM; +/** + * vmw_surface_define_size - Compute fifo size for a surface define command. + * + * @srf: Pointer to a struct vmw_surface + * + * Computes the required size for a surface define command for the definition + * of the surface represented by @srf. 
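The bpp/s_bpp pairs in the table above feed two computations in this file: the guest pitch, which is the width times s_bpp bits rounded up to whole bytes, and the per-level byte count, which is height * depth * pitch scaled by bpp/s_bpp (that scaling is what makes planar formats such as NV12, bpp 12 and s_bpp 8, come out right). The following is a small runnable check of that arithmetic; the format values are copied from the table, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

struct fmt_bpp {
	const char *name;
	uint32_t bpp;	/* total bits per pixel */
	uint32_t s_bpp;	/* "stride bits per pixel" */
};

/* Pitch in bytes: width * s_bpp bits, rounded up to a whole byte,
 * exactly like (w * stride_bpp + 7) >> 3 in vmw_surface_dma_encode(). */
static uint32_t min_pitch(uint32_t w, uint32_t s_bpp)
{
	return (w * s_bpp + 7) >> 3;
}

/* Total bytes for one level: h * d * pitch * bpp / s_bpp. */
static uint32_t level_bytes(uint32_t w, uint32_t h, uint32_t d,
			    const struct fmt_bpp *f)
{
	return min_pitch(w, f->s_bpp) * h * d * f->bpp / f->s_bpp;
}

int main(void)
{
	const struct fmt_bpp a8r8g8b8 = { "A8R8G8B8", 32, 32 };
	const struct fmt_bpp nv12     = { "NV12",     12,  8 };

	/* 640x480 A8R8G8B8: pitch 2560 bytes, 1228800 bytes total. */
	printf("%s: pitch=%u size=%u\n", a8r8g8b8.name,
	       min_pitch(640, a8r8g8b8.s_bpp), level_bytes(640, 480, 1, &a8r8g8b8));

	/* 640x480 NV12: pitch 640 bytes, 640*480*12/8 = 460800 bytes total. */
	printf("%s: pitch=%u size=%u\n", nv12.name,
	       min_pitch(640, nv12.s_bpp), level_bytes(640, 480, 1, &nv12));
	return 0;
}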
+ */ +static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) +{ + return sizeof(struct vmw_surface_define) + srf->num_sizes * + sizeof(SVGA3dSize); } -int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + +/** + * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. + * + * Computes the required size for a surface destroy command for the destruction + * of a hw surface. + */ +static inline uint32_t vmw_surface_destroy_size(void) { - struct vmw_private *dev_priv = vmw_priv(dev); - union drm_vmw_alloc_dmabuf_arg *arg = - (union drm_vmw_alloc_dmabuf_arg *)data; - struct drm_vmw_alloc_dmabuf_req *req = &arg->req; - struct drm_vmw_dmabuf_rep *rep = &arg->rep; - struct vmw_dma_buffer *dma_buf; - uint32_t handle; - struct vmw_master *vmaster = vmw_master(file_priv->master); - int ret; + return sizeof(struct vmw_surface_destroy); +} - ret = ttm_read_lock(&vmaster->lock, true); - if (unlikely(ret != 0)) - return ret; +/** + * vmw_surface_destroy_encode - Encode a surface_destroy command. + * + * @id: The surface id + * @cmd_space: Pointer to memory area in which the commands should be encoded. + */ +static void vmw_surface_destroy_encode(uint32_t id, + void *cmd_space) +{ + struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) + cmd_space; - ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - req->size, false, &handle, &dma_buf); - if (unlikely(ret != 0)) - goto out_no_dmabuf; + cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; + cmd->header.size = sizeof(cmd->body); + cmd->body.sid = id; +} - rep->handle = handle; - rep->map_handle = dma_buf->base.addr_space_offset; - rep->cur_gmr_id = handle; - rep->cur_gmr_offset = 0; +/** + * vmw_surface_define_encode - Encode a surface_define command. + * + * @srf: Pointer to a struct vmw_surface object. + * @cmd_space: Pointer to memory area in which the commands should be encoded. + */ +static void vmw_surface_define_encode(const struct vmw_surface *srf, + void *cmd_space) +{ + struct vmw_surface_define *cmd = (struct vmw_surface_define *) + cmd_space; + struct drm_vmw_size *src_size; + SVGA3dSize *cmd_size; + uint32_t cmd_len; + int i; + + cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); + + cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; + cmd->header.size = cmd_len; + cmd->body.sid = srf->res.id; + cmd->body.surfaceFlags = srf->flags; + cmd->body.format = cpu_to_le32(srf->format); + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) + cmd->body.face[i].numMipLevels = srf->mip_levels[i]; + + cmd += 1; + cmd_size = (SVGA3dSize *) cmd; + src_size = srf->sizes; + + for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { + cmd_size->width = src_size->width; + cmd_size->height = src_size->height; + cmd_size->depth = src_size->depth; + } +} + + +/** + * vmw_surface_dma_encode - Encode a surface_dma command. + * + * @srf: Pointer to a struct vmw_surface object. + * @cmd_space: Pointer to memory area in which the commands should be encoded. + * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents + * should be placed or read from. + * @to_surface: Boolean whether to DMA to the surface or from the surface. 
+ */ +static void vmw_surface_dma_encode(struct vmw_surface *srf, + void *cmd_space, + const SVGAGuestPtr *ptr, + bool to_surface) +{ + uint32_t i; + uint32_t bpp = vmw_sf_bpp[srf->format].bpp; + uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp; + struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; + + for (i = 0; i < srf->num_sizes; ++i) { + SVGA3dCmdHeader *header = &cmd->header; + SVGA3dCmdSurfaceDMA *body = &cmd->body; + SVGA3dCopyBox *cb = &cmd->cb; + SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; + const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; + const struct drm_vmw_size *cur_size = &srf->sizes[i]; + + header->id = SVGA_3D_CMD_SURFACE_DMA; + header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); + + body->guest.ptr = *ptr; + body->guest.ptr.offset += cur_offset->bo_offset; + body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3; + body->host.sid = srf->res.id; + body->host.face = cur_offset->face; + body->host.mipmap = cur_offset->mip; + body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : + SVGA3D_READ_HOST_VRAM); + cb->x = 0; + cb->y = 0; + cb->z = 0; + cb->srcx = 0; + cb->srcy = 0; + cb->srcz = 0; + cb->w = cur_size->width; + cb->h = cur_size->height; + cb->d = cur_size->depth; + + suffix->suffixSize = sizeof(*suffix); + suffix->maximumOffset = body->guest.pitch*cur_size->height* + cur_size->depth*bpp / stride_bpp; + suffix->flags.discard = 0; + suffix->flags.unsynchronized = 0; + suffix->flags.reserved = 0; + ++cmd; + } +}; + + +static void vmw_hw_surface_destroy(struct vmw_resource *res) +{ + + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_surface *srf; + void *cmd; + + if (res->id != -1) { + + cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "destruction.\n"); + return; + } + + vmw_surface_destroy_encode(res->id, cmd); + vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); + + /* + * used_memory_size_atomic, or separate lock + * to avoid taking dev_priv::cmdbuf_mutex in + * the destroy path. + */ + + mutex_lock(&dev_priv->cmdbuf_mutex); + srf = container_of(res, struct vmw_surface, res); + dev_priv->used_memory_size -= srf->backup_size; + mutex_unlock(&dev_priv->cmdbuf_mutex); + + } + vmw_3d_resource_dec(dev_priv, false); +} + +void vmw_surface_res_free(struct vmw_resource *res) +{ + struct vmw_surface *srf = container_of(res, struct vmw_surface, res); + + if (srf->backup) + ttm_bo_unref(&srf->backup); + kfree(srf->offsets); + kfree(srf->sizes); + kfree(srf->snooper.image); + kfree(srf); +} + + +/** + * vmw_surface_do_validate - make a surface available to the device. + * + * @dev_priv: Pointer to a device private struct. + * @srf: Pointer to a struct vmw_surface. + * + * If the surface doesn't have a hw id, allocate one, and optionally + * DMA the backed up surface contents to the device. + * + * Returns -EBUSY if there wasn't sufficient device resources to + * complete the validation. Retry after freeing up resources. + * + * May return other errors if the kernel is out of guest resources. 
+ */ +int vmw_surface_do_validate(struct vmw_private *dev_priv, + struct vmw_surface *srf) +{ + struct vmw_resource *res = &srf->res; + struct list_head val_list; + struct ttm_validate_buffer val_buf; + uint32_t submit_size; + uint8_t *cmd; + int ret; + + if (likely(res->id != -1)) + return 0; + + if (unlikely(dev_priv->used_memory_size + srf->backup_size >= + dev_priv->memory_size)) + return -EBUSY; + + /* + * Reserve- and validate the backup DMA bo. + */ + + if (srf->backup) { + INIT_LIST_HEAD(&val_list); + val_buf.bo = ttm_bo_reference(srf->backup); + val_buf.new_sync_obj_arg = (void *)((unsigned long) + DRM_VMW_FENCE_FLAG_EXEC); + list_add_tail(&val_buf.head, &val_list); + ret = ttm_eu_reserve_buffers(&val_list); + if (unlikely(ret != 0)) + goto out_no_reserve; + + ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, + true, false, false); + if (unlikely(ret != 0)) + goto out_no_validate; + } + + /* + * Alloc id for the resource. + */ + + ret = vmw_resource_alloc_id(dev_priv, res); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate a surface id.\n"); + goto out_no_id; + } + if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { + ret = -EBUSY; + goto out_no_fifo; + } + + + /* + * Encode surface define- and dma commands. + */ + + submit_size = vmw_surface_define_size(srf); + if (srf->backup) + submit_size += vmw_surface_dma_size(srf); + + cmd = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "validation.\n"); + ret = -ENOMEM; + goto out_no_fifo; + } + + vmw_surface_define_encode(srf, cmd); + if (srf->backup) { + SVGAGuestPtr ptr; + + cmd += vmw_surface_define_size(srf); + vmw_bo_get_guest_ptr(srf->backup, &ptr); + vmw_surface_dma_encode(srf, cmd, &ptr, true); + } + + vmw_fifo_commit(dev_priv, submit_size); + + /* + * Create a fence object and fence the backup buffer. + */ + + if (srf->backup) { + struct vmw_fence_obj *fence; + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + ttm_eu_fence_buffer_objects(&val_list, fence); + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + ttm_bo_unref(&val_buf.bo); + ttm_bo_unref(&srf->backup); + } + + /* + * Surface memory usage accounting. + */ + + dev_priv->used_memory_size += srf->backup_size; + + return 0; + +out_no_fifo: + vmw_resource_release_id(res); +out_no_id: +out_no_validate: + if (srf->backup) + ttm_eu_backoff_reservation(&val_list); +out_no_reserve: + if (srf->backup) + ttm_bo_unref(&val_buf.bo); + return ret; +} + +/** + * vmw_surface_evict - Evict a hw surface. + * + * @dev_priv: Pointer to a device private struct. + * @srf: Pointer to a struct vmw_surface + * + * DMA the contents of a hw surface to a backup guest buffer object, + * and destroy the hw surface, releasing its id. + */ +int vmw_surface_evict(struct vmw_private *dev_priv, + struct vmw_surface *srf) +{ + struct vmw_resource *res = &srf->res; + struct list_head val_list; + struct ttm_validate_buffer val_buf; + uint32_t submit_size; + uint8_t *cmd; + int ret; + struct vmw_fence_obj *fence; + SVGAGuestPtr ptr; + + BUG_ON(res->id == -1); + + /* + * Create a surface backup buffer object. + */ + + if (!srf->backup) { + ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size, + ttm_bo_type_device, + &vmw_srf_placement, 0, 0, true, + NULL, &srf->backup); + if (unlikely(ret != 0)) + return ret; + } + + /* + * Reserve- and validate the backup DMA bo. 
+ */ + + INIT_LIST_HEAD(&val_list); + val_buf.bo = ttm_bo_reference(srf->backup); + val_buf.new_sync_obj_arg = (void *)(unsigned long) + DRM_VMW_FENCE_FLAG_EXEC; + list_add_tail(&val_buf.head, &val_list); + ret = ttm_eu_reserve_buffers(&val_list); + if (unlikely(ret != 0)) + goto out_no_reserve; + + ret = ttm_bo_validate(srf->backup, &vmw_srf_placement, + true, false, false); + if (unlikely(ret != 0)) + goto out_no_validate; + + + /* + * Encode the dma- and surface destroy commands. + */ + + submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size(); + cmd = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "eviction.\n"); + ret = -ENOMEM; + goto out_no_fifo; + } + + vmw_bo_get_guest_ptr(srf->backup, &ptr); + vmw_surface_dma_encode(srf, cmd, &ptr, false); + cmd += vmw_surface_dma_size(srf); + vmw_surface_destroy_encode(res->id, cmd); + vmw_fifo_commit(dev_priv, submit_size); + + /* + * Surface memory usage accounting. + */ + + dev_priv->used_memory_size -= srf->backup_size; + + /* + * Create a fence object and fence the DMA buffer. + */ + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + ttm_eu_fence_buffer_objects(&val_list, fence); + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + ttm_bo_unref(&val_buf.bo); + + /* + * Release the surface ID. + */ + + vmw_resource_release_id(res); + + return 0; + +out_no_fifo: +out_no_validate: + if (srf->backup) + ttm_eu_backoff_reservation(&val_list); +out_no_reserve: + ttm_bo_unref(&val_buf.bo); + ttm_bo_unref(&srf->backup); + return ret; +} + + +/** + * vmw_surface_validate - make a surface available to the device, evicting + * other surfaces if needed. + * + * @dev_priv: Pointer to a device private struct. + * @srf: Pointer to a struct vmw_surface. + * + * Try to validate a surface and if it fails due to limited device resources, + * repeatedly try to evict other surfaces until the request can be + * acommodated. + * + * May return errors if out of resources. + */ +int vmw_surface_validate(struct vmw_private *dev_priv, + struct vmw_surface *srf) +{ + int ret; + struct vmw_surface *evict_srf; + + do { + write_lock(&dev_priv->resource_lock); + list_del_init(&srf->lru_head); + write_unlock(&dev_priv->resource_lock); + + ret = vmw_surface_do_validate(dev_priv, srf); + if (likely(ret != -EBUSY)) + break; + + write_lock(&dev_priv->resource_lock); + if (list_empty(&dev_priv->surface_lru)) { + DRM_ERROR("Out of device memory for surfaces.\n"); + ret = -EBUSY; + write_unlock(&dev_priv->resource_lock); + break; + } + + evict_srf = vmw_surface_reference + (list_first_entry(&dev_priv->surface_lru, + struct vmw_surface, + lru_head)); + list_del_init(&evict_srf->lru_head); + + write_unlock(&dev_priv->resource_lock); + (void) vmw_surface_evict(dev_priv, evict_srf); + + vmw_surface_unreference(&evict_srf); + + } while (1); + + if (unlikely(ret != 0 && srf->res.id != -1)) { + write_lock(&dev_priv->resource_lock); + list_add_tail(&srf->lru_head, &dev_priv->surface_lru); + write_unlock(&dev_priv->resource_lock); + } + + return ret; +} + + +/** + * vmw_surface_remove_from_lists - Remove surface resources from lookup lists + * + * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface + * + * As part of the resource destruction, remove the surface from any + * lookup lists. 
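vmw_surface_validate() above retries validation in a loop, evicting the least recently used surface each time the device runs out of surface memory and giving up with -EBUSY only when the LRU list is empty. Below is a toy, self-contained model of that policy under a fixed memory budget; none of the identifiers are the driver's, and real eviction of course also involves the DMA readback and fencing shown in vmw_surface_evict().

#include <errno.h>
#include <stdio.h>

#define BUDGET 100	/* toy stand-in for dev_priv->memory_size */
#define MAX_SURF 8

struct toy_surface {
	const char *name;
	int size;
	int resident;
};

static struct toy_surface *lru[MAX_SURF];	/* index 0 = least recently used */
static int lru_len;
static int used;				/* toy used_memory_size */

/* Make a surface resident, evicting LRU surfaces until it fits, or fail
 * with -EBUSY once there is nothing left to evict. */
static int toy_validate(struct toy_surface *s)
{
	while (used + s->size > BUDGET) {
		if (lru_len == 0)
			return -EBUSY;

		lru[0]->resident = 0;		/* "evict" the LRU surface */
		used -= lru[0]->size;
		for (int i = 1; i < lru_len; ++i)
			lru[i - 1] = lru[i];
		--lru_len;
	}
	s->resident = 1;
	used += s->size;
	return 0;
}

int main(void)
{
	struct toy_surface a = { "a", 60, 0 }, b = { "b", 30, 0 }, c = { "c", 50, 0 };

	toy_validate(&a);
	toy_validate(&b);
	lru[lru_len++] = &a;	/* a and b are idle, hence evictable */
	lru[lru_len++] = &b;

	printf("validate %s: %d, used=%d, %s resident=%d\n",
	       c.name, toy_validate(&c), used, a.name, a.resident);
	return 0;
}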
+ */ +static void vmw_surface_remove_from_lists(struct vmw_resource *res) +{ + struct vmw_surface *srf = container_of(res, struct vmw_surface, res); + + list_del_init(&srf->lru_head); +} + +int vmw_surface_init(struct vmw_private *dev_priv, + struct vmw_surface *srf, + void (*res_free) (struct vmw_resource *res)) +{ + int ret; + struct vmw_resource *res = &srf->res; + + BUG_ON(res_free == NULL); + INIT_LIST_HEAD(&srf->lru_head); + ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, + VMW_RES_SURFACE, true, res_free, + vmw_surface_remove_from_lists); + + if (unlikely(ret != 0)) + res_free(res); + + /* + * The surface won't be visible to hardware until a + * surface validate. + */ + + (void) vmw_3d_resource_inc(dev_priv, false); + vmw_resource_activate(res, vmw_hw_surface_destroy); + return ret; +} + +static void vmw_user_surface_free(struct vmw_resource *res) +{ + struct vmw_surface *srf = container_of(res, struct vmw_surface, res); + struct vmw_user_surface *user_srf = + container_of(srf, struct vmw_user_surface, srf); + struct vmw_private *dev_priv = srf->res.dev_priv; + uint32_t size = user_srf->size; + + if (srf->backup) + ttm_bo_unref(&srf->backup); + kfree(srf->offsets); + kfree(srf->sizes); + kfree(srf->snooper.image); + kfree(user_srf); + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); +} + +/** + * vmw_resource_unreserve - unreserve resources previously reserved for + * command submission. + * + * @list_head: list of resources to unreserve. + * + * Currently only surfaces are considered, and unreserving a surface + * means putting it back on the device's surface lru list, + * so that it can be evicted if necessary. + * This function traverses the resource list and + * checks whether resources are surfaces, and in that case puts them back + * on the device's surface LRU list. + */ +void vmw_resource_unreserve(struct list_head *list) +{ + struct vmw_resource *res; + struct vmw_surface *srf; + rwlock_t *lock = NULL; + + list_for_each_entry(res, list, validate_head) { + + if (res->res_free != &vmw_surface_res_free && + res->res_free != &vmw_user_surface_free) + continue; + + if (unlikely(lock == NULL)) { + lock = &res->dev_priv->resource_lock; + write_lock(lock); + } + + srf = container_of(res, struct vmw_surface, res); + list_del_init(&srf->lru_head); + list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru); + } + + if (lock != NULL) + write_unlock(lock); +} + +/** + * Helper function that looks either a surface or dmabuf. + * + * The pointer this pointed at by out_surf and out_buf needs to be null. 
+ */ +int vmw_user_lookup_handle(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, + struct vmw_surface **out_surf, + struct vmw_dma_buffer **out_buf) +{ + int ret; + + BUG_ON(*out_surf || *out_buf); - vmw_dmabuf_unreference(&dma_buf); + ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf); + if (!ret) + return 0; + + ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); + return ret; +} + + +int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, struct vmw_surface **out) +{ + struct vmw_resource *res; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + struct ttm_base_object *base; + int ret = -EINVAL; + + base = ttm_base_object_lookup(tfile, handle); + if (unlikely(base == NULL)) + return -EINVAL; + + if (unlikely(base->object_type != VMW_RES_SURFACE)) + goto out_bad_resource; + + user_srf = container_of(base, struct vmw_user_surface, base); + srf = &user_srf->srf; + res = &srf->res; + + read_lock(&dev_priv->resource_lock); + + if (!res->avail || res->res_free != &vmw_user_surface_free) { + read_unlock(&dev_priv->resource_lock); + goto out_bad_resource; + } + + kref_get(&res->kref); + read_unlock(&dev_priv->resource_lock); + + *out = srf; + ret = 0; + +out_bad_resource: + ttm_base_object_unref(&base); + + return ret; +} + +static void vmw_user_surface_base_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct vmw_user_surface *user_srf = + container_of(base, struct vmw_user_surface, base); + struct vmw_resource *res = &user_srf->srf.res; + + *p_base = NULL; + vmw_resource_unreference(&res); +} + +int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + + return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); +} + +int vmw_surface_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_surface *user_srf; + struct vmw_surface *srf; + struct vmw_resource *res; + struct vmw_resource *tmp; + union drm_vmw_surface_create_arg *arg = + (union drm_vmw_surface_create_arg *)data; + struct drm_vmw_surface_create_req *req = &arg->req; + struct drm_vmw_surface_arg *rep = &arg->rep; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct drm_vmw_size __user *user_sizes; + int ret; + int i, j; + uint32_t cur_bo_offset; + struct drm_vmw_size *cur_size; + struct vmw_surface_offset *cur_offset; + uint32_t stride_bpp; + uint32_t bpp; + uint32_t num_sizes; + uint32_t size; + struct vmw_master *vmaster = vmw_master(file_priv->master); + + if (unlikely(vmw_user_surface_size == 0)) + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + + 128; + + num_sizes = 0; + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) + num_sizes += req->mip_levels[i]; + + if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * + DRM_VMW_MAX_MIP_LEVELS) + return -EINVAL; + + size = vmw_user_surface_size + 128 + + ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + + ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); + + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + size, false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory 
for surface" + " creation.\n"); + goto out_unlock; + } + + user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL); + if (unlikely(user_srf == NULL)) { + ret = -ENOMEM; + goto out_no_user_srf; + } + + srf = &user_srf->srf; + res = &srf->res; + + srf->flags = req->flags; + srf->format = req->format; + srf->scanout = req->scanout; + srf->backup = NULL; + + memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); + srf->num_sizes = num_sizes; + user_srf->size = size; + + srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); + if (unlikely(srf->sizes == NULL)) { + ret = -ENOMEM; + goto out_no_sizes; + } + srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), + GFP_KERNEL); + if (unlikely(srf->sizes == NULL)) { + ret = -ENOMEM; + goto out_no_offsets; + } + + user_sizes = (struct drm_vmw_size __user *)(unsigned long) + req->size_addr; + + ret = copy_from_user(srf->sizes, user_sizes, + srf->num_sizes * sizeof(*srf->sizes)); + if (unlikely(ret != 0)) { + ret = -EFAULT; + goto out_no_copy; + } + + cur_bo_offset = 0; + cur_offset = srf->offsets; + cur_size = srf->sizes; + + bpp = vmw_sf_bpp[srf->format].bpp; + stride_bpp = vmw_sf_bpp[srf->format].s_bpp; + + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { + for (j = 0; j < srf->mip_levels[i]; ++j) { + uint32_t stride = + (cur_size->width * stride_bpp + 7) >> 3; + + cur_offset->face = i; + cur_offset->mip = j; + cur_offset->bo_offset = cur_bo_offset; + cur_bo_offset += stride * cur_size->height * + cur_size->depth * bpp / stride_bpp; + ++cur_offset; + ++cur_size; + } + } + srf->backup_size = cur_bo_offset; + + if (srf->scanout && + srf->num_sizes == 1 && + srf->sizes[0].width == 64 && + srf->sizes[0].height == 64 && + srf->format == SVGA3D_A8R8G8B8) { + + /* allocate image area and clear it */ + srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL); + if (!srf->snooper.image) { + DRM_ERROR("Failed to allocate cursor_image\n"); + ret = -ENOMEM; + goto out_no_copy; + } + } else { + srf->snooper.image = NULL; + } + srf->snooper.crtc = NULL; + + user_srf->base.shareable = false; + user_srf->base.tfile = NULL; + + /** + * From this point, the generic resource management functions + * destroy the object on failure. 
+ */ + + ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); + if (unlikely(ret != 0)) + goto out_unlock; + + tmp = vmw_resource_reference(&srf->res); + ret = ttm_base_object_init(tfile, &user_srf->base, + req->shareable, VMW_RES_SURFACE, + &vmw_user_surface_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + vmw_resource_unreference(&res); + goto out_unlock; + } + + rep->sid = user_srf->base.hash.key; + if (rep->sid == SVGA3D_INVALID_ID) + DRM_ERROR("Created bad Surface ID.\n"); + + vmw_resource_unreference(&res); + + ttm_read_unlock(&vmaster->lock); + return 0; +out_no_copy: + kfree(srf->offsets); +out_no_offsets: + kfree(srf->sizes); +out_no_sizes: + kfree(user_srf); +out_no_user_srf: + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); +out_unlock: + ttm_read_unlock(&vmaster->lock); + return ret; +} + +int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + union drm_vmw_surface_reference_arg *arg = + (union drm_vmw_surface_reference_arg *)data; + struct drm_vmw_surface_arg *req = &arg->req; + struct drm_vmw_surface_create_req *rep = &arg->rep; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + struct drm_vmw_size __user *user_sizes; + struct ttm_base_object *base; + int ret = -EINVAL; + + base = ttm_base_object_lookup(tfile, req->sid); + if (unlikely(base == NULL)) { + DRM_ERROR("Could not find surface to reference.\n"); + return -EINVAL; + } + + if (unlikely(base->object_type != VMW_RES_SURFACE)) + goto out_bad_resource; + + user_srf = container_of(base, struct vmw_user_surface, base); + srf = &user_srf->srf; + + ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not add a reference to a surface.\n"); + goto out_no_reference; + } + + rep->flags = srf->flags; + rep->format = srf->format; + memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); + user_sizes = (struct drm_vmw_size __user *)(unsigned long) + rep->size_addr; + + if (user_sizes) + ret = copy_to_user(user_sizes, srf->sizes, + srf->num_sizes * sizeof(*srf->sizes)); + if (unlikely(ret != 0)) { + DRM_ERROR("copy_to_user failed %p %u\n", + user_sizes, srf->num_sizes); + ret = -EFAULT; + } +out_bad_resource: +out_no_reference: + ttm_base_object_unref(&base); + + return ret; +} + +int vmw_surface_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, int *id) +{ + struct ttm_base_object *base; + struct vmw_user_surface *user_srf; + + int ret = -EPERM; + + base = ttm_base_object_lookup(tfile, handle); + if (unlikely(base == NULL)) + return -EINVAL; + + if (unlikely(base->object_type != VMW_RES_SURFACE)) + goto out_bad_surface; + + user_srf = container_of(base, struct vmw_user_surface, base); + *id = user_srf->srf.res.id; + ret = 0; + +out_bad_surface: + /** + * FIXME: May deadlock here when called from the + * command parsing code. + */ + + ttm_base_object_unref(&base); + return ret; +} + +/** + * Buffer management. 
+ */ +void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + + kfree(vmw_bo); +} + +int vmw_dmabuf_init(struct vmw_private *dev_priv, + struct vmw_dma_buffer *vmw_bo, + size_t size, struct ttm_placement *placement, + bool interruptible, + void (*bo_free) (struct ttm_buffer_object *bo)) +{ + struct ttm_bo_device *bdev = &dev_priv->bdev; + size_t acc_size; + int ret; + + BUG_ON(!bo_free); + + acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); + memset(vmw_bo, 0, sizeof(*vmw_bo)); + + INIT_LIST_HEAD(&vmw_bo->validate_list); + + ret = ttm_bo_init(bdev, &vmw_bo->base, size, + ttm_bo_type_device, placement, + 0, 0, interruptible, + NULL, acc_size, NULL, bo_free); + return ret; +} + +static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) +{ + struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); + + kfree(vmw_user_bo); +} + +static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) +{ + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_base_object *base = *p_base; + struct ttm_buffer_object *bo; + + *p_base = NULL; + + if (unlikely(base == NULL)) + return; + + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); + bo = &vmw_user_bo->dma.base; + ttm_bo_unref(&bo); +} + +int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + union drm_vmw_alloc_dmabuf_arg *arg = + (union drm_vmw_alloc_dmabuf_arg *)data; + struct drm_vmw_alloc_dmabuf_req *req = &arg->req; + struct drm_vmw_dmabuf_rep *rep = &arg->rep; + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_buffer_object *tmp; + struct vmw_master *vmaster = vmw_master(file_priv->master); + int ret; + + vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); + if (unlikely(vmw_user_bo == NULL)) + return -ENOMEM; + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) { + kfree(vmw_user_bo); + return ret; + } + + ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, + &vmw_vram_sys_placement, true, + &vmw_user_dmabuf_destroy); + if (unlikely(ret != 0)) + goto out_no_dmabuf; + + tmp = ttm_bo_reference(&vmw_user_bo->dma.base); + ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, + &vmw_user_bo->base, + false, + ttm_buffer_type, + &vmw_user_dmabuf_release, NULL); + if (unlikely(ret != 0)) + goto out_no_base_object; + else { + rep->handle = vmw_user_bo->base.hash.key; + rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; + rep->cur_gmr_id = vmw_user_bo->base.hash.key; + rep->cur_gmr_offset = 0; + } +out_no_base_object: + ttm_bo_unref(&tmp); out_no_dmabuf: ttm_read_unlock(&vmaster->lock); @@ -524,6 +1657,27 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, TTM_REF_USAGE); } +uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, + uint32_t cur_validate_node) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + + if (likely(vmw_bo->on_validate_list)) + return vmw_bo->cur_validate_node; + + vmw_bo->cur_validate_node = cur_validate_node; + vmw_bo->on_validate_list = true; + + return cur_validate_node; +} + +void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) +{ + struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); + + vmw_bo->on_validate_list = false; +} + int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, uint32_t handle, struct vmw_dma_buffer **out) { @@ -552,18 +1706,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, return 0; } -int 
vmw_user_dmabuf_reference(struct ttm_object_file *tfile, - struct vmw_dma_buffer *dma_buf) -{ - struct vmw_user_dma_buffer *user_bo; - - if (dma_buf->base.destroy != vmw_user_dmabuf_destroy) - return -EINVAL; - - user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); - return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); -} - /* * Stream management */ @@ -588,8 +1730,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv, struct vmw_resource *res = &stream->res; int ret; - ret = vmw_resource_init(dev_priv, res, false, res_free, - &vmw_stream_func); + ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, + VMW_RES_STREAM, false, res_free, NULL); if (unlikely(ret != 0)) { if (res_free == NULL) @@ -611,13 +1753,17 @@ static int vmw_stream_init(struct vmw_private *dev_priv, return 0; } +/** + * User-space context management: + */ + static void vmw_user_stream_free(struct vmw_resource *res) { struct vmw_user_stream *stream = container_of(res, struct vmw_user_stream, stream.res); struct vmw_private *dev_priv = res->dev_priv; - ttm_base_object_kfree(stream, base); + kfree(stream); ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_stream_size); } @@ -646,11 +1792,9 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, struct vmw_user_stream *stream; struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct idr *idr = &dev_priv->res_idr[vmw_res_stream]; int ret = 0; - - res = vmw_resource_lookup(dev_priv, idr, arg->stream_id); + res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id); if (unlikely(res == NULL)) return -EINVAL; @@ -751,8 +1895,7 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv, struct vmw_resource *res; int ret; - res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream], - *inout_id); + res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id); if (unlikely(res == NULL)) return -EINVAL; @@ -847,453 +1990,3 @@ int vmw_dumb_destroy(struct drm_file *file_priv, return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, handle, TTM_REF_USAGE); } - -/** - * vmw_resource_buf_alloc - Allocate a backup buffer for a resource. - * - * @res: The resource for which to allocate a backup buffer. - * @interruptible: Whether any sleeps during allocation should be - * performed while interruptible. - */ -static int vmw_resource_buf_alloc(struct vmw_resource *res, - bool interruptible) -{ - unsigned long size = - (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK; - struct vmw_dma_buffer *backup; - int ret; - - if (likely(res->backup)) { - BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size); - return 0; - } - - backup = kzalloc(sizeof(*backup), GFP_KERNEL); - if (unlikely(backup == NULL)) - return -ENOMEM; - - ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, - res->func->backup_placement, - interruptible, - &vmw_dmabuf_bo_free); - if (unlikely(ret != 0)) - goto out_no_dmabuf; - - res->backup = backup; - -out_no_dmabuf: - return ret; -} - -/** - * vmw_resource_do_validate - Make a resource up-to-date and visible - * to the device. - * - * @res: The resource to make visible to the device. - * @val_buf: Information about a buffer possibly - * containing backup data if a bind operation is needed. - * - * On hardware resource shortage, this function returns -EBUSY and - * should be retried once resources have been freed up. 
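A minimal, self-contained sketch of the create-then-bind step this comment describes; struct res_ops and do_validate() are illustrative stand-ins for struct vmw_res_func and the function that follows, not driver API:

#include <stdbool.h>

struct res_ops {
	int  (*create)(void *res);             /* may fail with -EBUSY */
	int  (*bind)(void *res, void *backup); /* attach backup storage */
	void (*destroy)(void *res);            /* roll back a create */
};

static int do_validate(void *res, void *backup, bool has_id,
		       const struct res_ops *ops)
{
	int ret;

	if (!has_id) {
		ret = ops->create(res);
		if (ret)              /* -EBUSY: caller evicts and retries */
			return ret;
	}

	if (ops->bind && backup) {
		ret = ops->bind(res, backup);
		if (ret) {
			ops->destroy(res);   /* undo the create above */
			return ret;
		}
	}
	return 0;
}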
- */ -static int vmw_resource_do_validate(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf) -{ - int ret = 0; - const struct vmw_res_func *func = res->func; - - if (unlikely(res->id == -1)) { - ret = func->create(res); - if (unlikely(ret != 0)) - return ret; - } - - if (func->bind && - ((func->needs_backup && list_empty(&res->mob_head) && - val_buf->bo != NULL) || - (!func->needs_backup && val_buf->bo != NULL))) { - ret = func->bind(res, val_buf); - if (unlikely(ret != 0)) - goto out_bind_failed; - if (func->needs_backup) - list_add_tail(&res->mob_head, &res->backup->res_list); - } - - /* - * Only do this on write operations, and move to - * vmw_resource_unreserve if it can be called after - * backup buffers have been unreserved. Otherwise - * sort out locking. - */ - res->res_dirty = true; - - return 0; - -out_bind_failed: - func->destroy(res); - - return ret; -} - -/** - * vmw_resource_unreserve - Unreserve a resource previously reserved for - * command submission. - * - * @res: Pointer to the struct vmw_resource to unreserve. - * @new_backup: Pointer to new backup buffer if command submission - * switched. - * @new_backup_offset: New backup offset if @new_backup is !NULL. - * - * Currently unreserving a resource means putting it back on the device's - * resource lru list, so that it can be evicted if necessary. - */ -void vmw_resource_unreserve(struct vmw_resource *res, - struct vmw_dma_buffer *new_backup, - unsigned long new_backup_offset) -{ - struct vmw_private *dev_priv = res->dev_priv; - - if (!list_empty(&res->lru_head)) - return; - - if (new_backup && new_backup != res->backup) { - - if (res->backup) { - BUG_ON(atomic_read(&res->backup->base.reserved) == 0); - list_del_init(&res->mob_head); - vmw_dmabuf_unreference(&res->backup); - } - - res->backup = vmw_dmabuf_reference(new_backup); - BUG_ON(atomic_read(&new_backup->base.reserved) == 0); - list_add_tail(&res->mob_head, &new_backup->res_list); - } - if (new_backup) - res->backup_offset = new_backup_offset; - - if (!res->func->may_evict) - return; - - write_lock(&dev_priv->resource_lock); - list_add_tail(&res->lru_head, - &res->dev_priv->res_lru[res->func->res_type]); - write_unlock(&dev_priv->resource_lock); -} - -/** - * vmw_resource_check_buffer - Check whether a backup buffer is needed - * for a resource and in that case, allocate - * one, reserve and validate it. - * - * @res: The resource for which to allocate a backup buffer. - * @interruptible: Whether any sleeps during allocation should be - * performed while interruptible. - * @val_buf: On successful return contains data about the - * reserved and validated backup buffer. 
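A rough sketch of the shape of the function that follows (stand-in callbacks only, not the TTM API): make sure a backup buffer exists, reserve it, validate its placement, and back off the reservation if validation fails.

struct backup_ops {
	int  (*alloc)(void **bo);     /* create the backup buffer */
	int  (*reserve)(void *bo);    /* take the buffer reservation */
	void (*unreserve)(void *bo);  /* back off on failure */
	int  (*validate)(void *bo);   /* move it to its placement */
};

static int check_buffer(void **bo, const struct backup_ops *ops)
{
	int ret;

	if (!*bo) {
		ret = ops->alloc(bo);
		if (ret)
			return ret;
	}

	ret = ops->reserve(*bo);
	if (ret)
		return ret;

	ret = ops->validate(*bo);
	if (ret)
		ops->unreserve(*bo);   /* leave things as we found them */
	return ret;
}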
- */ -int vmw_resource_check_buffer(struct vmw_resource *res, - bool interruptible, - struct ttm_validate_buffer *val_buf) -{ - struct list_head val_list; - bool backup_dirty = false; - int ret; - - if (unlikely(res->backup == NULL)) { - ret = vmw_resource_buf_alloc(res, interruptible); - if (unlikely(ret != 0)) - return ret; - } - - INIT_LIST_HEAD(&val_list); - val_buf->bo = ttm_bo_reference(&res->backup->base); - list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(&val_list); - if (unlikely(ret != 0)) - goto out_no_reserve; - - if (res->func->needs_backup && list_empty(&res->mob_head)) - return 0; - - backup_dirty = res->backup_dirty; - ret = ttm_bo_validate(&res->backup->base, - res->func->backup_placement, - true, false); - - if (unlikely(ret != 0)) - goto out_no_validate; - - return 0; - -out_no_validate: - ttm_eu_backoff_reservation(&val_list); -out_no_reserve: - ttm_bo_unref(&val_buf->bo); - if (backup_dirty) - vmw_dmabuf_unreference(&res->backup); - - return ret; -} - -/** - * vmw_resource_reserve - Reserve a resource for command submission - * - * @res: The resource to reserve. - * - * This function takes the resource off the LRU list and make sure - * a backup buffer is present for guest-backed resources. However, - * the buffer may not be bound to the resource at this point. - * - */ -int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) -{ - struct vmw_private *dev_priv = res->dev_priv; - int ret; - - write_lock(&dev_priv->resource_lock); - list_del_init(&res->lru_head); - write_unlock(&dev_priv->resource_lock); - - if (res->func->needs_backup && res->backup == NULL && - !no_backup) { - ret = vmw_resource_buf_alloc(res, true); - if (unlikely(ret != 0)) - return ret; - } - - return 0; -} - -/** - * vmw_resource_backoff_reservation - Unreserve and unreference a - * backup buffer - *. - * @val_buf: Backup buffer information. - */ -void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) -{ - struct list_head val_list; - - if (likely(val_buf->bo == NULL)) - return; - - INIT_LIST_HEAD(&val_list); - list_add_tail(&val_buf->head, &val_list); - ttm_eu_backoff_reservation(&val_list); - ttm_bo_unref(&val_buf->bo); -} - -/** - * vmw_resource_do_evict - Evict a resource, and transfer its data - * to a backup buffer. - * - * @res: The resource to evict. - */ -int vmw_resource_do_evict(struct vmw_resource *res) -{ - struct ttm_validate_buffer val_buf; - const struct vmw_res_func *func = res->func; - int ret; - - BUG_ON(!func->may_evict); - - val_buf.bo = NULL; - ret = vmw_resource_check_buffer(res, true, &val_buf); - if (unlikely(ret != 0)) - return ret; - - if (unlikely(func->unbind != NULL && - (!func->needs_backup || !list_empty(&res->mob_head)))) { - ret = func->unbind(res, res->res_dirty, &val_buf); - if (unlikely(ret != 0)) - goto out_no_unbind; - list_del_init(&res->mob_head); - } - ret = func->destroy(res); - res->backup_dirty = true; - res->res_dirty = false; -out_no_unbind: - vmw_resource_backoff_reservation(&val_buf); - - return ret; -} - - -/** - * vmw_resource_validate - Make a resource up-to-date and visible - * to the device. - * - * @res: The resource to make visible to the device. - * - * On succesful return, any backup DMA buffer pointed to by @res->backup will - * be reserved and validated. - * On hardware resource shortage, this function will repeatedly evict - * resources of the same type until the validation succeeds. 
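The retry-until-eviction contract spelled out above can be condensed into a short stand-alone sketch; try_validate(), lru_empty() and evict_one() are placeholders for the vmw_resource_* helpers used by the implementation that follows:

#include <errno.h>
#include <stdbool.h>

static int validate_with_eviction(void *res,
				  int (*try_validate)(void *res),
				  bool (*lru_empty)(void),
				  void (*evict_one)(void))
{
	for (;;) {
		int ret = try_validate(res);

		if (ret != -EBUSY)
			return ret;        /* success, or a hard error */

		if (lru_empty())
			return -EBUSY;     /* nothing left to evict */

		evict_one();               /* free device resources, retry */
	}
}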
- */ -int vmw_resource_validate(struct vmw_resource *res) -{ - int ret; - struct vmw_resource *evict_res; - struct vmw_private *dev_priv = res->dev_priv; - struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; - struct ttm_validate_buffer val_buf; - - if (likely(!res->func->may_evict)) - return 0; - - val_buf.bo = NULL; - if (res->backup) - val_buf.bo = &res->backup->base; - do { - ret = vmw_resource_do_validate(res, &val_buf); - if (likely(ret != -EBUSY)) - break; - - write_lock(&dev_priv->resource_lock); - if (list_empty(lru_list) || !res->func->may_evict) { - DRM_ERROR("Out of device device id entries " - "for %s.\n", res->func->type_name); - ret = -EBUSY; - write_unlock(&dev_priv->resource_lock); - break; - } - - evict_res = vmw_resource_reference - (list_first_entry(lru_list, struct vmw_resource, - lru_head)); - list_del_init(&evict_res->lru_head); - - write_unlock(&dev_priv->resource_lock); - vmw_resource_do_evict(evict_res); - vmw_resource_unreference(&evict_res); - } while (1); - - if (unlikely(ret != 0)) - goto out_no_validate; - else if (!res->func->needs_backup && res->backup) { - list_del_init(&res->mob_head); - vmw_dmabuf_unreference(&res->backup); - } - - return 0; - -out_no_validate: - return ret; -} - -/** - * vmw_fence_single_bo - Utility function to fence a single TTM buffer - * object without unreserving it. - * - * @bo: Pointer to the struct ttm_buffer_object to fence. - * @fence: Pointer to the fence. If NULL, this function will - * insert a fence into the command stream.. - * - * Contrary to the ttm_eu version of this function, it takes only - * a single buffer object instead of a list, and it also doesn't - * unreserve the buffer object, which needs to be done separately. - */ -void vmw_fence_single_bo(struct ttm_buffer_object *bo, - struct vmw_fence_obj *fence) -{ - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_driver *driver = bdev->driver; - struct vmw_fence_obj *old_fence_obj; - struct vmw_private *dev_priv = - container_of(bdev, struct vmw_private, bdev); - - if (fence == NULL) - vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - else - driver->sync_obj_ref(fence); - - spin_lock(&bdev->fence_lock); - - old_fence_obj = bo->sync_obj; - bo->sync_obj = fence; - - spin_unlock(&bdev->fence_lock); - - if (old_fence_obj) - vmw_fence_obj_unreference(&old_fence_obj); -} - -/** - * vmw_resource_move_notify - TTM move_notify_callback - * - * @bo: The TTM buffer object about to move. - * @mem: The truct ttm_mem_reg indicating to what memory - * region the move is taking place. - * - * For now does nothing. - */ -void vmw_resource_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) -{ -} - -/** - * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. - * - * @res: The resource being queried. - */ -bool vmw_resource_needs_backup(const struct vmw_resource *res) -{ - return res->func->needs_backup; -} - -/** - * vmw_resource_evict_type - Evict all resources of a specific type - * - * @dev_priv: Pointer to a device private struct - * @type: The resource type to evict - * - * To avoid thrashing starvation or as part of the hibernation sequence, - * evict all evictable resources of a specific type. 
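The eviction helpers that follow drain an LRU list without holding the resource lock across the eviction itself; a small illustrative sketch of that locking pattern, with a pthread mutex and a singly linked list standing in for the driver's rwlock and struct list_head:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static struct node *lru_head;
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static void evict(struct node *n)
{
	(void)n;	/* talk to the device here; must run unlocked */
}

static void evict_all(void)
{
	for (;;) {
		struct node *n;

		pthread_mutex_lock(&lru_lock);
		n = lru_head;
		if (!n) {
			pthread_mutex_unlock(&lru_lock);
			break;
		}
		lru_head = n->next;		/* unlink while locked */
		pthread_mutex_unlock(&lru_lock);

		evict(n);			/* heavy work, no lock held */
	}
}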
- */ -static void vmw_resource_evict_type(struct vmw_private *dev_priv, - enum vmw_res_type type) -{ - struct list_head *lru_list = &dev_priv->res_lru[type]; - struct vmw_resource *evict_res; - - do { - write_lock(&dev_priv->resource_lock); - - if (list_empty(lru_list)) - goto out_unlock; - - evict_res = vmw_resource_reference( - list_first_entry(lru_list, struct vmw_resource, - lru_head)); - list_del_init(&evict_res->lru_head); - write_unlock(&dev_priv->resource_lock); - vmw_resource_do_evict(evict_res); - vmw_resource_unreference(&evict_res); - } while (1); - -out_unlock: - write_unlock(&dev_priv->resource_lock); -} - -/** - * vmw_resource_evict_all - Evict all evictable resources - * - * @dev_priv: Pointer to a device private struct - * - * To avoid thrashing starvation or as part of the hibernation sequence, - * evict all evictable resources. In particular this means that all - * guest-backed resources that are registered with the device are - * evicted and the OTable becomes clean. - */ -void vmw_resource_evict_all(struct vmw_private *dev_priv) -{ - enum vmw_res_type type; - - mutex_lock(&dev_priv->cmdbuf_mutex); - - for (type = 0; type < vmw_res_max; ++type) - vmw_resource_evict_type(dev_priv, type); - - mutex_unlock(&dev_priv->cmdbuf_mutex); -} diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h deleted file mode 100644 index f3adeed2854c..000000000000 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h +++ /dev/null @@ -1,84 +0,0 @@ -/************************************************************************** - * - * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ - -#ifndef _VMWGFX_RESOURCE_PRIV_H_ -#define _VMWGFX_RESOURCE_PRIV_H_ - -#include "vmwgfx_drv.h" - -/** - * struct vmw_user_resource_conv - Identify a derived user-exported resource - * type and provide a function to convert its ttm_base_object pointer to - * a struct vmw_resource - */ -struct vmw_user_resource_conv { - enum ttm_object_type object_type; - struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base); - void (*res_free) (struct vmw_resource *res); -}; - -/** - * struct vmw_res_func - members and functions common for a resource type - * - * @res_type: Enum that identifies the lru list to use for eviction. - * @needs_backup: Whether the resource is guest-backed and needs - * persistent buffer storage. - * @type_name: String that identifies the resource type. - * @backup_placement: TTM placement for backup buffers. - * @may_evict Whether the resource may be evicted. - * @create: Create a hardware resource. - * @destroy: Destroy a hardware resource. - * @bind: Bind a hardware resource to persistent buffer storage. - * @unbind: Unbind a hardware resource from persistent - * buffer storage. - */ - -struct vmw_res_func { - enum vmw_res_type res_type; - bool needs_backup; - const char *type_name; - struct ttm_placement *backup_placement; - bool may_evict; - - int (*create) (struct vmw_resource *res); - int (*destroy) (struct vmw_resource *res); - int (*bind) (struct vmw_resource *res, - struct ttm_validate_buffer *val_buf); - int (*unbind) (struct vmw_resource *res, - bool readback, - struct ttm_validate_buffer *val_buf); -}; - -int vmw_resource_alloc_id(struct vmw_resource *res); -void vmw_resource_release_id(struct vmw_resource *res); -int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, - bool delay_id, - void (*res_free) (struct vmw_resource *res), - const struct vmw_res_func *func); -void vmw_resource_activate(struct vmw_resource *res, - void (*hw_destroy) (struct vmw_resource *)); -#endif diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 26387c3d5a21..6deaf2f8bab1 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) drm_mode_crtc_set_gamma_size(crtc, 256); - drm_object_attach_property(&connector->base, + drm_connector_attach_property(connector, dev->mode_config.dirty_info_property, 1); @@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) return -EINVAL; } - if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) { + if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) { DRM_INFO("Not using screen objects," " missing cap SCREEN_OBJECT_2\n"); return -ENOSYS; diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c deleted file mode 100644 index 582814339748..000000000000 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ /dev/null @@ -1,893 +0,0 @@ -/************************************************************************** - * - * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#include "vmwgfx_drv.h" -#include "vmwgfx_resource_priv.h" -#include -#include "svga3d_surfacedefs.h" - -/** - * struct vmw_user_surface - User-space visible surface resource - * - * @base: The TTM base object handling user-space visibility. - * @srf: The surface metadata. - * @size: TTM accounting size for the surface. - */ -struct vmw_user_surface { - struct ttm_base_object base; - struct vmw_surface srf; - uint32_t size; - uint32_t backup_handle; -}; - -/** - * struct vmw_surface_offset - Backing store mip level offset info - * - * @face: Surface face. - * @mip: Mip level. - * @bo_offset: Offset into backing store of this mip level. 
- * - */ -struct vmw_surface_offset { - uint32_t face; - uint32_t mip; - uint32_t bo_offset; -}; - -static void vmw_user_surface_free(struct vmw_resource *res); -static struct vmw_resource * -vmw_user_surface_base_to_res(struct ttm_base_object *base); -static int vmw_legacy_srf_bind(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf); -static int vmw_legacy_srf_unbind(struct vmw_resource *res, - bool readback, - struct ttm_validate_buffer *val_buf); -static int vmw_legacy_srf_create(struct vmw_resource *res); -static int vmw_legacy_srf_destroy(struct vmw_resource *res); - -static const struct vmw_user_resource_conv user_surface_conv = { - .object_type = VMW_RES_SURFACE, - .base_obj_to_res = vmw_user_surface_base_to_res, - .res_free = vmw_user_surface_free -}; - -const struct vmw_user_resource_conv *user_surface_converter = - &user_surface_conv; - - -static uint64_t vmw_user_surface_size; - -static const struct vmw_res_func vmw_legacy_surface_func = { - .res_type = vmw_res_surface, - .needs_backup = false, - .may_evict = true, - .type_name = "legacy surfaces", - .backup_placement = &vmw_srf_placement, - .create = &vmw_legacy_srf_create, - .destroy = &vmw_legacy_srf_destroy, - .bind = &vmw_legacy_srf_bind, - .unbind = &vmw_legacy_srf_unbind -}; - -/** - * struct vmw_surface_dma - SVGA3D DMA command - */ -struct vmw_surface_dma { - SVGA3dCmdHeader header; - SVGA3dCmdSurfaceDMA body; - SVGA3dCopyBox cb; - SVGA3dCmdSurfaceDMASuffix suffix; -}; - -/** - * struct vmw_surface_define - SVGA3D Surface Define command - */ -struct vmw_surface_define { - SVGA3dCmdHeader header; - SVGA3dCmdDefineSurface body; -}; - -/** - * struct vmw_surface_destroy - SVGA3D Surface Destroy command - */ -struct vmw_surface_destroy { - SVGA3dCmdHeader header; - SVGA3dCmdDestroySurface body; -}; - - -/** - * vmw_surface_dma_size - Compute fifo size for a dma command. - * - * @srf: Pointer to a struct vmw_surface - * - * Computes the required size for a surface dma command for backup or - * restoration of the surface represented by @srf. - */ -static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf) -{ - return srf->num_sizes * sizeof(struct vmw_surface_dma); -} - - -/** - * vmw_surface_define_size - Compute fifo size for a surface define command. - * - * @srf: Pointer to a struct vmw_surface - * - * Computes the required size for a surface define command for the definition - * of the surface represented by @srf. - */ -static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf) -{ - return sizeof(struct vmw_surface_define) + srf->num_sizes * - sizeof(SVGA3dSize); -} - - -/** - * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. - * - * Computes the required size for a surface destroy command for the destruction - * of a hw surface. - */ -static inline uint32_t vmw_surface_destroy_size(void) -{ - return sizeof(struct vmw_surface_destroy); -} - -/** - * vmw_surface_destroy_encode - Encode a surface_destroy command. - * - * @id: The surface id - * @cmd_space: Pointer to memory area in which the commands should be encoded. - */ -static void vmw_surface_destroy_encode(uint32_t id, - void *cmd_space) -{ - struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *) - cmd_space; - - cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; - cmd->header.size = sizeof(cmd->body); - cmd->body.sid = id; -} - -/** - * vmw_surface_define_encode - Encode a surface_define command. - * - * @srf: Pointer to a struct vmw_surface object. 
- * @cmd_space: Pointer to memory area in which the commands should be encoded. - */ -static void vmw_surface_define_encode(const struct vmw_surface *srf, - void *cmd_space) -{ - struct vmw_surface_define *cmd = (struct vmw_surface_define *) - cmd_space; - struct drm_vmw_size *src_size; - SVGA3dSize *cmd_size; - uint32_t cmd_len; - int i; - - cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); - - cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; - cmd->header.size = cmd_len; - cmd->body.sid = srf->res.id; - cmd->body.surfaceFlags = srf->flags; - cmd->body.format = cpu_to_le32(srf->format); - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) - cmd->body.face[i].numMipLevels = srf->mip_levels[i]; - - cmd += 1; - cmd_size = (SVGA3dSize *) cmd; - src_size = srf->sizes; - - for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { - cmd_size->width = src_size->width; - cmd_size->height = src_size->height; - cmd_size->depth = src_size->depth; - } -} - -/** - * vmw_surface_dma_encode - Encode a surface_dma command. - * - * @srf: Pointer to a struct vmw_surface object. - * @cmd_space: Pointer to memory area in which the commands should be encoded. - * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents - * should be placed or read from. - * @to_surface: Boolean whether to DMA to the surface or from the surface. - */ -static void vmw_surface_dma_encode(struct vmw_surface *srf, - void *cmd_space, - const SVGAGuestPtr *ptr, - bool to_surface) -{ - uint32_t i; - struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; - const struct svga3d_surface_desc *desc = - svga3dsurface_get_desc(srf->format); - - for (i = 0; i < srf->num_sizes; ++i) { - SVGA3dCmdHeader *header = &cmd->header; - SVGA3dCmdSurfaceDMA *body = &cmd->body; - SVGA3dCopyBox *cb = &cmd->cb; - SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; - const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; - const struct drm_vmw_size *cur_size = &srf->sizes[i]; - - header->id = SVGA_3D_CMD_SURFACE_DMA; - header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); - - body->guest.ptr = *ptr; - body->guest.ptr.offset += cur_offset->bo_offset; - body->guest.pitch = svga3dsurface_calculate_pitch(desc, - cur_size); - body->host.sid = srf->res.id; - body->host.face = cur_offset->face; - body->host.mipmap = cur_offset->mip; - body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : - SVGA3D_READ_HOST_VRAM); - cb->x = 0; - cb->y = 0; - cb->z = 0; - cb->srcx = 0; - cb->srcy = 0; - cb->srcz = 0; - cb->w = cur_size->width; - cb->h = cur_size->height; - cb->d = cur_size->depth; - - suffix->suffixSize = sizeof(*suffix); - suffix->maximumOffset = - svga3dsurface_get_image_buffer_size(desc, cur_size, - body->guest.pitch); - suffix->flags.discard = 0; - suffix->flags.unsynchronized = 0; - suffix->flags.reserved = 0; - ++cmd; - } -}; - - -/** - * vmw_hw_surface_destroy - destroy a Device surface - * - * @res: Pointer to a struct vmw_resource embedded in a struct - * vmw_surface. - * - * Destroys a the device surface associated with a struct vmw_surface if - * any, and adjusts accounting and resource count accordingly. 
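The encode helpers above all build fixed-layout FIFO commands: a small header carrying the command id and body size, immediately followed by the body. A stand-alone sketch of that layout; the command id value and struct names here are illustrative, not the SVGA definitions:

#include <stdint.h>
#include <string.h>

struct cmd_header   { uint32_t id; uint32_t size; };
struct destroy_body { uint32_t sid; };

struct destroy_cmd {
	struct cmd_header   header;
	struct destroy_body body;
};

static void encode_destroy(uint32_t surface_id, void *cmd_space)
{
	struct destroy_cmd cmd = {
		.header = { .id = 0x0022 /* placeholder id */,
			    .size = sizeof(cmd.body) },
		.body   = { .sid = surface_id },
	};

	memcpy(cmd_space, &cmd, sizeof(cmd));	/* into reserved FIFO space */
}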
- */ -static void vmw_hw_surface_destroy(struct vmw_resource *res) -{ - - struct vmw_private *dev_priv = res->dev_priv; - struct vmw_surface *srf; - void *cmd; - - if (res->id != -1) { - - cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "destruction.\n"); - return; - } - - vmw_surface_destroy_encode(res->id, cmd); - vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); - - /* - * used_memory_size_atomic, or separate lock - * to avoid taking dev_priv::cmdbuf_mutex in - * the destroy path. - */ - - mutex_lock(&dev_priv->cmdbuf_mutex); - srf = vmw_res_to_srf(res); - dev_priv->used_memory_size -= res->backup_size; - mutex_unlock(&dev_priv->cmdbuf_mutex); - } - vmw_3d_resource_dec(dev_priv, false); -} - -/** - * vmw_legacy_srf_create - Create a device surface as part of the - * resource validation process. - * - * @res: Pointer to a struct vmw_surface. - * - * If the surface doesn't have a hw id. - * - * Returns -EBUSY if there wasn't sufficient device resources to - * complete the validation. Retry after freeing up resources. - * - * May return other errors if the kernel is out of guest resources. - */ -static int vmw_legacy_srf_create(struct vmw_resource *res) -{ - struct vmw_private *dev_priv = res->dev_priv; - struct vmw_surface *srf; - uint32_t submit_size; - uint8_t *cmd; - int ret; - - if (likely(res->id != -1)) - return 0; - - srf = vmw_res_to_srf(res); - if (unlikely(dev_priv->used_memory_size + res->backup_size >= - dev_priv->memory_size)) - return -EBUSY; - - /* - * Alloc id for the resource. - */ - - ret = vmw_resource_alloc_id(res); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed to allocate a surface id.\n"); - goto out_no_id; - } - - if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) { - ret = -EBUSY; - goto out_no_fifo; - } - - /* - * Encode surface define- commands. - */ - - submit_size = vmw_surface_define_size(srf); - cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "creation.\n"); - ret = -ENOMEM; - goto out_no_fifo; - } - - vmw_surface_define_encode(srf, cmd); - vmw_fifo_commit(dev_priv, submit_size); - /* - * Surface memory usage accounting. - */ - - dev_priv->used_memory_size += res->backup_size; - return 0; - -out_no_fifo: - vmw_resource_release_id(res); -out_no_id: - return ret; -} - -/** - * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface. - * - * @res: Pointer to a struct vmw_res embedded in a struct - * vmw_surface. - * @val_buf: Pointer to a struct ttm_validate_buffer containing - * information about the backup buffer. - * @bind: Boolean wether to DMA to the surface. - * - * Transfer backup data to or from a legacy surface as part of the - * validation process. - * May return other errors if the kernel is out of guest resources. - * The backup buffer will be fenced or idle upon successful completion, - * and if the surface needs persistent backup storage, the backup buffer - * will also be returned reserved iff @bind is true. 
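A hedged sketch of the reserve, encode, commit, fence sequence this comment describes; every callback below is a placeholder for the corresponding vmw_fifo_reserve(), vmw_surface_dma_encode(), vmw_fifo_commit() and vmw_fence_single_bo() call in the real function:

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>

static int surface_dma(void *srf, void *backup_bo, bool to_surface,
		       size_t cmd_size,
		       void *(*fifo_reserve)(size_t size),
		       void (*encode_dma)(void *srf, void *cmd, bool to_surface),
		       void (*fifo_commit)(size_t size),
		       int (*fence_buffer)(void *bo))
{
	void *cmd = fifo_reserve(cmd_size);

	if (!cmd)
		return -ENOMEM;		/* no FIFO space, nothing committed */

	encode_dma(srf, cmd, to_surface);
	fifo_commit(cmd_size);

	/* Fence the backup buffer so later users wait for the transfer. */
	return fence_buffer(backup_bo);
}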
- */ -static int vmw_legacy_srf_dma(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf, - bool bind) -{ - SVGAGuestPtr ptr; - struct vmw_fence_obj *fence; - uint32_t submit_size; - struct vmw_surface *srf = vmw_res_to_srf(res); - uint8_t *cmd; - struct vmw_private *dev_priv = res->dev_priv; - - BUG_ON(val_buf->bo == NULL); - - submit_size = vmw_surface_dma_size(srf); - cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "DMA.\n"); - return -ENOMEM; - } - vmw_bo_get_guest_ptr(val_buf->bo, &ptr); - vmw_surface_dma_encode(srf, cmd, &ptr, bind); - - vmw_fifo_commit(dev_priv, submit_size); - - /* - * Create a fence object and fence the backup buffer. - */ - - (void) vmw_execbuf_fence_commands(NULL, dev_priv, - &fence, NULL); - - vmw_fence_single_bo(val_buf->bo, fence); - - if (likely(fence != NULL)) - vmw_fence_obj_unreference(&fence); - - return 0; -} - -/** - * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the - * surface validation process. - * - * @res: Pointer to a struct vmw_res embedded in a struct - * vmw_surface. - * @val_buf: Pointer to a struct ttm_validate_buffer containing - * information about the backup buffer. - * - * This function will copy backup data to the surface if the - * backup buffer is dirty. - */ -static int vmw_legacy_srf_bind(struct vmw_resource *res, - struct ttm_validate_buffer *val_buf) -{ - if (!res->backup_dirty) - return 0; - - return vmw_legacy_srf_dma(res, val_buf, true); -} - - -/** - * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the - * surface eviction process. - * - * @res: Pointer to a struct vmw_res embedded in a struct - * vmw_surface. - * @val_buf: Pointer to a struct ttm_validate_buffer containing - * information about the backup buffer. - * - * This function will copy backup data from the surface. - */ -static int vmw_legacy_srf_unbind(struct vmw_resource *res, - bool readback, - struct ttm_validate_buffer *val_buf) -{ - if (unlikely(readback)) - return vmw_legacy_srf_dma(res, val_buf, false); - return 0; -} - -/** - * vmw_legacy_srf_destroy - Destroy a device surface as part of a - * resource eviction process. - * - * @res: Pointer to a struct vmw_res embedded in a struct - * vmw_surface. - */ -static int vmw_legacy_srf_destroy(struct vmw_resource *res) -{ - struct vmw_private *dev_priv = res->dev_priv; - uint32_t submit_size; - uint8_t *cmd; - - BUG_ON(res->id == -1); - - /* - * Encode the dma- and surface destroy commands. - */ - - submit_size = vmw_surface_destroy_size(); - cmd = vmw_fifo_reserve(dev_priv, submit_size); - if (unlikely(cmd == NULL)) { - DRM_ERROR("Failed reserving FIFO space for surface " - "eviction.\n"); - return -ENOMEM; - } - - vmw_surface_destroy_encode(res->id, cmd); - vmw_fifo_commit(dev_priv, submit_size); - - /* - * Surface memory usage accounting. - */ - - dev_priv->used_memory_size -= res->backup_size; - - /* - * Release the surface ID. - */ - - vmw_resource_release_id(res); - - return 0; -} - - -/** - * vmw_surface_init - initialize a struct vmw_surface - * - * @dev_priv: Pointer to a device private struct. - * @srf: Pointer to the struct vmw_surface to initialize. - * @res_free: Pointer to a resource destructor used to free - * the object. 
- */ -static int vmw_surface_init(struct vmw_private *dev_priv, - struct vmw_surface *srf, - void (*res_free) (struct vmw_resource *res)) -{ - int ret; - struct vmw_resource *res = &srf->res; - - BUG_ON(res_free == NULL); - (void) vmw_3d_resource_inc(dev_priv, false); - ret = vmw_resource_init(dev_priv, res, true, res_free, - &vmw_legacy_surface_func); - - if (unlikely(ret != 0)) { - vmw_3d_resource_dec(dev_priv, false); - res_free(res); - return ret; - } - - /* - * The surface won't be visible to hardware until a - * surface validate. - */ - - vmw_resource_activate(res, vmw_hw_surface_destroy); - return ret; -} - -/** - * vmw_user_surface_base_to_res - TTM base object to resource converter for - * user visible surfaces - * - * @base: Pointer to a TTM base object - * - * Returns the struct vmw_resource embedded in a struct vmw_surface - * for the user-visible object identified by the TTM base object @base. - */ -static struct vmw_resource * -vmw_user_surface_base_to_res(struct ttm_base_object *base) -{ - return &(container_of(base, struct vmw_user_surface, base)->srf.res); -} - -/** - * vmw_user_surface_free - User visible surface resource destructor - * - * @res: A struct vmw_resource embedded in a struct vmw_surface. - */ -static void vmw_user_surface_free(struct vmw_resource *res) -{ - struct vmw_surface *srf = vmw_res_to_srf(res); - struct vmw_user_surface *user_srf = - container_of(srf, struct vmw_user_surface, srf); - struct vmw_private *dev_priv = srf->res.dev_priv; - uint32_t size = user_srf->size; - - kfree(srf->offsets); - kfree(srf->sizes); - kfree(srf->snooper.image); - ttm_base_object_kfree(user_srf, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); -} - -/** - * vmw_user_surface_free - User visible surface TTM base object destructor - * - * @p_base: Pointer to a pointer to a TTM base object - * embedded in a struct vmw_user_surface. - * - * Drops the base object's reference on its resource, and the - * pointer pointed to by *p_base is set to NULL. - */ -static void vmw_user_surface_base_release(struct ttm_base_object **p_base) -{ - struct ttm_base_object *base = *p_base; - struct vmw_user_surface *user_srf = - container_of(base, struct vmw_user_surface, base); - struct vmw_resource *res = &user_srf->srf.res; - - *p_base = NULL; - vmw_resource_unreference(&res); -} - -/** - * vmw_user_surface_destroy_ioctl - Ioctl function implementing - * the user surface destroy functionality. - * - * @dev: Pointer to a struct drm_device. - * @data: Pointer to data copied from / to user-space. - * @file_priv: Pointer to a drm file private structure. - */ -int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - - return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); -} - -/** - * vmw_user_surface_define_ioctl - Ioctl function implementing - * the user surface define functionality. - * - * @dev: Pointer to a struct drm_device. - * @data: Pointer to data copied from / to user-space. - * @file_priv: Pointer to a drm file private structure. 
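The ioctl implementation that follows places every (face, mip level) image at a byte offset in a single backing store and uses the running total as the backup size. A stand-alone sketch of that bookkeeping; it assumes, purely for illustration, that mip dimensions halve at each level and that four bytes per pixel apply, whereas the driver takes each size from the user-supplied array and computes sizes via svga3dsurface_get_image_buffer_size():

#include <stdint.h>
#include <stddef.h>

#define SKETCH_MAX_FACES 6

struct mip_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;	/* byte offset into the backing store */
};

static uint32_t image_size(uint32_t w, uint32_t h, uint32_t d)
{
	return w * h * d * 4;	/* assumed 4 bytes per pixel for the sketch */
}

static size_t layout_surface(const uint32_t mip_levels[SKETCH_MAX_FACES],
			     uint32_t w, uint32_t h, uint32_t d,
			     struct mip_offset *out)
{
	uint32_t offset = 0;

	for (uint32_t face = 0; face < SKETCH_MAX_FACES; ++face) {
		uint32_t mw = w, mh = h, md = d;

		for (uint32_t mip = 0; mip < mip_levels[face]; ++mip) {
			*out++ = (struct mip_offset){ face, mip, offset };
			offset += image_size(mw, mh, md);

			/* each level halves the dimensions, never below 1 */
			mw = mw > 1 ? mw / 2 : 1;
			mh = mh > 1 ? mh / 2 : 1;
			md = md > 1 ? md / 2 : 1;
		}
	}
	return offset;	/* required backing-store (backup) size */
}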
- */ -int vmw_surface_define_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_user_surface *user_srf; - struct vmw_surface *srf; - struct vmw_resource *res; - struct vmw_resource *tmp; - union drm_vmw_surface_create_arg *arg = - (union drm_vmw_surface_create_arg *)data; - struct drm_vmw_surface_create_req *req = &arg->req; - struct drm_vmw_surface_arg *rep = &arg->rep; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct drm_vmw_size __user *user_sizes; - int ret; - int i, j; - uint32_t cur_bo_offset; - struct drm_vmw_size *cur_size; - struct vmw_surface_offset *cur_offset; - uint32_t num_sizes; - uint32_t size; - struct vmw_master *vmaster = vmw_master(file_priv->master); - const struct svga3d_surface_desc *desc; - - if (unlikely(vmw_user_surface_size == 0)) - vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + - 128; - - num_sizes = 0; - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) - num_sizes += req->mip_levels[i]; - - if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * - DRM_VMW_MAX_MIP_LEVELS) - return -EINVAL; - - size = vmw_user_surface_size + 128 + - ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + - ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); - - - desc = svga3dsurface_get_desc(req->format); - if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { - DRM_ERROR("Invalid surface format for surface creation.\n"); - return -EINVAL; - } - - ret = ttm_read_lock(&vmaster->lock, true); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - size, false, true); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for surface" - " creation.\n"); - goto out_unlock; - } - - user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); - if (unlikely(user_srf == NULL)) { - ret = -ENOMEM; - goto out_no_user_srf; - } - - srf = &user_srf->srf; - res = &srf->res; - - srf->flags = req->flags; - srf->format = req->format; - srf->scanout = req->scanout; - - memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); - srf->num_sizes = num_sizes; - user_srf->size = size; - - srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); - if (unlikely(srf->sizes == NULL)) { - ret = -ENOMEM; - goto out_no_sizes; - } - srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets), - GFP_KERNEL); - if (unlikely(srf->sizes == NULL)) { - ret = -ENOMEM; - goto out_no_offsets; - } - - user_sizes = (struct drm_vmw_size __user *)(unsigned long) - req->size_addr; - - ret = copy_from_user(srf->sizes, user_sizes, - srf->num_sizes * sizeof(*srf->sizes)); - if (unlikely(ret != 0)) { - ret = -EFAULT; - goto out_no_copy; - } - - srf->base_size = *srf->sizes; - srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; - srf->multisample_count = 1; - - cur_bo_offset = 0; - cur_offset = srf->offsets; - cur_size = srf->sizes; - - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { - for (j = 0; j < srf->mip_levels[i]; ++j) { - uint32_t stride = svga3dsurface_calculate_pitch - (desc, cur_size); - - cur_offset->face = i; - cur_offset->mip = j; - cur_offset->bo_offset = cur_bo_offset; - cur_bo_offset += svga3dsurface_get_image_buffer_size - (desc, cur_size, stride); - ++cur_offset; - ++cur_size; - } - } - res->backup_size = cur_bo_offset; - if (srf->scanout && - srf->num_sizes == 1 && - srf->sizes[0].width == 64 && - srf->sizes[0].height == 64 && - srf->format == SVGA3D_A8R8G8B8) { - - srf->snooper.image = kmalloc(64 * 64 
* 4, GFP_KERNEL); - /* clear the image */ - if (srf->snooper.image) { - memset(srf->snooper.image, 0x00, 64 * 64 * 4); - } else { - DRM_ERROR("Failed to allocate cursor_image\n"); - ret = -ENOMEM; - goto out_no_copy; - } - } else { - srf->snooper.image = NULL; - } - srf->snooper.crtc = NULL; - - user_srf->base.shareable = false; - user_srf->base.tfile = NULL; - - /** - * From this point, the generic resource management functions - * destroy the object on failure. - */ - - ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); - if (unlikely(ret != 0)) - goto out_unlock; - - tmp = vmw_resource_reference(&srf->res); - ret = ttm_base_object_init(tfile, &user_srf->base, - req->shareable, VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); - - if (unlikely(ret != 0)) { - vmw_resource_unreference(&tmp); - vmw_resource_unreference(&res); - goto out_unlock; - } - - rep->sid = user_srf->base.hash.key; - vmw_resource_unreference(&res); - - ttm_read_unlock(&vmaster->lock); - return 0; -out_no_copy: - kfree(srf->offsets); -out_no_offsets: - kfree(srf->sizes); -out_no_sizes: - ttm_base_object_kfree(user_srf, base); -out_no_user_srf: - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); -out_unlock: - ttm_read_unlock(&vmaster->lock); - return ret; -} - -/** - * vmw_user_surface_define_ioctl - Ioctl function implementing - * the user surface reference functionality. - * - * @dev: Pointer to a struct drm_device. - * @data: Pointer to data copied from / to user-space. - * @file_priv: Pointer to a drm file private structure. - */ -int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - union drm_vmw_surface_reference_arg *arg = - (union drm_vmw_surface_reference_arg *)data; - struct drm_vmw_surface_arg *req = &arg->req; - struct drm_vmw_surface_create_req *rep = &arg->rep; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_surface *srf; - struct vmw_user_surface *user_srf; - struct drm_vmw_size __user *user_sizes; - struct ttm_base_object *base; - int ret = -EINVAL; - - base = ttm_base_object_lookup(tfile, req->sid); - if (unlikely(base == NULL)) { - DRM_ERROR("Could not find surface to reference.\n"); - return -EINVAL; - } - - if (unlikely(base->object_type != VMW_RES_SURFACE)) - goto out_bad_resource; - - user_srf = container_of(base, struct vmw_user_surface, base); - srf = &user_srf->srf; - - ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); - if (unlikely(ret != 0)) { - DRM_ERROR("Could not add a reference to a surface.\n"); - goto out_no_reference; - } - - rep->flags = srf->flags; - rep->format = srf->format; - memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); - user_sizes = (struct drm_vmw_size __user *)(unsigned long) - rep->size_addr; - - if (user_sizes) - ret = copy_to_user(user_sizes, srf->sizes, - srf->num_sizes * sizeof(*srf->sizes)); - if (unlikely(ret != 0)) { - DRM_ERROR("copy_to_user failed %p %u\n", - user_sizes, srf->num_sizes); - ret = -EFAULT; - } -out_bad_resource: -out_no_reference: - ttm_base_object_unref(&base); - - return ret; -} diff --git a/trunk/drivers/gpu/vga/vga_switcheroo.c b/trunk/drivers/gpu/vga/vga_switcheroo.c index fa60add0ff63..e25cf31faab2 100644 --- a/trunk/drivers/gpu/vga/vga_switcheroo.c +++ b/trunk/drivers/gpu/vga/vga_switcheroo.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -375,6 +376,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char usercmd[64]; + const char 
*pdev_name; int ret; bool delay = false, can_switch; bool just_mux = false; @@ -466,6 +468,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, goto out; if (can_switch) { + pdev_name = pci_name(client->pdev); ret = vga_switchto_stage1(client); if (ret) printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret); @@ -537,6 +540,7 @@ static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv) int vga_switcheroo_process_delayed_switch(void) { struct vga_switcheroo_client *client; + const char *pdev_name; int ret; int err = -EINVAL; @@ -551,6 +555,7 @@ int vga_switcheroo_process_delayed_switch(void) if (!client || !check_can_switch()) goto err; + pdev_name = pci_name(client->pdev); ret = vga_switchto_stage2(client); if (ret) printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret); @@ -562,3 +567,4 @@ int vga_switcheroo_process_delayed_switch(void) return err; } EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); + diff --git a/trunk/include/drm/drmP.h b/trunk/include/drm/drmP.h index fad21c927a38..3fd82809b2d4 100644 --- a/trunk/include/drm/drmP.h +++ b/trunk/include/drm/drmP.h @@ -1431,8 +1431,6 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern u32 drm_vblank_count(struct drm_device *dev, int crtc); extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, struct timeval *vblanktime); -extern void drm_send_vblank_event(struct drm_device *dev, int crtc, - struct drm_pending_vblank_event *e); extern bool drm_handle_vblank(struct drm_device *dev, int crtc); extern int drm_vblank_get(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc); @@ -1505,7 +1503,6 @@ extern unsigned int drm_debug; extern unsigned int drm_vblank_offdelay; extern unsigned int drm_timestamp_precision; -extern unsigned int drm_timestamp_monotonic; extern struct class *drm_class; extern struct proc_dir_entry *drm_proc_root; diff --git a/trunk/include/drm/drm_crtc.h b/trunk/include/drm/drm_crtc.h index ee9b0b59237f..3fa18b7e9497 100644 --- a/trunk/include/drm/drm_crtc.h +++ b/trunk/include/drm/drm_crtc.h @@ -792,7 +792,6 @@ struct drm_mode_config { /* output poll support */ bool poll_enabled; - bool poll_running; struct delayed_work output_poll_work; /* pointers to standard properties */ @@ -888,14 +887,14 @@ extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_ extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); -extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); +extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode); extern void drm_mode_config_init(struct drm_device *dev); extern void drm_mode_config_reset(struct drm_device *dev); extern void drm_mode_config_cleanup(struct drm_device *dev); extern void drm_mode_set_name(struct drm_display_mode *mode); -extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); -extern int drm_mode_width(const struct drm_display_mode *mode); -extern int drm_mode_height(const struct drm_display_mode *mode); +extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2); +extern int drm_mode_width(struct drm_display_mode *mode); +extern int drm_mode_height(struct drm_display_mode *mode); /* for us by fb module */ extern int 
drm_mode_attachmode_crtc(struct drm_device *dev, @@ -920,6 +919,12 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, extern void drm_mode_connector_list_update(struct drm_connector *connector); extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, struct edid *edid); +extern int drm_connector_property_set_value(struct drm_connector *connector, + struct drm_property *property, + uint64_t value); +extern int drm_connector_property_get_value(struct drm_connector *connector, + struct drm_property *property, + uint64_t *value); extern int drm_object_property_set_value(struct drm_mode_object *obj, struct drm_property *property, uint64_t val); @@ -941,6 +946,8 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY); extern bool drm_crtc_in_use(struct drm_crtc *crtc); +extern void drm_connector_attach_property(struct drm_connector *connector, + struct drm_property *property, uint64_t init_val); extern void drm_object_attach_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t init_val); @@ -1030,7 +1037,6 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern u8 *drm_find_cea_extension(struct edid *edid); -extern u8 drm_match_cea_mode(struct drm_display_mode *to_match); extern bool drm_detect_hdmi_monitor(struct edid *edid); extern bool drm_detect_monitor_audio(struct edid *edid); extern int drm_mode_page_flip_ioctl(struct drm_device *dev, diff --git a/trunk/include/drm/drm_crtc_helper.h b/trunk/include/drm/drm_crtc_helper.h index f43d556bf40b..e01cc80c9c30 100644 --- a/trunk/include/drm/drm_crtc_helper.h +++ b/trunk/include/drm/drm_crtc_helper.h @@ -137,8 +137,6 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); -extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); - extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, struct drm_mode_fb_cmd2 *mode_cmd); @@ -164,7 +162,6 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev); extern void drm_kms_helper_poll_init(struct drm_device *dev); extern void drm_kms_helper_poll_fini(struct drm_device *dev); extern void drm_helper_hpd_irq_event(struct drm_device *dev); -extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); diff --git a/trunk/include/drm/drm_dp_helper.h b/trunk/include/drm/drm_dp_helper.h index e8e1417af3d9..fe061489f91f 100644 --- a/trunk/include/drm/drm_dp_helper.h +++ b/trunk/include/drm/drm_dp_helper.h @@ -25,7 +25,6 @@ #include #include -#include /* * Unless otherwise noted, all values are from the DP 1.1a spec. 
Note that @@ -312,14 +311,6 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 -/** - * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp - * aux algorithm - * @running: set by the algo indicating whether an i2c is ongoing or whether - * the i2c bus is quiescent - * @address: i2c target address for the currently ongoing transfer - * @aux_ch: driver callback to transfer a single byte of the i2c payload - */ struct i2c_algo_dp_aux_data { bool running; u16 address; @@ -331,34 +322,4 @@ struct i2c_algo_dp_aux_data { int i2c_dp_aux_add_bus(struct i2c_adapter *adapter); - -#define DP_LINK_STATUS_SIZE 6 -bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count); -bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], - int lane_count); -u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], - int lane); -u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], - int lane); - -#define DP_RECEIVER_CAP_SIZE 0xf -void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); -void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]); - -u8 drm_dp_link_rate_to_bw_code(int link_rate); -int drm_dp_bw_code_to_link_rate(u8 link_bw); - -static inline int -drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]); -} - -static inline u8 -drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE]) -{ - return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; -} - #endif /* _DRM_DP_HELPER_H_ */ diff --git a/trunk/include/drm/drm_hashtab.h b/trunk/include/drm/drm_hashtab.h index fce2ef3fdfff..3650d5d011ee 100644 --- a/trunk/include/drm/drm_hashtab.h +++ b/trunk/include/drm/drm_hashtab.h @@ -61,19 +61,5 @@ extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); extern void drm_ht_remove(struct drm_open_hash *ht); -/* - * RCU-safe interface - * - * The user of this API needs to make sure that two or more instances of the - * hash table manipulation functions are never run simultaneously. - * The lookup function drm_ht_find_item_rcu may, however, run simultaneously - * with any of the manipulation functions as long as it's called from within - * an RCU read-locked section. - */ -#define drm_ht_insert_item_rcu drm_ht_insert_item -#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please -#define drm_ht_remove_key_rcu drm_ht_remove_key -#define drm_ht_remove_item_rcu drm_ht_remove_item -#define drm_ht_find_item_rcu drm_ht_find_item #endif diff --git a/trunk/include/drm/intel-gtt.h b/trunk/include/drm/intel-gtt.h index 6eb76a1f11ab..2e37e9f02e71 100644 --- a/trunk/include/drm/intel-gtt.h +++ b/trunk/include/drm/intel-gtt.h @@ -3,7 +3,7 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -struct intel_gtt { +const struct intel_gtt { /* Size of memory reserved for graphics by the BIOS */ unsigned int stolen_size; /* Total number of gtt entries. */ @@ -17,7 +17,6 @@ struct intel_gtt { unsigned int do_idle_maps : 1; /* Share the scratch page dma with ppgtts. 
*/ dma_addr_t scratch_page_dma; - struct page *scratch_page; /* for ppgtt PDE access */ u32 __iomem *gtt; /* needed for ioremap in drm/i915 */ @@ -40,6 +39,10 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); #define AGP_DCACHE_MEMORY 1 #define AGP_PHYS_MEMORY 2 +/* New caching attributes for gen6/sandybridge */ +#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) +#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) + /* flag for GFDT type */ #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) diff --git a/trunk/include/drm/ttm/ttm_bo_api.h b/trunk/include/drm/ttm/ttm_bo_api.h index 3cb5d848fb66..e8028ade567f 100644 --- a/trunk/include/drm/ttm/ttm_bo_api.h +++ b/trunk/include/drm/ttm/ttm_bo_api.h @@ -141,6 +141,8 @@ struct ttm_tt; * struct ttm_buffer_object * * @bdev: Pointer to the buffer object device structure. + * @buffer_start: The virtual user-space start address of ttm_bo_type_user + * buffers. * @type: The bo type. * @destroy: Destruction function. If NULL, kfree is used. * @num_pages: Actual number of pages. @@ -170,6 +172,7 @@ struct ttm_tt; * @seq_valid: The value of @val_seq is valid. This value is protected by * the bo_device::lru_lock. * @reserved: Deadlock-free lock used for synchronization state transitions. + * @sync_obj_arg: Opaque argument to synchronization object function. * @sync_obj: Pointer to a synchronization object. * @priv_flags: Flags describing buffer object internal state. * @vm_rb: Rb node for the vm rb tree. @@ -197,6 +200,7 @@ struct ttm_buffer_object { struct ttm_bo_global *glob; struct ttm_bo_device *bdev; + unsigned long buffer_start; enum ttm_bo_type type; void (*destroy) (struct ttm_buffer_object *); unsigned long num_pages; @@ -251,6 +255,7 @@ struct ttm_buffer_object { * checking NULL while reserved but not holding the mentioned lock. */ + void *sync_obj_arg; void *sync_obj; unsigned long priv_flags; @@ -337,6 +342,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, * @bo: The buffer object. * @placement: Proposed placement for the buffer object. * @interruptible: Sleep interruptible if sleeping. + * @no_wait_reserve: Return immediately if other buffers are busy. * @no_wait_gpu: Return immediately if the GPU is busy. * * Changes placement and caching policy of the buffer object @@ -349,7 +355,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, */ extern int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, - bool interruptible, + bool interruptible, bool no_wait_reserve, bool no_wait_gpu); /** @@ -423,9 +429,8 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, * @no_wait: Return immediately if buffer is busy. * * Synchronizes a buffer object for CPU RW access. This means - * command submission that affects the buffer will return -EBUSY - * until ttm_bo_synccpu_write_release is called. - * + * blocking command submission that affects the buffer and + * waiting for buffer idle. This lock is recursive. * Returns * -EBUSY if the buffer is busy and no_wait is true. * -ERESTARTSYS if interrupted by a signal. @@ -467,6 +472,8 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * @type: Requested type of buffer object. * @flags: Initial placement flags. * @page_alignment: Data alignment in pages. + * @buffer_start: Virtual address of user space data backing a + * user buffer object. * @interruptible: If needing to sleep to wait for GPU resources, * sleep interruptible. 
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -498,6 +505,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
 			enum ttm_bo_type type,
 			struct ttm_placement *placement,
 			uint32_t page_alignment,
+			unsigned long buffer_start,
 			bool interrubtible,
 			struct file *persistent_swap_storage,
 			size_t acc_size,
@@ -513,6 +521,8 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
 * @type: Requested type of buffer object.
 * @flags: Initial placement flags.
 * @page_alignment: Data alignment in pages.
+ * @buffer_start: Virtual address of user space data backing a
+ * user buffer object.
 * @interruptible: If needing to sleep while waiting for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -535,6 +545,7 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
 			enum ttm_bo_type type,
 			struct ttm_placement *placement,
 			uint32_t page_alignment,
+			unsigned long buffer_start,
 			bool interruptible,
 			struct file *persistent_swap_storage,
 			struct ttm_buffer_object **p_bo);
@@ -725,18 +736,4 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 
 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
 
-/**
- * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
- *
- * @bo: The buffer object to check.
- *
- * This function returns an indication if a bo is reserved or not, and should
- * only be used to print an error when it is not from incorrect api usage, since
- * there's no guarantee that it is the caller that is holding the reservation.
- */
-static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
-{
-	return atomic_read(&bo->reserved);
-}
-
 #endif
diff --git a/trunk/include/drm/ttm/ttm_bo_driver.h b/trunk/include/drm/ttm/ttm_bo_driver.h
index e3a43a47d78c..d803b92b0324 100644
--- a/trunk/include/drm/ttm/ttm_bo_driver.h
+++ b/trunk/include/drm/ttm/ttm_bo_driver.h
@@ -394,7 +394,7 @@ struct ttm_bo_driver {
 	 */
 	int (*move) (struct ttm_buffer_object *bo,
 		     bool evict, bool interruptible,
-		     bool no_wait_gpu,
+		     bool no_wait_reserve, bool no_wait_gpu,
 		     struct ttm_mem_reg *new_mem);
 
 	/**
@@ -422,10 +422,10 @@ struct ttm_bo_driver {
 	 * documentation.
 	 */
 
-	bool (*sync_obj_signaled) (void *sync_obj);
-	int (*sync_obj_wait) (void *sync_obj,
+	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
+	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
 			      bool lazy, bool interruptible);
-	int (*sync_obj_flush) (void *sync_obj);
+	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
 	void (*sync_obj_unref) (void **sync_obj);
 	void *(*sync_obj_ref) (void *sync_obj);
 
@@ -521,6 +521,8 @@ struct ttm_bo_global {
 * lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
+ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
+ * If a GPU lockup has been detected, this is forced to 0.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
@@ -554,6 +556,7 @@ struct ttm_bo_device {
 	 * Protected by load / firstopen / lastclose /unload sync.
 	 */
 
+	bool nice_mode;
 	struct address_space *dev_mapping;
 
 	/*
@@ -703,6 +706,7 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
 * @proposed_placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sliping.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
@@ -718,13 +722,27 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
 				struct ttm_mem_reg *mem,
 				bool interruptible,
-				bool no_wait_gpu);
+				bool no_wait_reserve, bool no_wait_gpu);
 
 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *mem);
 extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem);
 
+/**
+ * ttm_bo_wait_for_cpu
+ *
+ * @bo: Pointer to a struct ttm_buffer_object.
+ * @no_wait: Don't sleep while waiting.
+ *
+ * Wait until a buffer object is no longer sync'ed for CPU access.
+ * Returns:
+ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+ */
+
+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
+
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -900,6 +918,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
@@ -914,14 +933,15 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
 */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool no_wait_gpu,
-			   struct ttm_mem_reg *new_mem);
+			   bool evict, bool no_wait_reserve,
+			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
@@ -936,8 +956,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict, bool no_wait_gpu,
-			      struct ttm_mem_reg *new_mem);
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);
 
 /**
 * ttm_bo_free_old_node
@@ -953,7 +973,10 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
+ * @sync_obj_arg: An argument to pass to the sync object idle / wait
+ * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
@@ -967,7 +990,9 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 				     void *sync_obj,
-				     bool evict, bool no_wait_gpu,
+				     void *sync_obj_arg,
+				     bool evict, bool no_wait_reserve,
+				     bool no_wait_gpu,
 				     struct ttm_mem_reg *new_mem);
 
 /**
 * ttm_io_prot
diff --git a/trunk/include/drm/ttm/ttm_execbuf_util.h b/trunk/include/drm/ttm/ttm_execbuf_util.h
index 547e19f06e57..1926cae373ba 100644
--- a/trunk/include/drm/ttm/ttm_execbuf_util.h
+++ b/trunk/include/drm/ttm/ttm_execbuf_util.h
@@ -39,6 +39,8 @@
 *
 * @head: list head for thread-private list.
 * @bo: refcounted buffer object pointer.
+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
+ * adding a new sync object.
 * @reserved: Indicates whether @bo has been reserved for validation.
 * @removed: Indicates whether @bo has been removed from lru lists.
 * @put_count: Number of outstanding references on bo::list_kref.
@@ -48,6 +50,7 @@ struct ttm_validate_buffer {
 	struct list_head head;
 	struct ttm_buffer_object *bo;
+	void *new_sync_obj_arg;
 	bool reserved;
 	bool removed;
 	int put_count;
diff --git a/trunk/include/drm/ttm/ttm_memory.h b/trunk/include/drm/ttm/ttm_memory.h
index 72dcbe81dd07..d6d1da468c97 100644
--- a/trunk/include/drm/ttm/ttm_memory.h
+++ b/trunk/include/drm/ttm/ttm_memory.h
@@ -60,6 +60,7 @@ struct ttm_mem_shrink {
 * for the GPU, and this will otherwise block other workqueue tasks(?)
 * At this point we use only a single-threaded workqueue.
 * @work: The workqueue callback for the shrink queue.
+ * @queue: Wait queue for processes suspended waiting for memory.
 * @lock: Lock to protect the @shrink - and the memory accounting members,
 * that is, essentially the whole structure with some exceptions.
 * @zones: Array of pointers to accounting zones.
@@ -79,6 +80,7 @@ struct ttm_mem_global {
 	struct ttm_mem_shrink *shrink;
 	struct workqueue_struct *swap_queue;
 	struct work_struct work;
+	wait_queue_head_t queue;
 	spinlock_t lock;
 	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
 	unsigned int num_zones;
diff --git a/trunk/include/drm/ttm/ttm_object.h b/trunk/include/drm/ttm/ttm_object.h
index fc0cf0649901..b01c563b2751 100644
--- a/trunk/include/drm/ttm/ttm_object.h
+++ b/trunk/include/drm/ttm/ttm_object.h
@@ -40,7 +40,6 @@
 #include <linux/list.h>
 #include <drm/drm_hashtab.h>
 #include <linux/kref.h>
-#include <linux/rcupdate.h>
 #include <ttm/ttm_memory.h>
 
 /**
@@ -121,7 +120,6 @@ struct ttm_object_device;
 */
 
 struct ttm_base_object {
-	struct rcu_head rhead;
 	struct drm_hash_item hash;
 	enum ttm_object_type object_type;
 	bool shareable;
@@ -270,6 +268,4 @@ extern struct ttm_object_device *ttm_object_device_init
 
 extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
 
-#define ttm_base_object_kfree(__object, __base)\
-	kfree_rcu(__object, __base.rhead)
 #endif
diff --git a/trunk/include/linux/dma-attrs.h b/trunk/include/linux/dma-attrs.h
index c8e1831d7572..f83f793223ff 100644
--- a/trunk/include/linux/dma-attrs.h
+++ b/trunk/include/linux/dma-attrs.h
@@ -17,7 +17,6 @@ enum dma_attr {
 	DMA_ATTR_NON_CONSISTENT,
 	DMA_ATTR_NO_KERNEL_MAPPING,
 	DMA_ATTR_SKIP_CPU_SYNC,
-	DMA_ATTR_FORCE_CONTIGUOUS,
 	DMA_ATTR_MAX,
 };
 
diff --git a/trunk/include/linux/kref.h b/trunk/include/linux/kref.h
index 4972e6e9ca93..65af6887872f 100644
--- a/trunk/include/linux/kref.h
+++ b/trunk/include/linux/kref.h
@@ -111,25 +111,4 @@ static inline int kref_put_mutex(struct kref *kref,
 	}
 	return 0;
 }
-
-/**
- * kref_get_unless_zero - Increment refcount for object unless it is zero.
- * @kref: object.
- *
- * Return non-zero if the increment succeeded. Otherwise return 0.
- *
- * This function is intended to simplify locking around refcounting for
- * objects that can be looked up from a lookup structure, and which are
- * removed from that lookup structure in the object destructor.
- * Operations on such objects require at least a read lock around
- * lookup + kref_get, and a write lock around kref_put + remove from lookup
- * structure. Furthermore, RCU implementations become extremely tricky.
- * With a lookup followed by a kref_get_unless_zero *with return value check*
- * locking in the kref_put path can be deferred to the actual removal from
- * the lookup structure and RCU lookups become trivial.
- */
-static inline int __must_check kref_get_unless_zero(struct kref *kref)
-{
-	return atomic_add_unless(&kref->refcount, 1, 0);
-}
 #endif /* _KREF_H_ */
diff --git a/trunk/include/uapi/drm/drm.h b/trunk/include/uapi/drm/drm.h
index 8d1e2bbee83a..1e3481edf062 100644
--- a/trunk/include/uapi/drm/drm.h
+++ b/trunk/include/uapi/drm/drm.h
@@ -778,7 +778,6 @@ struct drm_event_vblank {
 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4
 #define DRM_CAP_PRIME 0x5
-#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
 
 #define DRM_PRIME_CAP_IMPORT 0x1
 #define DRM_PRIME_CAP_EXPORT 0x2
diff --git a/trunk/include/uapi/drm/exynos_drm.h b/trunk/include/uapi/drm/exynos_drm.h
index 49f010f2b27f..c0494d586e23 100644
--- a/trunk/include/uapi/drm/exynos_drm.h
+++ b/trunk/include/uapi/drm/exynos_drm.h
@@ -133,26 +133,17 @@ struct drm_exynos_g2d_cmd {
 	__u32	data;
 };
 
-enum drm_exynos_g2d_buf_type {
-	G2D_BUF_USERPTR = 1 << 31,
-};
-
 enum drm_exynos_g2d_event_type {
 	G2D_EVENT_NOT,
 	G2D_EVENT_NONSTOP,
 	G2D_EVENT_STOP,		/* not yet */
 };
 
-struct drm_exynos_g2d_userptr {
-	unsigned long userptr;
-	unsigned long size;
-};
-
 struct drm_exynos_g2d_set_cmdlist {
 	__u64	cmd;
-	__u64	cmd_buf;
+	__u64	cmd_gem;
 	__u32	cmd_nr;
-	__u32	cmd_buf_nr;
+	__u32	cmd_gem_nr;
 
 	/* for g2d event */
 	__u64	event_type;
diff --git a/trunk/include/uapi/drm/i915_drm.h b/trunk/include/uapi/drm/i915_drm.h
index b746a3cf5fa9..4322b1e7d2ed 100644
--- a/trunk/include/uapi/drm/i915_drm.h
+++ b/trunk/include/uapi/drm/i915_drm.h
@@ -306,7 +306,6 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_SEMAPHORES 20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE 22
-#define I915_PARAM_HAS_SECURE_BATCHES 23
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -672,11 +671,6 @@ struct drm_i915_gem_execbuffer2 {
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
 
-/** Request a privileged ("secure") batch buffer. Note only available for
- * DRM_ROOT_ONLY | DRM_MASTER processes.
- */
-#define I915_EXEC_SECURE		(1<<9)
-
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK