diff --git a/[refs] b/[refs]
index 37e32cf6fedd..59769d06a78d 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 8de9e417757fb9f130f55a38f4ee7027b60de1c7
+refs/heads/master: fbed600af159b9dce78dd74c4bff56b40bb19d47
diff --git a/trunk/CREDITS b/trunk/CREDITS
index d8fe12a9421f..2346b09ca8bb 100644
--- a/trunk/CREDITS
+++ b/trunk/CREDITS
@@ -1823,6 +1823,11 @@ S: Kattreinstr 38
S: D-64295
S: Germany
+N: Avi Kivity
+E: avi.kivity@gmail.com
+D: Kernel-based Virtual Machine (KVM)
+S: Ra'annana, Israel
+
N: Andi Kleen
E: andi@firstfloor.org
U: http://www.halobates.de
diff --git a/trunk/Documentation/DMA-attributes.txt b/trunk/Documentation/DMA-attributes.txt
index e59480db9ee0..f50309081ac7 100644
--- a/trunk/Documentation/DMA-attributes.txt
+++ b/trunk/Documentation/DMA-attributes.txt
@@ -91,12 +91,3 @@ transferred to 'device' domain. This attribute can be also used for
dma_unmap_{single,page,sg} functions family to force buffer to stay in
device domain after releasing a mapping for it. Use this attribute with
care!
-
-DMA_ATTR_FORCE_CONTIGUOUS
--------------------------
-
-By default DMA-mapping subsystem is allowed to assemble the buffer
-allocated by dma_alloc_attrs() function from individual pages if it can
-be mapped as contiguous chunk into device dma address space. By
-specifing this attribute the allocated buffer is forced to be contiguous
-also in physical memory.
diff --git a/trunk/Documentation/DocBook/drm.tmpl b/trunk/Documentation/DocBook/drm.tmpl
index 4ee2304f82f9..b0300529ab13 100644
--- a/trunk/Documentation/DocBook/drm.tmpl
+++ b/trunk/Documentation/DocBook/drm.tmpl
@@ -1141,13 +1141,23 @@ int max_width, max_height;
the page_flip operation will be called with a
non-NULL event argument pointing to a
drm_pending_vblank_event instance. Upon page
- flip completion the driver must call drm_send_vblank_event
- to fill in the event and send to wake up any waiting processes.
- This can be performed with
+ flip completion the driver must fill the
+ event::event
+ sequence, tv_sec
+ and tv_usec fields with the associated
+ vertical blanking count and timestamp, add the event to the
+ drm_file list of events to be signaled, and wake
+ up any waiting process. This can be performed with
event.sequence = drm_vblank_count_and_time(..., &now);
+ event->event.tv_sec = now.tv_sec;
+ event->event.tv_usec = now.tv_usec;
+
spin_lock_irqsave(&dev->event_lock, flags);
- ...
- drm_send_vblank_event(dev, pipe, event);
+ list_add_tail(&event->base.link, &event->base.file_priv->event_list);
+ wake_up_interruptible(&event->base.file_priv->event_wait);
spin_unlock_irqrestore(&dev->event_lock, flags);
]]>
@@ -1611,10 +1621,10 @@ void intel_crt_init(struct drm_device *dev)
-
+
- Mode Setting Helper Functions
+ Mid-layer Helper Functions
The CRTC, encoder and connector functions provided by the drivers
implement the DRM API. They're called by the DRM core and ioctl handlers
@@ -2096,21 +2106,6 @@ void intel_crt_init(struct drm_device *dev)
-
- Modeset Helper Functions Reference
-!Edrivers/gpu/drm/drm_crtc_helper.c
-
-
- fbdev Helper Functions Reference
-!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
-!Edrivers/gpu/drm/drm_fb_helper.c
-
-
- Display Port Helper Functions Reference
-!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
-!Iinclude/drm/drm_dp_helper.h
-!Edrivers/gpu/drm/drm_dp_helper.c
-
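For reference, the completion sequence that the updated drm.tmpl text describes can be assembled into a minimal sketch; the handler name and the pipe bookkeeping are illustrative, not taken from any particular driver:

static void example_finish_page_flip(struct drm_device *dev, int pipe,
				     struct drm_pending_vblank_event *event)
{
	struct timeval now;
	unsigned long flags;

	if (!event)
		return;

	/* Fill in the vblank count and timestamp for the event. */
	event->event.sequence = drm_vblank_count_and_time(dev, pipe, &now);
	event->event.tv_sec = now.tv_sec;
	event->event.tv_usec = now.tv_usec;

	/* Queue the event on the drm_file list and wake any waiters. */
	spin_lock_irqsave(&dev->event_lock, flags);
	list_add_tail(&event->base.link, &event->base.file_priv->event_list);
	wake_up_interruptible(&event->base.file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}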
diff --git a/trunk/Documentation/arm64/memory.txt b/trunk/Documentation/arm64/memory.txt
index dbbdcbba75a3..4110cca96bd6 100644
--- a/trunk/Documentation/arm64/memory.txt
+++ b/trunk/Documentation/arm64/memory.txt
@@ -27,17 +27,17 @@ Start End Size Use
-----------------------------------------------------------------------
0000000000000000 0000007fffffffff 512GB user
-ffffff8000000000 ffffffbbfffcffff ~240GB vmalloc
+ffffff8000000000 ffffffbbfffeffff ~240GB vmalloc
-ffffffbbfffd0000 ffffffbcfffdffff 64KB [guard page]
+ffffffbbffff0000 ffffffbbffffffff 64KB [guard page]
-ffffffbbfffe0000 ffffffbcfffeffff 64KB PCI I/O space
+ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
-ffffffbbffff0000 ffffffbcffffffff 64KB [guard page]
+ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmemmap]
-ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
+ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space
-ffffffbe00000000 ffffffbffbffffff ~8GB [guard, future vmmemap]
+ffffffbbffff0000 ffffffbcffffffff ~2MB [guard]
ffffffbffc000000 ffffffbfffffffff 64MB modules
diff --git a/trunk/Documentation/cgroups/memory.txt b/trunk/Documentation/cgroups/memory.txt
index c07f7b4fb88d..71c4da413444 100644
--- a/trunk/Documentation/cgroups/memory.txt
+++ b/trunk/Documentation/cgroups/memory.txt
@@ -466,6 +466,10 @@ Note:
5.3 swappiness
Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
+Please note that, unlike the global swappiness, the memcg knob set to 0
+really prevents any swapping even if swap storage is available. This
+might trigger the memcg OOM killer if there are no file pages to
+reclaim.
Following cgroups' swappiness can't be changed.
- root cgroup (uses /proc/sys/vm/swappiness).
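The effect of the swappiness note above can be summarised in a simplified sketch; this is not the actual mm/vmscan.c logic, only an illustration of why a memcg with swappiness 0 can hit its OOM killer once no file pages remain (function and parameter names are invented for the example):

static bool memcg_can_reclaim(unsigned int swappiness,
			      unsigned long file_pages,
			      unsigned long anon_pages,
			      bool swap_available)
{
	if (file_pages)
		return true;		/* file pages can always be reclaimed */
	if (swappiness == 0)
		return false;		/* anon is never swapped: OOM may follow */
	return swap_available && anon_pages != 0;
}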
diff --git a/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
deleted file mode 100644
index b4fa934ae3a2..000000000000
--- a/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ /dev/null
@@ -1,191 +0,0 @@
-NVIDIA Tegra host1x
-
-Required properties:
-- compatible: "nvidia,tegra-host1x"
-- reg: Physical base address and length of the controller's registers.
-- interrupts: The interrupt outputs from the controller.
-- #address-cells: The number of cells used to represent physical base addresses
- in the host1x address space. Should be 1.
-- #size-cells: The number of cells used to represent the size of an address
- range in the host1x address space. Should be 1.
-- ranges: The mapping of the host1x address space to the CPU address space.
-
-The host1x top-level node defines a number of children, each representing one
-of the following host1x client modules:
-
-- mpe: video encoder
-
- Required properties:
- - compatible: "nvidia,tegra-mpe"
- - reg: Physical base address and length of the controller's registers.
- - interrupts: The interrupt outputs from the controller.
-
-- vi: video input
-
- Required properties:
- - compatible: "nvidia,tegra-vi"
- - reg: Physical base address and length of the controller's registers.
- - interrupts: The interrupt outputs from the controller.
-
-- epp: encoder pre-processor
-
- Required properties:
- - compatible: "nvidia,tegra-epp"
- - reg: Physical base address and length of the controller's registers.
- - interrupts: The interrupt outputs from the controller.
-
-- isp: image signal processor
-
- Required properties:
- - compatible: "nvidia,tegra-isp"
- - reg: Physical base address and length of the controller's registers.
- - interrupts: The interrupt outputs from the controller.
-
-- gr2d: 2D graphics engine
-
- Required properties:
- - compatible: "nvidia,tegra-gr2d"
- - reg: Physical base address and length of the controller's registers.
- - interrupts: The interrupt outputs from the controller.
-
-- gr3d: 3D graphics engine
-
- Required properties:
- - compatible: "nvidia,tegra-gr3d"
- - reg: Physical base address and length of the controller's registers.
-
-- dc: display controller
-
- Required properties:
- - compatible: "nvidia,tegra-dc"
- - reg: Physical base address and length of the controller's registers.
- - interrupts: The interrupt outputs from the controller.
-
- Each display controller node has a child node, named "rgb", that represents
- the RGB output associated with the controller. It can take the following
- optional properties:
- - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
- - nvidia,edid: supplies a binary EDID blob
-
-- hdmi: High Definition Multimedia Interface
-
- Required properties:
- - compatible: "nvidia,tegra-hdmi"
- - reg: Physical base address and length of the controller's registers.
- - interrupts: The interrupt outputs from the controller.
- - vdd-supply: regulator for supply voltage
- - pll-supply: regulator for PLL
-
- Optional properties:
- - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
- - nvidia,edid: supplies a binary EDID blob
-
-- tvo: TV encoder output
-
- Required properties:
- - compatible: "nvidia,tegra-tvo"
- - reg: Physical base address and length of the controller's registers.
- - interrupts: The interrupt outputs from the controller.
-
-- dsi: display serial interface
-
- Required properties:
- - compatible: "nvidia,tegra-dsi"
- - reg: Physical base address and length of the controller's registers.
-
-Example:
-
-/ {
- ...
-
- host1x {
- compatible = "nvidia,tegra20-host1x", "simple-bus";
- reg = <0x50000000 0x00024000>;
- interrupts = <0 65 0x04 /* mpcore syncpt */
- 0 67 0x04>; /* mpcore general */
-
- #address-cells = <1>;
- #size-cells = <1>;
-
- ranges = <0x54000000 0x54000000 0x04000000>;
-
- mpe {
- compatible = "nvidia,tegra20-mpe";
- reg = <0x54040000 0x00040000>;
- interrupts = <0 68 0x04>;
- };
-
- vi {
- compatible = "nvidia,tegra20-vi";
- reg = <0x54080000 0x00040000>;
- interrupts = <0 69 0x04>;
- };
-
- epp {
- compatible = "nvidia,tegra20-epp";
- reg = <0x540c0000 0x00040000>;
- interrupts = <0 70 0x04>;
- };
-
- isp {
- compatible = "nvidia,tegra20-isp";
- reg = <0x54100000 0x00040000>;
- interrupts = <0 71 0x04>;
- };
-
- gr2d {
- compatible = "nvidia,tegra20-gr2d";
- reg = <0x54140000 0x00040000>;
- interrupts = <0 72 0x04>;
- };
-
- gr3d {
- compatible = "nvidia,tegra20-gr3d";
- reg = <0x54180000 0x00040000>;
- };
-
- dc@54200000 {
- compatible = "nvidia,tegra20-dc";
- reg = <0x54200000 0x00040000>;
- interrupts = <0 73 0x04>;
-
- rgb {
- status = "disabled";
- };
- };
-
- dc@54240000 {
- compatible = "nvidia,tegra20-dc";
- reg = <0x54240000 0x00040000>;
- interrupts = <0 74 0x04>;
-
- rgb {
- status = "disabled";
- };
- };
-
- hdmi {
- compatible = "nvidia,tegra20-hdmi";
- reg = <0x54280000 0x00040000>;
- interrupts = <0 75 0x04>;
- status = "disabled";
- };
-
- tvo {
- compatible = "nvidia,tegra20-tvo";
- reg = <0x542c0000 0x00040000>;
- interrupts = <0 76 0x04>;
- status = "disabled";
- };
-
- dsi {
- compatible = "nvidia,tegra20-dsi";
- reg = <0x54300000 0x00040000>;
- status = "disabled";
- };
- };
-
- ...
-};
diff --git a/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt b/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt
index bc9549529014..c79bab025369 100644
--- a/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt
+++ b/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt
@@ -8,9 +8,16 @@ gpios property as described in section VIII.1 in the following order:
MDC, MDIO.
+Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases"
+node.
+
Example:
-mdio {
+aliases {
+ mdio-gpio0 = <&mdio0>;
+};
+
+mdio0: mdio {
compatible = "virtual,mdio-gpio";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/trunk/Documentation/filesystems/proc.txt b/trunk/Documentation/filesystems/proc.txt
index a1793d670cd0..3844d21d6ca3 100644
--- a/trunk/Documentation/filesystems/proc.txt
+++ b/trunk/Documentation/filesystems/proc.txt
@@ -33,7 +33,7 @@ Table of Contents
2 Modifying System Parameters
3 Per-Process Parameters
- 3.1 /proc/<pid>/oom_score_adj - Adjust the oom-killer
+ 3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer
score
3.2 /proc/<pid>/oom_score - Display current oom-killer score
3.3 /proc/<pid>/io - Display the IO accounting fields
@@ -1320,10 +1320,10 @@ of the kernel.
CHAPTER 3: PER-PROCESS PARAMETERS
------------------------------------------------------------------------------
-3.1 /proc/<pid>/oom_score_adj- Adjust the oom-killer score
+3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj- Adjust the oom-killer score
--------------------------------------------------------------------------------
-This file can be used to adjust the badness heuristic used to select which
+These files can be used to adjust the badness heuristic used to select which
process gets killed in out of memory conditions.
The badness heuristic assigns a value to each candidate task ranging from 0
@@ -1361,6 +1361,12 @@ same system, cpuset, mempolicy, or memory controller resources to use at least
equivalent to discounting 50% of the task's allowed memory from being considered
as scoring against the task.
+For backwards compatibility with previous kernels, /proc/<pid>/oom_adj may also
+be used to tune the badness score. Its acceptable values range from -16
+(OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX) and a special value of -17
+(OOM_DISABLE) to disable oom killing entirely for that task. Its value is
+scaled linearly with /proc/<pid>/oom_score_adj.
+
The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
requires CAP_SYS_RESOURCE.
@@ -1375,7 +1381,9 @@ minimal amount of work.
-------------------------------------------------------------
This file can be used to check the current score used by the oom-killer is for
-any given <pid>.
+any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which
+process should be killed in an out-of-memory situation.
+
3.3 /proc/<pid>/io - Display the IO accounting fields
-------------------------------------------------------
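The linear scaling between the two knobs described above can be sketched as follows; the constants come from the text (-16, +15, -17), while the exact rounding is an assumption for illustration rather than a quote of fs/proc/base.c:

#define OOM_DISABLE		(-17)
#define OOM_ADJUST_MIN		(-16)
#define OOM_ADJUST_MAX		15
#define OOM_SCORE_ADJ_MIN	(-1000)
#define OOM_SCORE_ADJ_MAX	1000

static int oom_adj_to_oom_score_adj(int oom_adj)
{
	if (oom_adj == OOM_DISABLE)
		return OOM_SCORE_ADJ_MIN;	/* disable oom killing entirely */
	/* map [-16, +15] linearly onto [-1000, +1000] */
	return oom_adj * OOM_SCORE_ADJ_MAX / -OOM_ADJUST_MIN;
}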
diff --git a/trunk/Documentation/kref.txt b/trunk/Documentation/kref.txt
index ddf85a5dde0c..48ba715d5a63 100644
--- a/trunk/Documentation/kref.txt
+++ b/trunk/Documentation/kref.txt
@@ -213,91 +213,3 @@ presentation on krefs, which can be found at:
and:
http://www.kroah.com/linux/talks/ols_2004_kref_talk/
-
-The above example could also be optimized using kref_get_unless_zero() in
-the following way:
-
-static struct my_data *get_entry()
-{
- struct my_data *entry = NULL;
- mutex_lock(&mutex);
- if (!list_empty(&q)) {
- entry = container_of(q.next, struct my_data, link);
- if (!kref_get_unless_zero(&entry->refcount))
- entry = NULL;
- }
- mutex_unlock(&mutex);
- return entry;
-}
-
-static void release_entry(struct kref *ref)
-{
- struct my_data *entry = container_of(ref, struct my_data, refcount);
-
- mutex_lock(&mutex);
- list_del(&entry->link);
- mutex_unlock(&mutex);
- kfree(entry);
-}
-
-static void put_entry(struct my_data *entry)
-{
- kref_put(&entry->refcount, release_entry);
-}
-
-Which is useful to remove the mutex lock around kref_put() in put_entry(), but
-it's important that kref_get_unless_zero is enclosed in the same critical
-section that finds the entry in the lookup table,
-otherwise kref_get_unless_zero may reference already freed memory.
-Note that it is illegal to use kref_get_unless_zero without checking its
-return value. If you are sure (by already having a valid pointer) that
-kref_get_unless_zero() will return true, then use kref_get() instead.
-
-The function kref_get_unless_zero also makes it possible to use rcu
-locking for lookups in the above example:
-
-struct my_data
-{
- struct rcu_head rhead;
- .
- struct kref refcount;
- .
- .
-};
-
-static struct my_data *get_entry_rcu()
-{
- struct my_data *entry = NULL;
- rcu_read_lock();
- if (!list_empty(&q)) {
- entry = container_of(q.next, struct my_data, link);
- if (!kref_get_unless_zero(&entry->refcount))
- entry = NULL;
- }
- rcu_read_unlock();
- return entry;
-}
-
-static void release_entry_rcu(struct kref *ref)
-{
- struct my_data *entry = container_of(ref, struct my_data, refcount);
-
- mutex_lock(&mutex);
- list_del_rcu(&entry->link);
- mutex_unlock(&mutex);
- kfree_rcu(entry, rhead);
-}
-
-static void put_entry(struct my_data *entry)
-{
- kref_put(&entry->refcount, release_entry_rcu);
-}
-
-But note that the struct kref member needs to remain in valid memory for a
-rcu grace period after release_entry_rcu was called. That can be accomplished
-by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu()
-before using kfree, but note that synchronize_rcu() may sleep for a
-substantial amount of time.
-
-
-Thomas Hellstrom
diff --git a/trunk/Documentation/networking/netdev-features.txt b/trunk/Documentation/networking/netdev-features.txt
index 4164f5c02e4b..f310edec8a77 100644
--- a/trunk/Documentation/networking/netdev-features.txt
+++ b/trunk/Documentation/networking/netdev-features.txt
@@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet.
This requests that the NIC receive all possible frames, including errored
frames (such as bad FCS, etc). This can be helpful when sniffing a link with
bad packets on it. Some NICs may receive more packets if also put into normal
-PROMISC mdoe.
+PROMISC mode.
diff --git a/trunk/Documentation/networking/vxlan.txt b/trunk/Documentation/networking/vxlan.txt
index 5b34b762d7d5..6d993510f091 100644
--- a/trunk/Documentation/networking/vxlan.txt
+++ b/trunk/Documentation/networking/vxlan.txt
@@ -32,7 +32,7 @@ no entry is in the forwarding table.
# ip link delete vxlan0
3. Show vxlan info
- # ip -d show vxlan0
+ # ip -d link show vxlan0
It is possible to create, destroy and display the vxlan
forwarding table using the new bridge command.
@@ -41,7 +41,7 @@ forwarding table using the new bridge command.
# bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
2. Delete forwarding table entry
- # bridge fdb delete 00:17:42:8a:b4:05
+ # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0
3. Show forwarding table
# bridge fdb show dev vxlan0
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 2f66431bc698..9386a63ea8f6 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -526,17 +526,17 @@ F: drivers/video/geode/
F: arch/x86/include/asm/geode.h
AMD IOMMU (AMD-VI)
-M: Joerg Roedel
+M: Joerg Roedel
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
-S: Supported
+S: Maintained
F: drivers/iommu/amd_iommu*.[ch]
F: include/linux/amd-iommu.h
AMD MICROCODE UPDATE SUPPORT
-M: Andreas Herrmann
+M: Andreas Herrmann
L: amd64-microcode@amd64.org
-S: Supported
+S: Maintained
F: arch/x86/kernel/microcode_amd.c
AMS (Apple Motion Sensor) DRIVER
@@ -841,6 +841,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F: arch/arm/mach-sa1100/jornada720.c
F: arch/arm/mach-sa1100/include/mach/jornada720.h
+ARM/IGEP MACHINE SUPPORT
+M: Enric Balletbo i Serra
+M: Javier Martinez Canillas
+L: linux-omap@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-omap2/board-igep0020.c
+
ARM/INCOME PXA270 SUPPORT
M: Marek Vasut
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2512,15 +2520,6 @@ S: Supported
F: drivers/gpu/drm/exynos
F: include/drm/exynos*
-DRM DRIVERS FOR NVIDIA TEGRA
-M: Thierry Reding
-L: dri-devel@lists.freedesktop.org
-L: linux-tegra@vger.kernel.org
-T: git git://gitorious.org/thierryreding/linux.git
-S: Maintained
-F: drivers/gpu/drm/tegra/
-F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
-
DSCC4 DRIVER
M: Francois Romieu
L: netdev@vger.kernel.org
@@ -2717,10 +2716,10 @@ F: include/linux/edac.h
EDAC-AMD64
M: Doug Thompson
-M: Borislav Petkov
+M: Borislav Petkov
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
-S: Supported
+S: Maintained
F: drivers/edac/amd64_edac*
EDAC-E752X
@@ -3607,6 +3606,49 @@ F: drivers/hid/hid-hyperv.c
F: drivers/net/hyperv/
F: drivers/staging/hv/
+I2C OVER PARALLEL PORT
+M: Jean Delvare
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-parport
+F: Documentation/i2c/busses/i2c-parport-light
+F: drivers/i2c/busses/i2c-parport.c
+F: drivers/i2c/busses/i2c-parport-light.c
+
+I2C/SMBUS CONTROLLER DRIVERS FOR PC
+M: Jean Delvare
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-ali1535
+F: Documentation/i2c/busses/i2c-ali1563
+F: Documentation/i2c/busses/i2c-ali15x3
+F: Documentation/i2c/busses/i2c-amd756
+F: Documentation/i2c/busses/i2c-amd8111
+F: Documentation/i2c/busses/i2c-i801
+F: Documentation/i2c/busses/i2c-nforce2
+F: Documentation/i2c/busses/i2c-piix4
+F: Documentation/i2c/busses/i2c-sis5595
+F: Documentation/i2c/busses/i2c-sis630
+F: Documentation/i2c/busses/i2c-sis96x
+F: Documentation/i2c/busses/i2c-via
+F: Documentation/i2c/busses/i2c-viapro
+F: drivers/i2c/busses/i2c-ali1535.c
+F: drivers/i2c/busses/i2c-ali1563.c
+F: drivers/i2c/busses/i2c-ali15x3.c
+F: drivers/i2c/busses/i2c-amd756.c
+F: drivers/i2c/busses/i2c-amd756-s4882.c
+F: drivers/i2c/busses/i2c-amd8111.c
+F: drivers/i2c/busses/i2c-i801.c
+F: drivers/i2c/busses/i2c-isch.c
+F: drivers/i2c/busses/i2c-nforce2.c
+F: drivers/i2c/busses/i2c-nforce2-s4985.c
+F: drivers/i2c/busses/i2c-piix4.c
+F: drivers/i2c/busses/i2c-sis5595.c
+F: drivers/i2c/busses/i2c-sis630.c
+F: drivers/i2c/busses/i2c-sis96x.c
+F: drivers/i2c/busses/i2c-via.c
+F: drivers/i2c/busses/i2c-viapro.c
+
I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman"
L: linux-i2c@vger.kernel.org
@@ -3614,9 +3656,8 @@ S: Maintained
F: drivers/i2c/busses/i2c-stub.c
I2C SUBSYSTEM
-M: "Jean Delvare (PC drivers, core)"
+M: Wolfram Sang
M: "Ben Dooks (embedded platforms)"
-M: "Wolfram Sang (embedded platforms)"
L: linux-i2c@vger.kernel.org
W: http://i2c.wiki.kernel.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
@@ -3627,6 +3668,13 @@ F: drivers/i2c/
F: include/linux/i2c.h
F: include/linux/i2c-*.h
+I2C-TAOS-EVM DRIVER
+M: Jean Delvare
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-taos-evm
+F: drivers/i2c/busses/i2c-taos-evm.c
+
I2C-TINY-USB DRIVER
M: Till Harbaum
L: linux-i2c@vger.kernel.org
@@ -3713,7 +3761,7 @@ S: Maintained
F: drivers/platform/x86/ideapad-laptop.c
IDE/ATAPI DRIVERS
-M: Borislav Petkov
+M: Borislav Petkov
L: linux-ide@vger.kernel.org
S: Maintained
F: Documentation/cdrom/ide-cd
@@ -4240,8 +4288,8 @@ F: include/linux/lockd/
F: include/linux/sunrpc/
KERNEL VIRTUAL MACHINE (KVM)
-M: Avi Kivity
M: Marcelo Tosatti
+M: Gleb Natapov
L: kvm@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
@@ -5373,7 +5421,7 @@ S: Maintained
F: sound/drivers/opl4/
OPROFILE
-M: Robert Richter
+M: Robert Richter
L: oprofile-list@lists.sf.net
S: Maintained
F: arch/*/include/asm/oprofile*.h
@@ -7219,6 +7267,14 @@ L: linux-xtensa@linux-xtensa.org
S: Maintained
F: arch/xtensa/
+THERMAL
+M: Zhang Rui
+L: linux-pm@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+S: Supported
+F: drivers/thermal/
+F: include/linux/thermal.h
+
THINKPAD ACPI EXTRAS DRIVER
M: Henrique de Moraes Holschuh
L: ibm-acpi-devel@lists.sourceforge.net
@@ -7896,13 +7952,6 @@ M: Roger Luethi
S: Maintained
F: drivers/net/ethernet/via/via-rhine.c
-VIAPRO SMBUS DRIVER
-M: Jean Delvare
-L: linux-i2c@vger.kernel.org
-S: Maintained
-F: Documentation/i2c/busses/i2c-viapro
-F: drivers/i2c/busses/i2c-viapro.c
-
VIA SD/MMC CARD CONTROLLER DRIVER
M: Bruce Chang
M: Harald Welte
@@ -8157,7 +8206,7 @@ F: drivers/platform/x86
X86 MCE INFRASTRUCTURE
M: Tony Luck
-M: Borislav Petkov
+M: Borislav Petkov
L: linux-edac@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/mcheck/*
diff --git a/trunk/Makefile b/trunk/Makefile
index a1ccf225c4e9..3d2fc460b22f 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
NAME = Terrified Chipmunk
# *DOCUMENTATION*
diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c
index 1e6956a90608..14db93e4c8a8 100644
--- a/trunk/arch/alpha/kernel/osf_sys.c
+++ b/trunk/arch/alpha/kernel/osf_sys.c
@@ -445,7 +445,7 @@ struct procfs_args {
* unhappy with OSF UFS. [CHECKME]
*/
static int
-osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
+osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags)
{
int retval;
struct cdfs_args tmp;
@@ -465,7 +465,7 @@ osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
}
static int
-osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
+osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags)
{
int retval;
struct cdfs_args tmp;
@@ -485,7 +485,7 @@ osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
}
static int
-osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
+osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags)
{
struct procfs_args tmp;
diff --git a/trunk/arch/arm/boot/Makefile b/trunk/arch/arm/boot/Makefile
index f2aa09eb658e..9137df539b61 100644
--- a/trunk/arch/arm/boot/Makefile
+++ b/trunk/arch/arm/boot/Makefile
@@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
$(obj)/xipImage: vmlinux FORCE
$(call if_changed,objcopy)
- $(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
+ @$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
$(obj)/Image $(obj)/zImage: FORCE
@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
- $(kecho) ' Kernel: $@ is ready'
+ @$(kecho) ' Kernel: $@ is ready'
$(obj)/compressed/vmlinux: $(obj)/Image FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
- $(kecho) ' Kernel: $@ is ready'
+ @$(kecho) ' Kernel: $@ is ready'
endif
@@ -90,7 +90,7 @@ fi
$(obj)/uImage: $(obj)/zImage FORCE
@$(check_for_multiple_loadaddr)
$(call if_changed,uimage)
- $(kecho) ' Image $@ is ready'
+ @$(kecho) ' Image $@ is ready'
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
@@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
- $(kecho) ' Kernel: $@ is ready'
+ @$(kecho) ' Kernel: $@ is ready'
PHONY += initrd FORCE
initrd:
diff --git a/trunk/arch/arm/boot/dts/tegra30.dtsi b/trunk/arch/arm/boot/dts/tegra30.dtsi
index b1497c7d7d68..df7f2270fc91 100644
--- a/trunk/arch/arm/boot/dts/tegra30.dtsi
+++ b/trunk/arch/arm/boot/dts/tegra30.dtsi
@@ -73,8 +73,8 @@
pinmux: pinmux {
compatible = "nvidia,tegra30-pinmux";
- reg = <0x70000868 0xd0 /* Pad control registers */
- 0x70003000 0x3e0>; /* Mux registers */
+ reg = <0x70000868 0xd4 /* Pad control registers */
+ 0x70003000 0x3e4>; /* Mux registers */
};
serial@70006000 {
diff --git a/trunk/arch/arm/include/asm/io.h b/trunk/arch/arm/include/asm/io.h
index 35c1ed89b936..42f042ee4ada 100644
--- a/trunk/arch/arm/include/asm/io.h
+++ b/trunk/arch/arm/include/asm/io.h
@@ -64,7 +64,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %1, %0"
- : "+Qo" (*(volatile u16 __force *)addr)
+ : "+Q" (*(volatile u16 __force *)addr)
: "r" (val));
}
@@ -72,7 +72,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
asm volatile("ldrh %1, %0"
- : "+Qo" (*(volatile u16 __force *)addr),
+ : "+Q" (*(volatile u16 __force *)addr),
"=r" (val));
return val;
}
diff --git a/trunk/arch/arm/include/asm/sched_clock.h b/trunk/arch/arm/include/asm/sched_clock.h
index 05b8e82ec9f5..e3f757263438 100644
--- a/trunk/arch/arm/include/asm/sched_clock.h
+++ b/trunk/arch/arm/include/asm/sched_clock.h
@@ -10,7 +10,5 @@
extern void sched_clock_postinit(void);
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
-extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
- unsigned long rate);
#endif
diff --git a/trunk/arch/arm/include/asm/vfpmacros.h b/trunk/arch/arm/include/asm/vfpmacros.h
index 6a6f1e485f41..301c1db3e99b 100644
--- a/trunk/arch/arm/include/asm/vfpmacros.h
+++ b/trunk/arch/arm/include/asm/vfpmacros.h
@@ -27,9 +27,9 @@
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
- tst \tmp, #HWCAP_VFPv3D16
- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
- addne \base, \base, #32*4 @ step over unused register space
+ tst \tmp, #HWCAP_VFPD32
+ ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
@@ -51,9 +51,9 @@
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
- tst \tmp, #HWCAP_VFPv3D16
- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
- addne \base, \base, #32*4 @ step over unused register space
+ tst \tmp, #HWCAP_VFPD32
+ stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
diff --git a/trunk/arch/arm/include/uapi/asm/hwcap.h b/trunk/arch/arm/include/uapi/asm/hwcap.h
index f254f6503cce..3688fd15a32d 100644
--- a/trunk/arch/arm/include/uapi/asm/hwcap.h
+++ b/trunk/arch/arm/include/uapi/asm/hwcap.h
@@ -18,11 +18,12 @@
#define HWCAP_THUMBEE (1 << 11)
#define HWCAP_NEON (1 << 12)
#define HWCAP_VFPv3 (1 << 13)
-#define HWCAP_VFPv3D16 (1 << 14)
+#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
#define HWCAP_TLS (1 << 15)
#define HWCAP_VFPv4 (1 << 16)
#define HWCAP_IDIVA (1 << 17)
#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
diff --git a/trunk/arch/arm/kernel/sched_clock.c b/trunk/arch/arm/kernel/sched_clock.c
index e21bac20d90d..fc6692e2b603 100644
--- a/trunk/arch/arm/kernel/sched_clock.c
+++ b/trunk/arch/arm/kernel/sched_clock.c
@@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks)
update_sched_clock();
}
-void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
- unsigned long rate)
-{
- setup_sched_clock(read, bits, rate);
- cd.needs_suspend = true;
-}
-
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
@@ -189,18 +182,15 @@ void __init sched_clock_postinit(void)
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
- if (cd.needs_suspend)
- cd.suspended = true;
+ cd.suspended = true;
return 0;
}
static void sched_clock_resume(void)
{
- if (cd.needs_suspend) {
- cd.epoch_cyc = read_sched_clock();
- cd.epoch_cyc_copy = cd.epoch_cyc;
- cd.suspended = false;
- }
+ cd.epoch_cyc = read_sched_clock();
+ cd.epoch_cyc_copy = cd.epoch_cyc;
+ cd.suspended = false;
}
static struct syscore_ops sched_clock_ops = {
diff --git a/trunk/arch/arm/mach-at91/at91rm9200_devices.c b/trunk/arch/arm/mach-at91/at91rm9200_devices.c
index 1e122bcd7845..3cee0e6ea7c3 100644
--- a/trunk/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/trunk/arch/arm/mach-at91/at91rm9200_devices.c
@@ -68,7 +68,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
- if (data->overcurrent_pin[i])
+ if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}
diff --git a/trunk/arch/arm/mach-at91/at91sam9260_devices.c b/trunk/arch/arm/mach-at91/at91sam9260_devices.c
index aa1e58729885..414bd855fb0c 100644
--- a/trunk/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/trunk/arch/arm/mach-at91/at91sam9260_devices.c
@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
- if (data->overcurrent_pin[i])
+ if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}
diff --git a/trunk/arch/arm/mach-at91/at91sam9261_devices.c b/trunk/arch/arm/mach-at91/at91sam9261_devices.c
index b9487696b7be..cd604aad8e96 100644
--- a/trunk/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/trunk/arch/arm/mach-at91/at91sam9261_devices.c
@@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
- if (data->overcurrent_pin[i])
+ if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}
diff --git a/trunk/arch/arm/mach-at91/at91sam9263_devices.c b/trunk/arch/arm/mach-at91/at91sam9263_devices.c
index cb85da2eccea..9c61e59a2104 100644
--- a/trunk/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/trunk/arch/arm/mach-at91/at91sam9263_devices.c
@@ -78,7 +78,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
- if (data->overcurrent_pin[i])
+ if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}
diff --git a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
index b1596072dcc2..fcd233cb33d2 100644
--- a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1841,8 +1841,8 @@ static struct resource sha_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_AESTDESSHA,
- .end = AT91SAM9G45_ID_AESTDESSHA,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
@@ -1874,8 +1874,8 @@ static struct resource tdes_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_AESTDESSHA,
- .end = AT91SAM9G45_ID_AESTDESSHA,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
@@ -1910,8 +1910,8 @@ static struct resource aes_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_AESTDESSHA,
- .end = AT91SAM9G45_ID_AESTDESSHA,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/trunk/arch/arm/mach-davinci/dm644x.c b/trunk/arch/arm/mach-davinci/dm644x.c
index cd0c8b1e1ecf..14e9947bad6e 100644
--- a/trunk/arch/arm/mach-davinci/dm644x.c
+++ b/trunk/arch/arm/mach-davinci/dm644x.c
@@ -713,8 +713,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type,
break;
case VPBE_ENC_CUSTOM_TIMINGS:
if (pclock <= 27000000) {
- v |= DM644X_VPSS_MUXSEL_PLL2_MODE |
- DM644X_VPSS_DACCLKEN;
+ v |= DM644X_VPSS_DACCLKEN;
writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
} else {
/*
diff --git a/trunk/arch/arm/mach-exynos/dma.c b/trunk/arch/arm/mach-exynos/dma.c
index 21d568b3b149..87e07d6fc615 100644
--- a/trunk/arch/arm/mach-exynos/dma.c
+++ b/trunk/arch/arm/mach-exynos/dma.c
@@ -275,6 +275,9 @@ static int __init exynos_dma_init(void)
exynos_pdma1_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4210_pdma1_peri);
exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+
+ if (samsung_rev() == EXYNOS4210_REV_0)
+ exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1;
} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
exynos_pdma0_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4212_pdma0_peri);
diff --git a/trunk/arch/arm/mach-exynos/include/mach/map.h b/trunk/arch/arm/mach-exynos/include/mach/map.h
index 8480849affb9..ed4da4544cd2 100644
--- a/trunk/arch/arm/mach-exynos/include/mach/map.h
+++ b/trunk/arch/arm/mach-exynos/include/mach/map.h
@@ -90,6 +90,7 @@
#define EXYNOS4_PA_MDMA0 0x10810000
#define EXYNOS4_PA_MDMA1 0x12850000
+#define EXYNOS4_PA_S_MDMA1 0x12840000
#define EXYNOS4_PA_PDMA0 0x12680000
#define EXYNOS4_PA_PDMA1 0x12690000
#define EXYNOS5_PA_MDMA0 0x10800000
diff --git a/trunk/arch/arm/mach-highbank/system.c b/trunk/arch/arm/mach-highbank/system.c
index 82c27230d4a9..86e37cd9376c 100644
--- a/trunk/arch/arm/mach-highbank/system.c
+++ b/trunk/arch/arm/mach-highbank/system.c
@@ -28,6 +28,7 @@ void highbank_restart(char mode, const char *cmd)
hignbank_set_pwr_soft_reset();
scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
- cpu_do_idle();
+ while (1)
+ cpu_do_idle();
}
diff --git a/trunk/arch/arm/mach-imx/clk-gate2.c b/trunk/arch/arm/mach-imx/clk-gate2.c
index 3c1b8ff9a0a6..cc49c7ae186e 100644
--- a/trunk/arch/arm/mach-imx/clk-gate2.c
+++ b/trunk/arch/arm/mach-imx/clk-gate2.c
@@ -112,7 +112,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
clk = clk_register(dev, &gate->hw);
if (IS_ERR(clk))
- kfree(clk);
+ kfree(gate);
return clk;
}
diff --git a/trunk/arch/arm/mach-imx/ehci-imx25.c b/trunk/arch/arm/mach-imx/ehci-imx25.c
index 412c583a24b0..576af7446952 100644
--- a/trunk/arch/arm/mach-imx/ehci-imx25.c
+++ b/trunk/arch/arm/mach-imx/ehci-imx25.c
@@ -30,7 +30,7 @@
#define MX25_H1_SIC_SHIFT 21
#define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT)
#define MX25_H1_PP_BIT (1 << 18)
-#define MX25_H1_PM_BIT (1 << 8)
+#define MX25_H1_PM_BIT (1 << 16)
#define MX25_H1_IPPUE_UP_BIT (1 << 7)
#define MX25_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX25_H1_TLL_BIT (1 << 5)
diff --git a/trunk/arch/arm/mach-imx/ehci-imx35.c b/trunk/arch/arm/mach-imx/ehci-imx35.c
index 779e16eb65cb..293397852e4e 100644
--- a/trunk/arch/arm/mach-imx/ehci-imx35.c
+++ b/trunk/arch/arm/mach-imx/ehci-imx35.c
@@ -30,7 +30,7 @@
#define MX35_H1_SIC_SHIFT 21
#define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT)
#define MX35_H1_PP_BIT (1 << 18)
-#define MX35_H1_PM_BIT (1 << 8)
+#define MX35_H1_PM_BIT (1 << 16)
#define MX35_H1_IPPUE_UP_BIT (1 << 7)
#define MX35_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX35_H1_TLL_BIT (1 << 5)
diff --git a/trunk/arch/arm/mach-omap2/board-igep0020.c b/trunk/arch/arm/mach-omap2/board-igep0020.c
index 48d5e41dfbfa..378590694447 100644
--- a/trunk/arch/arm/mach-omap2/board-igep0020.c
+++ b/trunk/arch/arm/mach-omap2/board-igep0020.c
@@ -580,6 +580,11 @@ static void __init igep_wlan_bt_init(void)
} else
return;
+ /* Make sure that the GPIO pins are muxed correctly */
+ omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT);
+ omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT);
+ omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT);
+
err = gpio_request_array(igep_wlan_bt_gpios,
ARRAY_SIZE(igep_wlan_bt_gpios));
if (err) {
diff --git a/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c b/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c
index b56d06b48782..95192a062d5d 100644
--- a/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/trunk/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -359,7 +359,7 @@ static struct clockdomain iss_44xx_clkdm = {
.clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS,
.wkdep_srcs = iss_wkup_sleep_deps,
.sleepdep_srcs = iss_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3_dss_44xx_clkdm = {
diff --git a/trunk/arch/arm/mach-omap2/common-board-devices.c b/trunk/arch/arm/mach-omap2/common-board-devices.c
index 48daac2581b4..84551f205e46 100644
--- a/trunk/arch/arm/mach-omap2/common-board-devices.c
+++ b/trunk/arch/arm/mach-omap2/common-board-devices.c
@@ -64,30 +64,36 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
struct spi_board_info *spi_bi = &ads7846_spi_board_info;
int err;
- err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
- if (err) {
- pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
- return;
- }
+ /*
+ * If a board defines get_pendown_state() function, request the pendown
+ * GPIO and set the GPIO debounce time.
+ * If a board does not define the get_pendown_state() function, then
+ * the ads7846 driver will setup the pendown GPIO itself.
+ */
+ if (board_pdata && board_pdata->get_pendown_state) {
+ err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
+ if (err) {
+ pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
+ return;
+ }
- if (gpio_debounce)
- gpio_set_debounce(gpio_pendown, gpio_debounce);
+ if (gpio_debounce)
+ gpio_set_debounce(gpio_pendown, gpio_debounce);
+
+ gpio_export(gpio_pendown, 0);
+ }
spi_bi->bus_num = bus_num;
spi_bi->irq = gpio_to_irq(gpio_pendown);
+ ads7846_config.gpio_pendown = gpio_pendown;
+
if (board_pdata) {
board_pdata->gpio_pendown = gpio_pendown;
+ board_pdata->gpio_pendown_debounce = gpio_debounce;
spi_bi->platform_data = board_pdata;
- if (board_pdata->get_pendown_state)
- gpio_export(gpio_pendown, 0);
- } else {
- ads7846_config.gpio_pendown = gpio_pendown;
}
- if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state))
- gpio_free(gpio_pendown);
-
spi_register_board_info(&ads7846_spi_board_info, 1);
}
#else
diff --git a/trunk/arch/arm/mach-omap2/devices.c b/trunk/arch/arm/mach-omap2/devices.c
index cba60e05e32e..c72b5a727720 100644
--- a/trunk/arch/arm/mach-omap2/devices.c
+++ b/trunk/arch/arm/mach-omap2/devices.c
@@ -19,6 +19,7 @@
#include
#include
#include
+#include
#include
#include
@@ -613,6 +614,83 @@ static void omap_init_vout(void)
static inline void omap_init_vout(void) {}
#endif
+#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE)
+static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
+{
+ int cnt = 0;
+
+ while (ocp2scp_dev->drv_name != NULL) {
+ cnt++;
+ ocp2scp_dev++;
+ }
+
+ return cnt;
+}
+
+static void omap_init_ocp2scp(void)
+{
+ struct omap_hwmod *oh;
+ struct platform_device *pdev;
+ int bus_id = -1, dev_cnt = 0, i;
+ struct omap_ocp2scp_dev *ocp2scp_dev;
+ const char *oh_name, *name;
+ struct omap_ocp2scp_platform_data *pdata;
+
+ if (!cpu_is_omap44xx())
+ return;
+
+ oh_name = "ocp2scp_usb_phy";
+ name = "omap-ocp2scp";
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("%s: could not find omap_hwmod for %s\n", __func__,
+ oh_name);
+ return;
+ }
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("%s: No memory for ocp2scp pdata\n", __func__);
+ return;
+ }
+
+ ocp2scp_dev = oh->dev_attr;
+ dev_cnt = count_ocp2scp_devices(ocp2scp_dev);
+
+ if (!dev_cnt) {
+ pr_err("%s: No devices connected to ocp2scp\n", __func__);
+ kfree(pdata);
+ return;
+ }
+
+ pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *)
+ * dev_cnt, GFP_KERNEL);
+ if (!pdata->devices) {
+ pr_err("%s: No memory for ocp2scp pdata devices\n", __func__);
+ kfree(pdata);
+ return;
+ }
+
+ for (i = 0; i < dev_cnt; i++, ocp2scp_dev++)
+ pdata->devices[i] = ocp2scp_dev;
+
+ pdata->dev_cnt = dev_cnt;
+
+ pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL,
+ 0, false);
+ if (IS_ERR(pdev)) {
+ pr_err("Could not build omap_device for %s %s\n",
+ name, oh_name);
+ kfree(pdata->devices);
+ kfree(pdata);
+ return;
+ }
+}
+#else
+static inline void omap_init_ocp2scp(void) { }
+#endif
+
/*-------------------------------------------------------------------------*/
static int __init omap2_init_devices(void)
@@ -640,6 +718,7 @@ static int __init omap2_init_devices(void)
omap_init_sham();
omap_init_aes();
omap_init_vout();
+ omap_init_ocp2scp();
return 0;
}
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.c b/trunk/arch/arm/mach-omap2/omap_hwmod.c
index b969ab1d258b..87cc6d058de2 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod.c
@@ -421,6 +421,38 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
return 0;
}
+/**
+ * _wait_softreset_complete - wait for an OCP softreset to complete
+ * @oh: struct omap_hwmod * to wait on
+ *
+ * Wait until the IP block represented by @oh reports that its OCP
+ * softreset is complete. This can be triggered by software (see
+ * _ocp_softreset()) or by hardware upon returning from off-mode (one
+ * example is HSMMC). Waits for up to MAX_MODULE_SOFTRESET_WAIT
+ * microseconds. Returns the number of microseconds waited.
+ */
+static int _wait_softreset_complete(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_class_sysconfig *sysc;
+ u32 softrst_mask;
+ int c = 0;
+
+ sysc = oh->class->sysc;
+
+ if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
+ omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs)
+ & SYSS_RESETDONE_MASK),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+ else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
+ softrst_mask = (0x1 << sysc->sysc_fields->srst_shift);
+ omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs)
+ & softrst_mask),
+ MAX_MODULE_SOFTRESET_WAIT, c);
+ }
+
+ return c;
+}
+
/**
* _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v
* @oh: struct omap_hwmod *
@@ -1282,6 +1314,18 @@ static void _enable_sysc(struct omap_hwmod *oh)
if (!oh->class->sysc)
return;
+ /*
+ * Wait until reset has completed, this is needed as the IP
+ * block is reset automatically by hardware in some cases
+ * (off-mode for example), and the drivers require the
+ * IP to be ready when they access it
+ */
+ if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+ _enable_optional_clocks(oh);
+ _wait_softreset_complete(oh);
+ if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+ _disable_optional_clocks(oh);
+
v = oh->_sysc_cache;
sf = oh->class->sysc->sysc_flags;
@@ -1804,7 +1848,7 @@ static int _am33xx_disable_module(struct omap_hwmod *oh)
*/
static int _ocp_softreset(struct omap_hwmod *oh)
{
- u32 v, softrst_mask;
+ u32 v;
int c = 0;
int ret = 0;
@@ -1834,19 +1878,7 @@ static int _ocp_softreset(struct omap_hwmod *oh)
if (oh->class->sysc->srst_udelay)
udelay(oh->class->sysc->srst_udelay);
- if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
- omap_test_timeout((omap_hwmod_read(oh,
- oh->class->sysc->syss_offs)
- & SYSS_RESETDONE_MASK),
- MAX_MODULE_SOFTRESET_WAIT, c);
- else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
- softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
- omap_test_timeout(!(omap_hwmod_read(oh,
- oh->class->sysc->sysc_offs)
- & softrst_mask),
- MAX_MODULE_SOFTRESET_WAIT, c);
- }
-
+ c = _wait_softreset_complete(oh);
if (c == MAX_MODULE_SOFTRESET_WAIT)
pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
oh->name, MAX_MODULE_SOFTRESET_WAIT);
@@ -2352,6 +2384,9 @@ static int __init _setup_reset(struct omap_hwmod *oh)
if (oh->_state != _HWMOD_STATE_INITIALIZED)
return -EINVAL;
+ if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK)
+ return -EPERM;
+
if (oh->rst_lines_cnt == 0) {
r = _enable(oh);
if (r) {
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 652d0285bd6d..0b1249e00398 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -21,6 +21,7 @@
#include
#include
#include
+#include
#include
#include
@@ -2125,6 +2126,14 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
.name = "mcpdm",
.class = &omap44xx_mcpdm_hwmod_class,
.clkdm_name = "abe_clkdm",
+ /*
+ * It's suspected that the McPDM requires an off-chip main
+ * functional clock, controlled via I2C. This IP block is
+ * currently reset very early during boot, before I2C is
+ * available, so it doesn't seem that we have any choice in
+ * the kernel other than to avoid resetting it.
+ */
+ .flags = HWMOD_EXT_OPT_MAIN_CLK,
.mpu_irqs = omap44xx_mcpdm_irqs,
.sdma_reqs = omap44xx_mcpdm_sdma_reqs,
.main_clk = "mcpdm_fck",
@@ -2681,6 +2690,32 @@ static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = {
.sysc = &omap44xx_ocp2scp_sysc,
};
+/* ocp2scp dev_attr */
+static struct resource omap44xx_usb_phy_and_pll_addrs[] = {
+ {
+ .name = "usb_phy",
+ .start = 0x4a0ad080,
+ .end = 0x4a0ae000,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* XXX: Remove this once control module driver is in place */
+ .name = "ctrl_dev",
+ .start = 0x4a002300,
+ .end = 0x4a002303,
+ .flags = IORESOURCE_MEM,
+ },
+ { }
+};
+
+static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
+ {
+ .drv_name = "omap-usb2",
+ .res = omap44xx_usb_phy_and_pll_addrs,
+ },
+ { }
+};
+
/* ocp2scp_usb_phy */
static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.name = "ocp2scp_usb_phy",
@@ -2694,6 +2729,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.modulemode = MODULEMODE_HWCTRL,
},
},
+ .dev_attr = ocp2scp_dev_attr,
};
/*
diff --git a/trunk/arch/arm/mach-omap2/twl-common.c b/trunk/arch/arm/mach-omap2/twl-common.c
index 635e109f5ad3..a256135d8e48 100644
--- a/trunk/arch/arm/mach-omap2/twl-common.c
+++ b/trunk/arch/arm/mach-omap2/twl-common.c
@@ -73,6 +73,7 @@ void __init omap4_pmic_init(const char *pmic_type,
{
/* PMIC part*/
omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
+ omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
/* Register additional devices on i2c1 bus if needed */
@@ -366,7 +367,7 @@ static struct regulator_init_data omap4_clk32kg_idata = {
};
static struct regulator_consumer_supply omap4_vdd1_supply[] = {
- REGULATOR_SUPPLY("vcc", "mpu.0"),
+ REGULATOR_SUPPLY("vcc", "cpu0"),
};
static struct regulator_consumer_supply omap4_vdd2_supply[] = {
diff --git a/trunk/arch/arm/mach-omap2/vc.c b/trunk/arch/arm/mach-omap2/vc.c
index 880249b17012..75878c37959b 100644
--- a/trunk/arch/arm/mach-omap2/vc.c
+++ b/trunk/arch/arm/mach-omap2/vc.c
@@ -264,7 +264,7 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
if (initialized) {
if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
- pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
+ pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n",
__func__, voltdm->name, i2c_high_speed);
return;
}
diff --git a/trunk/arch/arm/mach-pxa/hx4700.c b/trunk/arch/arm/mach-pxa/hx4700.c
index 5ecbd17b5641..e2c6391863fe 100644
--- a/trunk/arch/arm/mach-pxa/hx4700.c
+++ b/trunk/arch/arm/mach-pxa/hx4700.c
@@ -28,6 +28,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -556,7 +557,7 @@ static struct platform_device hx4700_lcd = {
*/
static struct platform_pwm_backlight_data backlight_data = {
- .pwm_id = 1,
+ .pwm_id = -1, /* Superseded by pwm_lookup */
.max_brightness = 200,
.dft_brightness = 100,
.pwm_period_ns = 30923,
@@ -571,6 +572,10 @@ static struct platform_device backlight = {
},
};
+static struct pwm_lookup hx4700_pwm_lookup[] = {
+ PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL),
+};
+
/*
* USB "Transceiver"
*/
@@ -872,6 +877,7 @@ static void __init hx4700_init(void)
pxa_set_stuart_info(NULL);
platform_add_devices(devices, ARRAY_SIZE(devices));
+ pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
pxa_set_ficp_info(&ficp_info);
pxa27x_set_i2c_power_info(NULL);
diff --git a/trunk/arch/arm/mach-pxa/spitz_pm.c b/trunk/arch/arm/mach-pxa/spitz_pm.c
index 438f02fe122a..842596d4d31e 100644
--- a/trunk/arch/arm/mach-pxa/spitz_pm.c
+++ b/trunk/arch/arm/mach-pxa/spitz_pm.c
@@ -86,10 +86,7 @@ static void spitz_discharge1(int on)
gpio_set_value(SPITZ_GPIO_LED_GREEN, on);
}
-static unsigned long gpio18_config[] = {
- GPIO18_RDY,
- GPIO18_GPIO,
-};
+static unsigned long gpio18_config = GPIO18_GPIO;
static void spitz_presuspend(void)
{
@@ -112,7 +109,7 @@ static void spitz_presuspend(void)
PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0);
- pxa2xx_mfp_config(&gpio18_config[0], 1);
+ pxa2xx_mfp_config(&gpio18_config, 1);
gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown");
gpio_free(18);
@@ -131,7 +128,6 @@ static void spitz_presuspend(void)
static void spitz_postsuspend(void)
{
- pxa2xx_mfp_config(&gpio18_config[1], 1);
}
static int spitz_should_wakeup(unsigned int resume_on_alarm)
diff --git a/trunk/arch/arm/mm/alignment.c b/trunk/arch/arm/mm/alignment.c
index 023f443784ec..b820edaf3184 100644
--- a/trunk/arch/arm/mm/alignment.c
+++ b/trunk/arch/arm/mm/alignment.c
@@ -745,7 +745,7 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- union offset_union offset;
+ union offset_union uninitialized_var(offset);
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c
index f076f209c7a4..58bc3e4d3bd0 100644
--- a/trunk/arch/arm/mm/dma-mapping.c
+++ b/trunk/arch/arm/mm/dma-mapping.c
@@ -1036,8 +1036,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
spin_unlock_irqrestore(&mapping->lock, flags);
}
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
- gfp_t gfp, struct dma_attrs *attrs)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
@@ -1051,23 +1050,6 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
if (!pages)
return NULL;
- if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
- {
- unsigned long order = get_order(size);
- struct page *page;
-
- page = dma_alloc_from_contiguous(dev, count, order);
- if (!page)
- goto error;
-
- __dma_clear_buffer(page, size);
-
- for (i = 0; i < count; i++)
- pages[i] = page + i;
-
- return pages;
- }
-
while (count) {
int j, order = __fls(count);
@@ -1101,21 +1083,14 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
return NULL;
}
-static int __iommu_free_buffer(struct device *dev, struct page **pages,
- size_t size, struct dma_attrs *attrs)
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
{
int count = size >> PAGE_SHIFT;
int array_size = count * sizeof(struct page *);
int i;
-
- if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
- dma_release_from_contiguous(dev, pages[0], count);
- } else {
- for (i = 0; i < count; i++)
- if (pages[i])
- __free_pages(pages[i], 0);
- }
-
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
if (array_size <= PAGE_SIZE)
kfree(pages);
else
@@ -1277,7 +1252,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (gfp & GFP_ATOMIC)
return __iommu_alloc_atomic(dev, size, handle);
- pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+ pages = __iommu_alloc_buffer(dev, size, gfp);
if (!pages)
return NULL;
@@ -1298,7 +1273,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
err_mapping:
__iommu_remove_mapping(dev, *handle, size);
err_buffer:
- __iommu_free_buffer(dev, pages, size, attrs);
+ __iommu_free_buffer(dev, pages, size);
return NULL;
}
@@ -1354,7 +1329,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
__iommu_remove_mapping(dev, handle, size);
- __iommu_free_buffer(dev, pages, size, attrs);
+ __iommu_free_buffer(dev, pages, size);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
diff --git a/trunk/arch/arm/plat-omap/i2c.c b/trunk/arch/arm/plat-omap/i2c.c
index a5683a84c6ee..6013831a043e 100644
--- a/trunk/arch/arm/plat-omap/i2c.c
+++ b/trunk/arch/arm/plat-omap/i2c.c
@@ -26,12 +26,14 @@
#include
#include
#include
+#include
#include
#include
#include
#include
#include
+#include
#include
#define OMAP_I2C_SIZE 0x3f
@@ -127,6 +129,16 @@ static inline int omap1_i2c_add_bus(int bus_id)
#ifdef CONFIG_ARCH_OMAP2PLUS
+/*
+ * XXX This function is a temporary compatibility wrapper - only
+ * needed until the I2C driver can be converted to call
+ * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
+ */
+static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
+{
+ omap_pm_set_max_mpu_wakeup_lat(dev, t);
+}
+
static inline int omap2_i2c_add_bus(int bus_id)
{
int l;
@@ -158,6 +170,15 @@ static inline int omap2_i2c_add_bus(int bus_id)
dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
pdata->flags = dev_attr->flags;
+ /*
+ * When waiting for completion of an i2c transfer, we need to
+ * set a wake up latency constraint for the MPU. This is to
+ * ensure quick enough wakeup from idle, when transfer
+ * completes.
+ * Only omap3 has support for constraints
+ */
+ if (cpu_is_omap34xx())
+ pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
pdev = omap_device_build(name, bus_id, oh, pdata,
sizeof(struct omap_i2c_bus_platform_data),
NULL, 0, 0);
diff --git a/trunk/arch/arm/plat-omap/include/plat/omap_hwmod.h b/trunk/arch/arm/plat-omap/include/plat/omap_hwmod.h
index b3349f7b1a2c..1db029438022 100644
--- a/trunk/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/trunk/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -443,6 +443,11 @@ struct omap_hwmod_omap4_prcm {
* in order to complete the reset. Optional clocks will be disabled
* again after the reset.
* HWMOD_16BIT_REG: Module has 16bit registers
+ * HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock source for
+ * this IP block comes from an off-chip source and is not always
+ * enabled. This prevents the hwmod code from being able to
+ * enable and reset the IP block early. XXX Eventually it should
+ * be possible to query the clock framework for this information.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -453,6 +458,7 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_NO_IDLEST (1 << 6)
#define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7)
#define HWMOD_16BIT_REG (1 << 8)
+#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
/*
* omap_hwmod._int_flags definitions
diff --git a/trunk/arch/arm/tools/Makefile b/trunk/arch/arm/tools/Makefile
index cd60a81163e9..32d05c8219dc 100644
--- a/trunk/arch/arm/tools/Makefile
+++ b/trunk/arch/arm/tools/Makefile
@@ -5,6 +5,6 @@
#
include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
- $(kecho) ' Generating $@'
+ @$(kecho) ' Generating $@'
@mkdir -p $(dir $@)
$(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
diff --git a/trunk/arch/arm/vfp/vfpmodule.c b/trunk/arch/arm/vfp/vfpmodule.c
index c834b32af275..3b44e0dd0a93 100644
--- a/trunk/arch/arm/vfp/vfpmodule.c
+++ b/trunk/arch/arm/vfp/vfpmodule.c
@@ -701,11 +701,14 @@ static int __init vfp_init(void)
elf_hwcap |= HWCAP_VFPv3;
/*
- * Check for VFPv3 D16. CPUs in this configuration
- * only have 16 x 64bit registers.
+ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
+ * this configuration only have 16 x 64bit
+ * registers.
*/
if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
- elf_hwcap |= HWCAP_VFPv3D16;
+ elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+ else
+ elf_hwcap |= HWCAP_VFPD32;
}
#endif
/*
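
After this change, "VFPv3-D16" and "has 32 double-precision registers" are reported to userspace as two distinct hwcap bits. A user-space sketch of how a program might test them follows; the bit values below are placeholders, the real ones come from the kernel's asm/hwcap.h.

	#include <stdio.h>
	#include <sys/auxv.h>

	#define HWCAP_VFPv3D16_SKETCH (1UL << 13) /* placeholder value */
	#define HWCAP_VFPD32_SKETCH   (1UL << 19) /* placeholder value */

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		if (hwcap & HWCAP_VFPD32_SKETCH)
			printf("VFP has 32 double-precision registers\n");
		else if (hwcap & HWCAP_VFPv3D16_SKETCH)
			printf("VFP limited to 16 double-precision registers\n");
		else
			printf("neither bit set (or not an ARM VFPv3/v4 system)\n");
		return 0;
	}
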
diff --git a/trunk/arch/arm/xen/enlighten.c b/trunk/arch/arm/xen/enlighten.c
index 59bcb96ac369..f57609275449 100644
--- a/trunk/arch/arm/xen/enlighten.c
+++ b/trunk/arch/arm/xen/enlighten.c
@@ -166,3 +166,14 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
*pages = NULL;
}
EXPORT_SYMBOL_GPL(free_xenballooned_pages);
+
+/* In the hypervisor.S file. */
+EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
+EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
+EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
+EXPORT_SYMBOL_GPL(privcmd_call);
diff --git a/trunk/arch/arm64/Kconfig b/trunk/arch/arm64/Kconfig
index ef54a59a9e89..15ac18a56c93 100644
--- a/trunk/arch/arm64/Kconfig
+++ b/trunk/arch/arm64/Kconfig
@@ -1,6 +1,7 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select GENERIC_CLOCKEVENTS
select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IOMAP
diff --git a/trunk/arch/arm64/include/asm/elf.h b/trunk/arch/arm64/include/asm/elf.h
index cf284649dfcb..07fea290d7c1 100644
--- a/trunk/arch/arm64/include/asm/elf.h
+++ b/trunk/arch/arm64/include/asm/elf.h
@@ -25,12 +25,10 @@
#include
typedef unsigned long elf_greg_t;
-typedef unsigned long elf_freg_t[3];
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct user_fp elf_fpregset_t;
+typedef struct user_fpsimd_state elf_fpregset_t;
#define EM_AARCH64 183
@@ -87,7 +85,6 @@ typedef struct user_fp elf_fpregset_t;
#define R_AARCH64_MOVW_PREL_G2_NC 292
#define R_AARCH64_MOVW_PREL_G3 293
-
/*
* These are used to set parameters in the core dumps.
*/
diff --git a/trunk/arch/arm64/include/asm/fpsimd.h b/trunk/arch/arm64/include/asm/fpsimd.h
index b42fab9f62a9..c43b4ac13008 100644
--- a/trunk/arch/arm64/include/asm/fpsimd.h
+++ b/trunk/arch/arm64/include/asm/fpsimd.h
@@ -25,9 +25,8 @@
* - FPSR and FPCR
* - 32 128-bit data registers
*
- * Note that user_fp forms a prefix of this structure, which is relied
- * upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must
- * form a prefix of struct fpsimd_state.
+ * Note that user_fpsimd forms a prefix of this structure, which is
+ * relied upon in the ptrace FP/SIMD accessors.
*/
struct fpsimd_state {
union {
diff --git a/trunk/arch/arm64/include/asm/io.h b/trunk/arch/arm64/include/asm/io.h
index 74a2a7d304a9..d2f05a608274 100644
--- a/trunk/arch/arm64/include/asm/io.h
+++ b/trunk/arch/arm64/include/asm/io.h
@@ -114,7 +114,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* I/O port access primitives.
*/
#define IO_SPACE_LIMIT 0xffff
-#define PCI_IOBASE ((void __iomem *)0xffffffbbfffe0000UL)
+#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M))
static inline u8 inb(unsigned long addr)
{
@@ -222,12 +222,12 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
extern void __iounmap(volatile void __iomem *addr);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-#define ioremap(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
-#define ioremap_nocache(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE)
-#define ioremap_wc(addr, size) __ioremap((addr), (size), PROT_NORMAL_NC)
+#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
#define ARCH_HAS_IOREMAP_WC
diff --git a/trunk/arch/arm64/include/asm/pgtable-hwdef.h b/trunk/arch/arm64/include/asm/pgtable-hwdef.h
index 0f3b4581d925..75fd13d289b9 100644
--- a/trunk/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/trunk/arch/arm64/include/asm/pgtable-hwdef.h
@@ -38,7 +38,8 @@
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
-#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54)
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -57,7 +58,8 @@
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
-#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
+#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
diff --git a/trunk/arch/arm64/include/asm/pgtable.h b/trunk/arch/arm64/include/asm/pgtable.h
index 8960239be722..14aba2db6776 100644
--- a/trunk/arch/arm64/include/asm/pgtable.h
+++ b/trunk/arch/arm64/include/asm/pgtable.h
@@ -62,23 +62,23 @@ extern pgprot_t pgprot_default;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
-#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
-#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY)
-
-#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
-#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
-#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
-#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
+#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
+#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+
+#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#endif /* __ASSEMBLY__ */
@@ -130,10 +130,10 @@ extern struct page *empty_zero_page;
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
-#define pte_exec(pte) (!(pte_val(pte) & PTE_XN))
+#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_present_exec_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
(PTE_VALID | PTE_USER))
#define PTE_BIT_FUNC(fn,op) \
@@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
+ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
diff --git a/trunk/arch/arm64/include/asm/processor.h b/trunk/arch/arm64/include/asm/processor.h
index 5d810044feda..77f696c14339 100644
--- a/trunk/arch/arm64/include/asm/processor.h
+++ b/trunk/arch/arm64/include/asm/processor.h
@@ -43,6 +43,8 @@
#else
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
+
+#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
#endif /* __KERNEL__ */
struct debug_info {
diff --git a/trunk/arch/arm64/include/asm/unistd.h b/trunk/arch/arm64/include/asm/unistd.h
index 63f853f8b718..68aff2816e86 100644
--- a/trunk/arch/arm64/include/asm/unistd.h
+++ b/trunk/arch/arm64/include/asm/unistd.h
@@ -14,7 +14,6 @@
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_COMPAT
-#define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION
#define __ARCH_WANT_COMPAT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
diff --git a/trunk/arch/arm64/kernel/perf_event.c b/trunk/arch/arm64/kernel/perf_event.c
index ecbf2d81ec5c..c76c7241125b 100644
--- a/trunk/arch/arm64/kernel/perf_event.c
+++ b/trunk/arch/arm64/kernel/perf_event.c
@@ -613,17 +613,11 @@ enum armv8_pmuv3_perf_types {
ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
-
- /*
- * This isn't an architected event.
- * We detect this event number and use the cycle counter instead.
- */
- ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF,
};
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
@@ -1106,7 +1100,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
/* Always place a cycle counter into the cycle counter. */
- if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
+ if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
diff --git a/trunk/arch/arm64/kernel/process.c b/trunk/arch/arm64/kernel/process.c
index f22965ea1cfc..e04cebdbb47f 100644
--- a/trunk/arch/arm64/kernel/process.c
+++ b/trunk/arch/arm64/kernel/process.c
@@ -309,24 +309,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
return last;
}
-/*
- * Fill in the task's elfregs structure for a core dump.
- */
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
-{
- elf_core_copy_regs(elfregs, task_pt_regs(t));
- return 1;
-}
-
-/*
- * fill in the fpe structure for a core dump...
- */
-int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
-{
- return 0;
-}
-EXPORT_SYMBOL(dump_fpu);
-
/*
* Shuffle the argument into the correct register before calling the
* thread function. x1 is the thread argument, x2 is the pointer to
diff --git a/trunk/arch/arm64/kernel/smp.c b/trunk/arch/arm64/kernel/smp.c
index 226b6bf6e9c2..538300f2273d 100644
--- a/trunk/arch/arm64/kernel/smp.c
+++ b/trunk/arch/arm64/kernel/smp.c
@@ -211,8 +211,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* before we continue.
*/
set_cpu_online(cpu, true);
- while (!cpu_active(cpu))
- cpu_relax();
+ complete(&cpu_running);
/*
* OK, it's off to the idle thread for us
diff --git a/trunk/arch/arm64/mm/init.c b/trunk/arch/arm64/mm/init.c
index efbf7df05d3f..4cd28931dba9 100644
--- a/trunk/arch/arm64/mm/init.c
+++ b/trunk/arch/arm64/mm/init.c
@@ -80,7 +80,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
#ifdef CONFIG_ZONE_DMA32
/* 4GB maximum for 32-bit only capable devices */
max_dma32 = min(max, MAX_DMA32_PFN);
- zone_size[ZONE_DMA32] = max_dma32 - min;
+ zone_size[ZONE_DMA32] = max(min, max_dma32) - min;
#endif
zone_size[ZONE_NORMAL] = max - max_dma32;
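
A minimal stand-alone sketch of why the clamp matters, using made-up page-frame numbers that are not from the patch: if the first usable PFN already sits above the 4 GB boundary, the old "max_dma32 - min" underflows, whereas "max(min, max_dma32) - min" collapses the DMA32 zone to zero pages.

	#include <stdio.h>

	/* Hypothetical PFN limit for illustration only (4 KB pages: 0x100000 == 4 GB). */
	#define MAX_DMA32_PFN 0x100000UL

	static unsigned long dma32_zone_size(unsigned long min, unsigned long max)
	{
		unsigned long max_dma32 = (max < MAX_DMA32_PFN) ? max : MAX_DMA32_PFN;

		/* Patched form: never negative, even when min > max_dma32. */
		return ((max_dma32 > min) ? max_dma32 : min) - min;
	}

	int main(void)
	{
		/* RAM starting at 6 GB: the old "max_dma32 - min" would wrap around. */
		printf("DMA32 pages: %lu\n", dma32_zone_size(0x180000UL, 0x200000UL));
		return 0;
	}
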
diff --git a/trunk/arch/h8300/include/asm/cache.h b/trunk/arch/h8300/include/asm/cache.h
index c6350283649d..05887a1d80e5 100644
--- a/trunk/arch/h8300/include/asm/cache.h
+++ b/trunk/arch/h8300/include/asm/cache.h
@@ -2,7 +2,8 @@
#define __ARCH_H8300_CACHE_H
/* bytes per L1 cache line */
-#define L1_CACHE_BYTES 4
+#define L1_CACHE_SHIFT 2
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/* m68k-elf-gcc 2.95.2 doesn't like these */
diff --git a/trunk/arch/ia64/mm/init.c b/trunk/arch/ia64/mm/init.c
index acd5b68e8871..082e383c1b6f 100644
--- a/trunk/arch/ia64/mm/init.c
+++ b/trunk/arch/ia64/mm/init.c
@@ -637,7 +637,6 @@ mem_init (void)
high_memory = __va(max_low_pfn * PAGE_SIZE);
- reset_zone_present_pages();
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);
diff --git a/trunk/arch/m68k/include/asm/signal.h b/trunk/arch/m68k/include/asm/signal.h
index 67e489d8d1bd..2df26b57c26a 100644
--- a/trunk/arch/m68k/include/asm/signal.h
+++ b/trunk/arch/m68k/include/asm/signal.h
@@ -41,7 +41,7 @@ struct k_sigaction {
static inline void sigaddset(sigset_t *set, int _sig)
{
asm ("bfset %0{%1,#1}"
- : "+od" (*set)
+ : "+o" (*set)
: "id" ((_sig - 1) ^ 31)
: "cc");
}
@@ -49,7 +49,7 @@ static inline void sigaddset(sigset_t *set, int _sig)
static inline void sigdelset(sigset_t *set, int _sig)
{
asm ("bfclr %0{%1,#1}"
- : "+od" (*set)
+ : "+o" (*set)
: "id" ((_sig - 1) ^ 31)
: "cc");
}
@@ -65,7 +65,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
int ret;
asm ("bfextu %1{%2,#1},%0"
: "=d" (ret)
- : "od" (*set), "id" ((_sig-1) ^ 31)
+ : "o" (*set), "id" ((_sig-1) ^ 31)
: "cc");
return ret;
}
diff --git a/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index d38246e33ddb..9f883bf76953 100644
--- a/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -30,6 +30,7 @@
* measurement, and debugging facilities.
*/
+#include
#include
#include
#include
diff --git a/trunk/arch/mips/fw/arc/misc.c b/trunk/arch/mips/fw/arc/misc.c
index 7cf80ca2c1d2..f9f5307434c2 100644
--- a/trunk/arch/mips/fw/arc/misc.c
+++ b/trunk/arch/mips/fw/arc/misc.c
@@ -11,6 +11,7 @@
*/
#include
#include
+#include
#include
diff --git a/trunk/arch/mips/include/asm/bitops.h b/trunk/arch/mips/include/asm/bitops.h
index 82ad35ce2b45..46ac73abd5ee 100644
--- a/trunk/arch/mips/include/asm/bitops.h
+++ b/trunk/arch/mips/include/asm/bitops.h
@@ -14,7 +14,6 @@
#endif
#include
-#include
#include
#include
#include /* sigh ... */
@@ -44,6 +43,24 @@
#define smp_mb__before_clear_bit() smp_mb__before_llsc()
#define smp_mb__after_clear_bit() smp_llsc_mb()
+
+/*
+ * These are the "slower" versions of the functions and are in bitops.c.
+ * These functions call raw_local_irq_{save,restore}().
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
+int __mips_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr);
+int __mips_test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr);
+
+
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@@ -57,7 +74,7 @@
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_set_bit(nr, addr);
}
/*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a &= ~mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_clear_bit(nr, addr);
}
/*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- *a ^= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ __mips_change_bit(nr, addr);
}
/*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_set_bit(nr, addr);
smp_llsc_mb();
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a |= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_set_bit_lock(nr, addr);
smp_llsc_mb();
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a &= ~mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_clear_bit(nr, addr);
smp_llsc_mb();
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- unsigned short bit = nr & SZLONG_MASK;
+ int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
- } else {
- volatile unsigned long *a = addr;
- unsigned long mask;
- unsigned long flags;
-
- a += nr >> SZLONG_LOG;
- mask = 1UL << bit;
- raw_local_irq_save(flags);
- res = (mask & *a);
- *a ^= mask;
- raw_local_irq_restore(flags);
- }
+ } else
+ res = __mips_test_and_change_bit(nr, addr);
smp_llsc_mb();
diff --git a/trunk/arch/mips/include/asm/compat.h b/trunk/arch/mips/include/asm/compat.h
index 58277e0e9cd4..3c5d1464b7bd 100644
--- a/trunk/arch/mips/include/asm/compat.h
+++ b/trunk/arch/mips/include/asm/compat.h
@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
static inline int is_compat_task(void)
{
- return test_thread_flag(TIF_32BIT);
+ return test_thread_flag(TIF_32BIT_ADDR);
}
#endif /* _ASM_COMPAT_H */
diff --git a/trunk/arch/mips/include/asm/io.h b/trunk/arch/mips/include/asm/io.h
index 29d9c23c20c7..ff2e0345e013 100644
--- a/trunk/arch/mips/include/asm/io.h
+++ b/trunk/arch/mips/include/asm/io.h
@@ -15,6 +15,7 @@
#include
#include
#include
+#include
#include
#include
diff --git a/trunk/arch/mips/include/asm/irqflags.h b/trunk/arch/mips/include/asm/irqflags.h
index 309cbcd6909c..9f3384c789d7 100644
--- a/trunk/arch/mips/include/asm/irqflags.h
+++ b/trunk/arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
#include
#include
-__asm__(
- " .macro arch_local_irq_enable \n"
- " .set push \n"
- " .set reorder \n"
- " .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
- " ori $1, 0x400 \n"
- " xori $1, 0x400 \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
- " ei \n"
-#else
- " mfc0 $1,$12 \n"
- " ori $1,0x1f \n"
- " xori $1,0x1e \n"
- " mtc0 $1,$12 \n"
-#endif
- " irq_enable_hazard \n"
- " .set pop \n"
- " .endm");
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of call overhead on each local_irq_enable()
- */
- smtc_ipi_replay();
-#endif
- __asm__ __volatile__(
- "arch_local_irq_enable"
- : /* no outputs */
- : /* no inputs */
- : "memory");
-}
-
-
-/*
- * For cli() we have to insert nops to make sure that the new value
- * has actually arrived in the status register before the end of this
- * macro.
- * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
- * no nops at all.
- */
-/*
- * For TX49, operating only IE bit is not enough.
- *
- * If mfc0 $12 follows store and the mfc0 is last instruction of a
- * page and fetching the next instruction causes TLB miss, the result
- * of the mfc0 might wrongly contain EXL bit.
- *
- * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
- *
- * Workaround: mask EXL bit of the result or place a nop before mfc0.
- */
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 \n"
- " ori $1, 0x400 \n"
- " .set noreorder \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
" di \n"
-#else
- " mfc0 $1,$12 \n"
- " ori $1,0x1f \n"
- " xori $1,0x1f \n"
- " .set noreorder \n"
- " mtc0 $1,$12 \n"
-#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
: "memory");
}
-__asm__(
- " .macro arch_local_save_flags flags \n"
- " .set push \n"
- " .set reorder \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\flags, $2, 1 \n"
-#else
- " mfc0 \\flags, $12 \n"
-#endif
- " .set pop \n"
- " .endm \n");
-
-static inline unsigned long arch_local_save_flags(void)
-{
- unsigned long flags;
- asm volatile("arch_local_save_flags %0" : "=r" (flags));
- return flags;
-}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 \\result, $2, 1 \n"
- " ori $1, \\result, 0x400 \n"
- " .set noreorder \n"
- " mtc0 $1, $2, 1 \n"
- " andi \\result, \\result, 0x400 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
" di \\result \n"
" andi \\result, 1 \n"
-#else
- " mfc0 \\result, $12 \n"
- " ori $1, \\result, 0x1f \n"
- " xori $1, 0x1f \n"
- " .set noreorder \n"
- " mtc0 $1, $12 \n"
-#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
return flags;
}
+
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- "mfc0 $1, $2, 1 \n"
- "andi \\flags, 0x400 \n"
- "ori $1, 0x400 \n"
- "xori $1, 0x400 \n"
- "or \\flags, $1 \n"
- "mtc0 \\flags, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
 * condition we have had since day 1.
*/
" beqz \\flags, 1f \n"
- " di \n"
+ " di \n"
" ei \n"
"1: \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
-#else
- " mfc0 $1, $12 \n"
- " andi \\flags, 1 \n"
- " ori $1, 0x1f \n"
- " xori $1, 0x1f \n"
- " or \\flags, $1 \n"
- " mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
-
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of branch and call overhead on each
- * local_irq_restore()
- */
- if (unlikely(!(flags & 0x0400)))
- smtc_ipi_replay();
-#endif
-
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
}
+#else
+/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
+void arch_local_irq_disable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+void __arch_local_irq_restore(unsigned long flags);
+#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+
+
+__asm__(
+ " .macro arch_local_irq_enable \n"
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
+ " ori $1, 0x400 \n"
+ " xori $1, 0x400 \n"
+ " mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ " ei \n"
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1e \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " irq_enable_hazard \n"
+ " .set pop \n"
+ " .endm");
+
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of call overhead on each local_irq_enable()
+ */
+ smtc_ipi_replay();
+#endif
+ __asm__ __volatile__(
+ "arch_local_irq_enable"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+}
+
+
+__asm__(
+ " .macro arch_local_save_flags flags \n"
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 \\flags, $2, 1 \n"
+#else
+ " mfc0 \\flags, $12 \n"
+#endif
+ " .set pop \n"
+ " .endm \n");
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ asm volatile("arch_local_save_flags %0" : "=r" (flags));
+ return flags;
+}
+
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
#endif
}
-#endif
+#endif /* #ifndef __ASSEMBLY__ */
/*
* Do the CPU's IRQ-state tracing from assembly code.
diff --git a/trunk/arch/mips/include/asm/thread_info.h b/trunk/arch/mips/include/asm/thread_info.h
index 8debe9e91754..18806a52061c 100644
--- a/trunk/arch/mips/include/asm/thread_info.h
+++ b/trunk/arch/mips/include/asm/thread_info.h
@@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
-#ifdef CONFIG_MIPS32_O32
-#define TIF_32BIT TIF_32BIT_REGS
-#elif defined(CONFIG_MIPS32_N32)
-#define TIF_32BIT _TIF_32BIT_ADDR
-#endif /* CONFIG_MIPS32_O32 */
-
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
-	if (prev->addr + prev->size == start && prev->type == type) {
- prev->size += size;
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ struct boot_mem_map_entry *entry = boot_mem_map.map + i;
+ unsigned long top;
+
+ if (entry->type != type)
+ continue;
+
+ if (start + size < entry->addr)
+ continue; /* no overlap */
+
+ if (entry->addr + entry->size < start)
+ continue; /* no overlap */
+
+ top = max(entry->addr + entry->size, start + size);
+ entry->addr = min(entry->addr, start);
+ entry->size = top - entry->addr;
+
return;
}
- if (x == BOOT_MEM_MAP_MAX) {
+ if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
pr_err("Ooops! Too many entries in the memory map!\n");
return;
}
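
The replacement loop above merges a new region into any existing map entry of the same type that it overlaps or touches, instead of only growing the most recent entry. A stand-alone sketch of that merge step, with the struct and names simplified from the hunk:

	#include <stdio.h>

	struct mem_entry {
		unsigned long addr;
		unsigned long size;
	};

	/* Merge [start, start+size) into *entry when the two ranges overlap or abut. */
	static int merge_region(struct mem_entry *entry, unsigned long start, unsigned long size)
	{
		unsigned long top;

		if (start + size < entry->addr || entry->addr + entry->size < start)
			return 0; /* no overlap */

		top = entry->addr + entry->size;
		if (start + size > top)
			top = start + size;
		if (start < entry->addr)
			entry->addr = start;
		entry->size = top - entry->addr;
		return 1;
	}

	int main(void)
	{
		struct mem_entry e = { .addr = 0x1000, .size = 0x2000 }; /* [0x1000, 0x3000) */

		merge_region(&e, 0x2800, 0x1000);                          /* overlaps the tail */
		printf("merged: addr=0x%lx size=0x%lx\n", e.addr, e.size); /* [0x1000, 0x3800) */
		return 0;
	}
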
diff --git a/trunk/arch/mips/lib/Makefile b/trunk/arch/mips/lib/Makefile
index c4a82e841c73..eeddc58802e1 100644
--- a/trunk/arch/mips/lib/Makefile
+++ b/trunk/arch/mips/lib/Makefile
@@ -2,8 +2,9 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o delay.o memcpy.o memset.o \
- strlen_user.o strncpy_user.o strnlen_user.o uncached.o
+lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
+ mips-atomic.o strlen_user.o strncpy_user.o \
+ strnlen_user.o uncached.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
diff --git a/trunk/arch/mips/lib/bitops.c b/trunk/arch/mips/lib/bitops.c
new file mode 100644
index 000000000000..239a9c957b02
--- /dev/null
+++ b/trunk/arch/mips/lib/bitops.c
@@ -0,0 +1,179 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include
+#include
+#include
+
+
+/**
+ * __mips_set_bit - Atomically set a bit in memory. This is called by
+ * set_bit() if it cannot find a faster solution.
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ */
+void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_set_bit);
+
+
+/**
+ * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if
+ * it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a &= ~mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_clear_bit);
+
+
+/**
+ * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
+ * if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ */
+void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ *a ^= mask;
+ raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__mips_change_bit);
+
+
+/**
+ * __mips_test_and_set_bit - Set a bit and return its old value. This is
+ * called by test_and_set_bit() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit);
+
+
+/**
+ * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
+ * called by test_and_set_bit_lock() if it cannot find a faster solution.
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+int __mips_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
+
+
+/**
+ * __mips_test_and_clear_bit - Clear a bit and return its old value. This is
+ * called by test_and_clear_bit() if it cannot find a faster solution.
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a &= ~mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_clear_bit);
+
+
+/**
+ * __mips_test_and_change_bit - Change a bit and return its old value. This is
+ * called by test_and_change_bit() if it cannot find a faster solution.
+ * @nr: Bit to change
+ * @addr: Address to count from
+ */
+int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ volatile unsigned long *a = addr;
+ unsigned bit = nr & SZLONG_MASK;
+ unsigned long mask;
+ unsigned long flags;
+ unsigned long res;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a ^= mask;
+ raw_local_irq_restore(flags);
+ return res;
+}
+EXPORT_SYMBOL(__mips_test_and_change_bit);
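
For reference, a user-space sketch of the word/mask arithmetic the new out-of-line helpers use. SZLONG_LOG and SZLONG_MASK are assumed here to be the 32-bit values (5 and 31), and the real interrupt masking is only indicated by comments, so this only mirrors the logic, not the atomicity guarantees.

	#include <stdio.h>

	#define SZLONG_LOG  5
	#define SZLONG_MASK 31UL

	static int test_and_set_bit_fallback(unsigned long nr, volatile unsigned long *addr)
	{
		volatile unsigned long *a = addr + (nr >> SZLONG_LOG); /* word holding the bit */
		unsigned long mask = 1UL << (nr & SZLONG_MASK);        /* bit within that word */
		unsigned long res;

		/* raw_local_irq_save(flags) would go here in the kernel helper */
		res = *a & mask;
		*a |= mask;
		/* raw_local_irq_restore(flags) */
		return res != 0;
	}

	int main(void)
	{
		unsigned long bitmap[2] = { 0, 0 };

		printf("first call:  %d\n", test_and_set_bit_fallback(37, bitmap)); /* 0: was clear */
		printf("second call: %d\n", test_and_set_bit_fallback(37, bitmap)); /* 1: already set */
		return 0;
	}
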
diff --git a/trunk/arch/mips/lib/mips-atomic.c b/trunk/arch/mips/lib/mips-atomic.c
new file mode 100644
index 000000000000..cd160be3ce4d
--- /dev/null
+++ b/trunk/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include
+#include
+#include
+#include
+#include
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+ " .macro arch_local_irq_disable\n"
+ " .set push \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 $1, $2, 1 \n"
+ " ori $1, 0x400 \n"
+ " .set noreorder \n"
+ " mtc0 $1, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1,$12 \n"
+ " ori $1,0x1f \n"
+ " xori $1,0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1,$12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace void arch_local_irq_disable(void)
+{
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_disable"
+ : /* no outputs */
+ : /* no inputs */
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+ " .macro arch_local_irq_save result \n"
+ " .set push \n"
+ " .set reorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ " mfc0 \\result, $2, 1 \n"
+ " ori $1, \\result, 0x400 \n"
+ " .set noreorder \n"
+ " mtc0 $1, $2, 1 \n"
+ " andi \\result, \\result, 0x400 \n"
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 \\result, $12 \n"
+ " ori $1, \\result, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " .set noreorder \n"
+ " mtc0 $1, $12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ preempt_disable();
+ asm volatile("arch_local_irq_save\t%0"
+ : "=r" (flags)
+ : /* no inputs */
+ : "memory");
+ preempt_enable();
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+ " .macro arch_local_irq_restore flags \n"
+ " .set push \n"
+ " .set noreorder \n"
+ " .set noat \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+ "mfc0 $1, $2, 1 \n"
+ "andi \\flags, 0x400 \n"
+ "ori $1, 0x400 \n"
+ "xori $1, 0x400 \n"
+ "or \\flags, $1 \n"
+ "mtc0 \\flags, $2, 1 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+ /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+ /* see irqflags.h for inline function */
+#else
+ " mfc0 $1, $12 \n"
+ " andi \\flags, 1 \n"
+ " ori $1, 0x1f \n"
+ " xori $1, 0x1f \n"
+ " or \\flags, $1 \n"
+ " mtc0 \\flags, $12 \n"
+#endif
+ " irq_disable_hazard \n"
+ " .set pop \n"
+ " .endm \n");
+
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of branch and call overhead on each
+ * local_irq_restore()
+ */
+ if (unlikely(!(flags & 0x0400)))
+ smtc_ipi_replay();
+#endif
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+notrace void __arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ preempt_disable();
+ __asm__ __volatile__(
+ "arch_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+ preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
diff --git a/trunk/arch/mips/mti-malta/malta-platform.c b/trunk/arch/mips/mti-malta/malta-platform.c
index 80562b81f0f2..74732177851c 100644
--- a/trunk/arch/mips/mti-malta/malta-platform.c
+++ b/trunk/arch/mips/mti-malta/malta-platform.c
@@ -29,6 +29,7 @@
#include
#include
#include
+#include
#include
#define SMC_PORT(base, int) \
@@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
SMC_PORT(0x2F8, 3),
{
.mapbase = 0x1f000900, /* The CBUS UART */
- .irq = MIPS_CPU_IRQ_BASE + 2,
+ .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
.uartclk = 3686400, /* Twice the usual clk! */
.iotype = UPIO_MEM32,
.flags = CBUS_UART_FLAGS,
diff --git a/trunk/arch/parisc/kernel/signal32.c b/trunk/arch/parisc/kernel/signal32.c
index fd49aeda9eb8..5dede04f2f3e 100644
--- a/trunk/arch/parisc/kernel/signal32.c
+++ b/trunk/arch/parisc/kernel/signal32.c
@@ -65,7 +65,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
{
compat_sigset_t s;
- if (sz != sizeof *set) panic("put_sigset32()");
+ if (sz != sizeof *set)
+ return -EINVAL;
sigset_64to32(&s, set);
return copy_to_user(up, &s, sizeof s);
@@ -77,7 +78,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
compat_sigset_t s;
int r;
- if (sz != sizeof *set) panic("put_sigset32()");
+ if (sz != sizeof *set)
+ return -EINVAL;
if ((r = copy_from_user(&s, up, sz)) == 0) {
sigset_32to64(set, &s);
diff --git a/trunk/arch/parisc/kernel/sys_parisc.c b/trunk/arch/parisc/kernel/sys_parisc.c
index 7426e40699bd..f76c10863c62 100644
--- a/trunk/arch/parisc/kernel/sys_parisc.c
+++ b/trunk/arch/parisc/kernel/sys_parisc.c
@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping,
struct vm_area_struct *vma;
int offset = mapping ? get_offset(mapping) : 0;
+ offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+
addr = DCACHE_ALIGN(addr - offset) + offset;
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
diff --git a/trunk/arch/powerpc/boot/dts/mpc5200b.dtsi b/trunk/arch/powerpc/boot/dts/mpc5200b.dtsi
index 7ab286ab5300..39ed65a44c5f 100644
--- a/trunk/arch/powerpc/boot/dts/mpc5200b.dtsi
+++ b/trunk/arch/powerpc/boot/dts/mpc5200b.dtsi
@@ -231,6 +231,12 @@
interrupts = <2 7 0>;
};
+ sclpc@3c00 {
+ compatible = "fsl,mpc5200-lpbfifo";
+ reg = <0x3c00 0x60>;
+ interrupts = <2 23 0>;
+ };
+
i2c@3d00 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/trunk/arch/powerpc/boot/dts/o2d.dtsi b/trunk/arch/powerpc/boot/dts/o2d.dtsi
index 3444eb8f0ade..24f668039295 100644
--- a/trunk/arch/powerpc/boot/dts/o2d.dtsi
+++ b/trunk/arch/powerpc/boot/dts/o2d.dtsi
@@ -86,12 +86,6 @@
reg = <0>;
};
};
-
- sclpc@3c00 {
- compatible = "fsl,mpc5200-lpbfifo";
- reg = <0x3c00 0x60>;
- interrupts = <3 23 0>;
- };
};
localbus {
diff --git a/trunk/arch/powerpc/boot/dts/pcm030.dts b/trunk/arch/powerpc/boot/dts/pcm030.dts
index 9e354997eb7e..96512c058033 100644
--- a/trunk/arch/powerpc/boot/dts/pcm030.dts
+++ b/trunk/arch/powerpc/boot/dts/pcm030.dts
@@ -59,7 +59,7 @@
#gpio-cells = <2>;
};
- psc@2000 { /* PSC1 in ac97 mode */
+ audioplatform: psc@2000 { /* PSC1 in ac97 mode */
compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
cell-index = <0>;
};
@@ -134,4 +134,9 @@
localbus {
status = "disabled";
};
+
+ sound {
+ compatible = "phytec,pcm030-audio-fabric";
+ asoc-platform = <&audioplatform>;
+ };
};
diff --git a/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 8520b58a5e9a..b89ef65392dc 100644
--- a/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -372,10 +372,11 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break;
case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
- default:
- pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n",
- __func__, virq, l1irq, l2irq);
- return -EINVAL;
+ case MPC52xx_IRQ_L1_CRIT:
+ pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
+ __func__, l2irq);
+ irq_set_chip(virq, &no_irq_chip);
+ return 0;
}
irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c
index 797cd181dc3f..d16c8ded1084 100644
--- a/trunk/arch/powerpc/platforms/pseries/eeh_pe.c
+++ b/trunk/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -449,7 +449,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
if (list_empty(&pe->edevs)) {
cnt = 0;
list_for_each_entry(child, &pe->child_list, child) {
- if (!(pe->type & EEH_PE_INVALID)) {
+ if (!(child->type & EEH_PE_INVALID)) {
cnt++;
break;
}
diff --git a/trunk/arch/powerpc/platforms/pseries/msi.c b/trunk/arch/powerpc/platforms/pseries/msi.c
index d19f4977c834..e5b084723131 100644
--- a/trunk/arch/powerpc/platforms/pseries/msi.c
+++ b/trunk/arch/powerpc/platforms/pseries/msi.c
@@ -220,7 +220,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
/* Get the top level device in the PE */
edev = of_node_to_eeh_dev(dn);
- edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
+ if (edev->pe)
+ edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
dn = eeh_dev_to_of_node(edev);
if (!dn)
return NULL;
diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig
index 5dba755a43e6..d385f396dfee 100644
--- a/trunk/arch/s390/Kconfig
+++ b/trunk/arch/s390/Kconfig
@@ -96,6 +96,7 @@ config S390
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_CMPXCHG_LOCAL
select HAVE_CMPXCHG_DOUBLE
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_VIRT_CPU_ACCOUNTING
select VIRT_CPU_ACCOUNTING
select ARCH_DISCARD_MEMBLOCK
diff --git a/trunk/arch/s390/include/asm/cio.h b/trunk/arch/s390/include/asm/cio.h
index 55bde6035216..ad2b924167d7 100644
--- a/trunk/arch/s390/include/asm/cio.h
+++ b/trunk/arch/s390/include/asm/cio.h
@@ -9,6 +9,8 @@
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
+#define __MAX_SUBCHANNEL 65535
+#define __MAX_SSID 3
#include
diff --git a/trunk/arch/s390/include/asm/compat.h b/trunk/arch/s390/include/asm/compat.h
index a34a9d612fc0..18cd6b592650 100644
--- a/trunk/arch/s390/include/asm/compat.h
+++ b/trunk/arch/s390/include/asm/compat.h
@@ -20,7 +20,7 @@
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
-#define PSW32_MASK_USER 0x00003F00UL
+#define PSW32_MASK_USER 0x0000FF00UL
#define PSW32_ADDR_AMODE 0x80000000UL
#define PSW32_ADDR_INSN 0x7FFFFFFFUL
diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h
index dd647c919a66..2d3b7cb26005 100644
--- a/trunk/arch/s390/include/asm/pgtable.h
+++ b/trunk/arch/s390/include/asm/pgtable.h
@@ -506,12 +506,15 @@ static inline int pud_bad(pud_t pud)
static inline int pmd_present(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
+ unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
+ return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
+ !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}
static inline int pmd_none(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
+ !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}
static inline int pmd_large(pmd_t pmd)
@@ -1223,6 +1226,11 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
+
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
@@ -1242,16 +1250,15 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
- unsigned long pgprot_pmd = 0;
-
- if (pgprot_val(pgprot) & _PAGE_INVALID) {
- if (pgprot_val(pgprot) & _PAGE_SWT)
- pgprot_pmd |= _HPAGE_TYPE_NONE;
- pgprot_pmd |= _SEGMENT_ENTRY_INV;
- }
- if (pgprot_val(pgprot) & _PAGE_RO)
- pgprot_pmd |= _SEGMENT_ENTRY_RO;
- return pgprot_pmd;
+ /*
+ * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+ * Convert to segment table entry format.
+ */
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+ return pgprot_val(SEGMENT_NONE);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
+ return pgprot_val(SEGMENT_RO);
+ return pgprot_val(SEGMENT_RW);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@@ -1269,7 +1276,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+ /* Do not clobber _HPAGE_TYPE_NONE pages! */
+ if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
return pmd;
}
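
A sketch of the state encoding the new pmd_present()/pmd_none() checks rely on; the bit values here are placeholders, not the real s390 definitions. An ordinary mapped entry has the invalid bit clear, a PROT_NONE huge entry is marked invalid+read-only, and only the invalid bit alone means "no mapping at all".

	#include <stdio.h>

	/* Placeholder bit positions for illustration only. */
	#define SEG_INV 0x1UL
	#define SEG_RO  0x2UL
	#define HPAGE_TYPE_NONE (SEG_INV | SEG_RO)

	static int pmd_present_sketch(unsigned long v)
	{
		return ((v & (SEG_INV | SEG_RO)) == HPAGE_TYPE_NONE) || !(v & SEG_INV);
	}

	static int pmd_none_sketch(unsigned long v)
	{
		return (v & SEG_INV) && !(v & SEG_RO);
	}

	int main(void)
	{
		unsigned long mapped = 0, prot_none = HPAGE_TYPE_NONE, empty = SEG_INV;

		printf("mapped:    present=%d none=%d\n", pmd_present_sketch(mapped), pmd_none_sketch(mapped));
		printf("prot_none: present=%d none=%d\n", pmd_present_sketch(prot_none), pmd_none_sketch(prot_none));
		printf("empty:     present=%d none=%d\n", pmd_present_sketch(empty), pmd_none_sketch(empty));
		return 0;
	}
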
diff --git a/trunk/arch/s390/include/asm/topology.h b/trunk/arch/s390/include/asm/topology.h
index 9ca305383760..9935cbd6a46f 100644
--- a/trunk/arch/s390/include/asm/topology.h
+++ b/trunk/arch/s390/include/asm/topology.h
@@ -8,6 +8,9 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
+extern unsigned char cpu_socket_id[NR_CPUS];
+#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
+
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
diff --git a/trunk/arch/s390/include/uapi/asm/ptrace.h b/trunk/arch/s390/include/uapi/asm/ptrace.h
index 705588a16d70..a5ca214b34fd 100644
--- a/trunk/arch/s390/include/uapi/asm/ptrace.h
+++ b/trunk/arch/s390/include/uapi/asm/ptrace.h
@@ -239,7 +239,7 @@ typedef struct
#define PSW_MASK_EA 0x00000000UL
#define PSW_MASK_BA 0x00000000UL
-#define PSW_MASK_USER 0x00003F00UL
+#define PSW_MASK_USER 0x0000FF00UL
#define PSW_ADDR_AMODE 0x80000000UL
#define PSW_ADDR_INSN 0x7FFFFFFFUL
@@ -269,7 +269,7 @@ typedef struct
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
-#define PSW_MASK_USER 0x00003F8180000000UL
+#define PSW_MASK_USER 0x0000FF8180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
diff --git a/trunk/arch/s390/kernel/compat_signal.c b/trunk/arch/s390/kernel/compat_signal.c
index a1e8a8694bb7..593fcc9253fc 100644
--- a/trunk/arch/s390/kernel/compat_signal.c
+++ b/trunk/arch/s390/kernel/compat_signal.c
@@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+ /* Check for invalid user address space control. */
+ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = (__u64) regs32.gprs[i];
@@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* Set up registers for signal handler */
regs->gprs[15] = (__force __u64) frame;
- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
+ /* Force 31 bit amode and default user address space control. */
+ regs->psw.mask = PSW_MASK_BA |
+ (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);
@@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up registers for signal handler */
regs->gprs[15] = (__force __u64) frame;
- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
+ /* Force 31 bit amode and default user address space control. */
+ regs->psw.mask = PSW_MASK_BA |
+ (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);
diff --git a/trunk/arch/s390/kernel/sclp.S b/trunk/arch/s390/kernel/sclp.S
index bf053898630d..b6506ee32a36 100644
--- a/trunk/arch/s390/kernel/sclp.S
+++ b/trunk/arch/s390/kernel/sclp.S
@@ -44,6 +44,12 @@ _sclp_wait_int:
#endif
mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
mvc 0(16,%r8),0(%r9)
+#ifdef CONFIG_64BIT
+ epsw %r6,%r7 # set current addressing mode
+ nill %r6,0x1 # in new psw (31 or 64 bit mode)
+ nilh %r7,0x8000
+ stm %r6,%r7,0(%r8)
+#endif
lhi %r6,0x0200 # cr mask for ext int (cr0.54)
ltr %r2,%r2
jz .LsetctS1
@@ -87,7 +93,7 @@ _sclp_wait_int:
.long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
#ifdef CONFIG_64BIT
.LextpswS1_64:
- .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit
+ .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit
#endif
.LwaitpswS1:
.long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
diff --git a/trunk/arch/s390/kernel/signal.c b/trunk/arch/s390/kernel/signal.c
index c13a2a37ef00..d1259d875074 100644
--- a/trunk/arch/s390/kernel/signal.c
+++ b/trunk/arch/s390/kernel/signal.c
@@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
/* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(user_sregs.regs.psw.mask & PSW_MASK_USER);
+ /* Check for invalid user address space control. */
+ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
+ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
/* Check for invalid amode */
if (regs->psw.mask & PSW_MASK_EA)
regs->psw.mask |= PSW_MASK_BA;
@@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* Set up registers for signal handler */
regs->gprs[15] = (unsigned long) frame;
- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
+ /* Force default amode and default user address space control. */
+ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+ (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->gprs[2] = map_signal(sig);
@@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up registers for signal handler */
regs->gprs[15] = (unsigned long) frame;
- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
+ /* Force default amode and default user address space control. */
+ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
+ (psw_user_bits & PSW_MASK_ASC) |
+ (regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->gprs[2] = map_signal(sig);
diff --git a/trunk/arch/s390/kernel/topology.c b/trunk/arch/s390/kernel/topology.c
index 54d93f4b6818..dd55f7c20104 100644
--- a/trunk/arch/s390/kernel/topology.c
+++ b/trunk/arch/s390/kernel/topology.c
@@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock);
static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
+unsigned char cpu_socket_id[NR_CPUS];
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
@@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
cpumask_set_cpu(lcpu, &core->mask);
+ cpu_core_id[lcpu] = rcpu;
if (one_core_per_cpu) {
- cpu_core_id[lcpu] = rcpu;
+ cpu_socket_id[lcpu] = rcpu;
core = core->next;
} else {
- cpu_core_id[lcpu] = core->id;
+ cpu_socket_id[lcpu] = core->id;
}
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
}
diff --git a/trunk/arch/s390/lib/uaccess_pt.c b/trunk/arch/s390/lib/uaccess_pt.c
index 2d37bb861faf..9017a63dda3d 100644
--- a/trunk/arch/s390/lib/uaccess_pt.c
+++ b/trunk/arch/s390/lib/uaccess_pt.c
@@ -39,7 +39,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm,
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return -0x10UL;
- if (pmd_huge(*pmd)) {
+ if (pmd_large(*pmd)) {
if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
return -0x04UL;
return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
diff --git a/trunk/arch/s390/mm/gup.c b/trunk/arch/s390/mm/gup.c
index 60acb93a4680..1f5315d1215c 100644
--- a/trunk/arch/s390/mm/gup.c
+++ b/trunk/arch/s390/mm/gup.c
@@ -126,7 +126,7 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
*/
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
- if (unlikely(pmd_huge(pmd))) {
+ if (unlikely(pmd_large(pmd))) {
if (!gup_huge_pmd(pmdp, pmd, addr, next,
write, pages, nr))
return 0;
@@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
- (void __user *)start, len)))
+ if ((end < start) || (end > TASK_SIZE))
return 0;
local_irq_save(flags);
@@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (end < start)
+ if ((end < start) || (end > TASK_SIZE))
goto slow_irqon;
/*
diff --git a/trunk/arch/sparc/Kconfig b/trunk/arch/sparc/Kconfig
index b6b442b0d793..9f2edb5c5551 100644
--- a/trunk/arch/sparc/Kconfig
+++ b/trunk/arch/sparc/Kconfig
@@ -20,6 +20,7 @@ config SPARC
select HAVE_ARCH_TRACEHOOK
select SYSCTL_EXCEPTION_TRACE
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select RTC_CLASS
select RTC_DRV_M48T59
select HAVE_IRQ_WORK
diff --git a/trunk/arch/sparc/crypto/Makefile b/trunk/arch/sparc/crypto/Makefile
index 6ae1ad5e502b..5d469d81761f 100644
--- a/trunk/arch/sparc/crypto/Makefile
+++ b/trunk/arch/sparc/crypto/Makefile
@@ -13,13 +13,13 @@ obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o
obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
-sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o
-sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o
-sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o
-md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o
+sha1-sparc64-y := sha1_asm.o sha1_glue.o
+sha256-sparc64-y := sha256_asm.o sha256_glue.o
+sha512-sparc64-y := sha512_asm.o sha512_glue.o
+md5-sparc64-y := md5_asm.o md5_glue.o
-aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o
-des-sparc64-y := des_asm.o des_glue.o crop_devid.o
-camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o
+aes-sparc64-y := aes_asm.o aes_glue.o
+des-sparc64-y := des_asm.o des_glue.o
+camellia-sparc64-y := camellia_asm.o camellia_glue.o
-crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o
+crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o
diff --git a/trunk/arch/sparc/crypto/aes_glue.c b/trunk/arch/sparc/crypto/aes_glue.c
index 8f1c9980f637..3965d1d36dfa 100644
--- a/trunk/arch/sparc/crypto/aes_glue.c
+++ b/trunk/arch/sparc/crypto/aes_glue.c
@@ -475,3 +475,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
MODULE_ALIAS("aes");
+
+#include "crop_devid.c"
diff --git a/trunk/arch/sparc/crypto/camellia_glue.c b/trunk/arch/sparc/crypto/camellia_glue.c
index 42905c084299..62c89af3fd3f 100644
--- a/trunk/arch/sparc/crypto/camellia_glue.c
+++ b/trunk/arch/sparc/crypto/camellia_glue.c
@@ -320,3 +320,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
MODULE_ALIAS("aes");
+
+#include "crop_devid.c"
diff --git a/trunk/arch/sparc/crypto/crc32c_glue.c b/trunk/arch/sparc/crypto/crc32c_glue.c
index 0bd89cea8d8e..5162fad912ce 100644
--- a/trunk/arch/sparc/crypto/crc32c_glue.c
+++ b/trunk/arch/sparc/crypto/crc32c_glue.c
@@ -177,3 +177,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
MODULE_ALIAS("crc32c");
+
+#include "crop_devid.c"
diff --git a/trunk/arch/sparc/crypto/des_glue.c b/trunk/arch/sparc/crypto/des_glue.c
index c4940c2d3073..41524cebcc49 100644
--- a/trunk/arch/sparc/crypto/des_glue.c
+++ b/trunk/arch/sparc/crypto/des_glue.c
@@ -527,3 +527,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
MODULE_ALIAS("des");
+
+#include "crop_devid.c"
diff --git a/trunk/arch/sparc/crypto/md5_glue.c b/trunk/arch/sparc/crypto/md5_glue.c
index 603d723038ce..09a9ea1dfb69 100644
--- a/trunk/arch/sparc/crypto/md5_glue.c
+++ b/trunk/arch/sparc/crypto/md5_glue.c
@@ -186,3 +186,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
MODULE_ALIAS("md5");
+
+#include "crop_devid.c"
diff --git a/trunk/arch/sparc/crypto/sha1_glue.c b/trunk/arch/sparc/crypto/sha1_glue.c
index 2bbb20bee9f1..6cd5f29e1e0d 100644
--- a/trunk/arch/sparc/crypto/sha1_glue.c
+++ b/trunk/arch/sparc/crypto/sha1_glue.c
@@ -181,3 +181,5 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
MODULE_ALIAS("sha1");
+
+#include "crop_devid.c"
diff --git a/trunk/arch/sparc/crypto/sha256_glue.c b/trunk/arch/sparc/crypto/sha256_glue.c
index 591e656bd891..04f555ab2680 100644
--- a/trunk/arch/sparc/crypto/sha256_glue.c
+++ b/trunk/arch/sparc/crypto/sha256_glue.c
@@ -239,3 +239,5 @@ MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 op
MODULE_ALIAS("sha224");
MODULE_ALIAS("sha256");
+
+#include "crop_devid.c"
diff --git a/trunk/arch/sparc/crypto/sha512_glue.c b/trunk/arch/sparc/crypto/sha512_glue.c
index 486f0a2b7001..f04d1994d19a 100644
--- a/trunk/arch/sparc/crypto/sha512_glue.c
+++ b/trunk/arch/sparc/crypto/sha512_glue.c
@@ -224,3 +224,5 @@ MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 op
MODULE_ALIAS("sha384");
MODULE_ALIAS("sha512");
+
+#include "crop_devid.c"
diff --git a/trunk/arch/sparc/include/asm/atomic_64.h b/trunk/arch/sparc/include/asm/atomic_64.h
index ce35a1cf1a20..be56a244c9cf 100644
--- a/trunk/arch/sparc/include/asm/atomic_64.h
+++ b/trunk/arch/sparc/include/asm/atomic_64.h
@@ -1,7 +1,7 @@
/* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
- * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_ATOMIC__
@@ -106,6 +106,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+extern long atomic64_dec_if_positive(atomic64_t *v);
+
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
diff --git a/trunk/arch/sparc/include/asm/backoff.h b/trunk/arch/sparc/include/asm/backoff.h
index db3af0d30fb1..4e02086b839c 100644
--- a/trunk/arch/sparc/include/asm/backoff.h
+++ b/trunk/arch/sparc/include/asm/backoff.h
@@ -1,6 +1,46 @@
#ifndef _SPARC64_BACKOFF_H
#define _SPARC64_BACKOFF_H
+/* The macros in this file implement an exponential backoff facility
+ * for atomic operations.
+ *
+ * When multiple threads compete on an atomic operation, it is
+ * possible for one thread to be continually denied a successful
+ * completion of the compare-and-swap instruction. Heavily
+ * threaded cpu implementations like Niagara can compound this
+ * problem even further.
+ *
+ * When an atomic operation fails and needs to be retried, we spin a
+ * certain number of times. At each subsequent failure of the same
+ * operation we double the spin count, realizing an exponential
+ * backoff.
+ *
+ * When we spin, we try to use an operation that will cause the
+ * current cpu strand to block, and therefore make the core fully
+ * available to any other runnable strands. There are two
+ * options, based upon cpu capabilities.
+ *
+ * On all cpus prior to SPARC-T4 we do three dummy reads of the
+ * condition code register. Each read blocks the strand for something
+ * between 40 and 50 cpu cycles.
+ *
+ * For SPARC-T4 and later we have a special "pause" instruction
+ * available. This is implemented using writes to register %asr27.
+ * The cpu will block the number of cycles written into the register,
+ * unless a disrupting trap happens first. SPARC-T4 specifically
+ * implements pause with a granularity of 8 cycles. Each strand has
+ * an internal pause counter which decrements every 8 cycles. So the
+ * chip shifts the %asr27 value down by 3 bits, and writes the result
+ * into the pause counter. If a value smaller than 8 is written, the
+ * chip blocks for 1 cycle.
+ *
+ * To achieve the same amount of backoff as the three %ccr reads give
+ * on earlier chips, we shift the backoff value up by 7 bits. (Three
+ * %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the
+ * whole amount we want to block into the pause register, rather than
+ * loop writing 128 each time.
+ */
+
#define BACKOFF_LIMIT (4 * 1024)
#ifdef CONFIG_SMP
@@ -11,16 +51,25 @@
#define BACKOFF_LABEL(spin_label, continue_label) \
spin_label
-#define BACKOFF_SPIN(reg, tmp, label) \
- mov reg, tmp; \
-88: brnz,pt tmp, 88b; \
- sub tmp, 1, tmp; \
- set BACKOFF_LIMIT, tmp; \
- cmp reg, tmp; \
- bg,pn %xcc, label; \
- nop; \
- ba,pt %xcc, label; \
- sllx reg, 1, reg;
+#define BACKOFF_SPIN(reg, tmp, label) \
+ mov reg, tmp; \
+88: rd %ccr, %g0; \
+ rd %ccr, %g0; \
+ rd %ccr, %g0; \
+ .section .pause_3insn_patch,"ax";\
+ .word 88b; \
+ sllx tmp, 7, tmp; \
+ wr tmp, 0, %asr27; \
+ clr tmp; \
+ .previous; \
+ brnz,pt tmp, 88b; \
+ sub tmp, 1, tmp; \
+ set BACKOFF_LIMIT, tmp; \
+ cmp reg, tmp; \
+ bg,pn %xcc, label; \
+ nop; \
+ ba,pt %xcc, label; \
+ sllx reg, 1, reg;
#else
diff --git a/trunk/arch/sparc/include/asm/compat.h b/trunk/arch/sparc/include/asm/compat.h
index cef99fbc0a21..830502fe62b4 100644
--- a/trunk/arch/sparc/include/asm/compat.h
+++ b/trunk/arch/sparc/include/asm/compat.h
@@ -232,9 +232,10 @@ static inline void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
- if (!(test_thread_flag(TIF_32BIT)))
+ if (test_thread_64bit_stack(usp))
usp += STACK_BIAS;
- else
+
+ if (test_thread_flag(TIF_32BIT))
usp &= 0xffffffffUL;
usp -= len;
diff --git a/trunk/arch/sparc/include/asm/processor_64.h b/trunk/arch/sparc/include/asm/processor_64.h
index 4e5a483122a0..721e25f0e2ea 100644
--- a/trunk/arch/sparc/include/asm/processor_64.h
+++ b/trunk/arch/sparc/include/asm/processor_64.h
@@ -196,7 +196,22 @@ extern unsigned long get_wchan(struct task_struct *task);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
-#define cpu_relax() barrier()
+/* Please see the commentary in asm/backoff.h for a description of
+ * what these instructions are doing and how they have been chosen.
+ * To make a long story short, we are trying to yield the current cpu
+ * strand during busy loops.
+ */
+#define cpu_relax() asm volatile("\n99:\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ ".section .pause_3insn_patch,\"ax\"\n\t"\
+ ".word 99b\n\t" \
+ "wr %%g0, 128, %%asr27\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".previous" \
+ ::: "memory")
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
diff --git a/trunk/arch/sparc/include/asm/prom.h b/trunk/arch/sparc/include/asm/prom.h
index c28765110706..67c62578d170 100644
--- a/trunk/arch/sparc/include/asm/prom.h
+++ b/trunk/arch/sparc/include/asm/prom.h
@@ -63,5 +63,13 @@ extern char *of_console_options;
extern void irq_trans_init(struct device_node *dp);
extern char *build_path_component(struct device_node *dp);
+/* SPARC has local implementations */
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+#define of_address_to_resource of_address_to_resource
+
+void __iomem *of_iomap(struct device_node *node, int index);
+#define of_iomap of_iomap
+
#endif /* __KERNEL__ */
#endif /* _SPARC_PROM_H */
diff --git a/trunk/arch/sparc/include/asm/thread_info_64.h b/trunk/arch/sparc/include/asm/thread_info_64.h
index 4e2276631081..a3fe4dcc0aa6 100644
--- a/trunk/arch/sparc/include/asm/thread_info_64.h
+++ b/trunk/arch/sparc/include/asm/thread_info_64.h
@@ -259,6 +259,11 @@ static inline bool test_and_clear_restore_sigmask(void)
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
+#define test_thread_64bit_stack(__SP) \
+ ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
+ false : true)
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/trunk/arch/sparc/include/asm/ttable.h b/trunk/arch/sparc/include/asm/ttable.h
index 48f2807d3265..71b5a67522ab 100644
--- a/trunk/arch/sparc/include/asm/ttable.h
+++ b/trunk/arch/sparc/include/asm/ttable.h
@@ -372,7 +372,9 @@ etrap_spill_fixup_64bit: \
/* Normal 32bit spill */
#define SPILL_2_GENERIC(ASI) \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, (. - (128 + 4)); \
+ srl %sp, 0, %sp; \
stwa %l0, [%sp + %g0] ASI; \
mov 0x04, %g3; \
stwa %l1, [%sp + %g3] ASI; \
@@ -398,14 +400,16 @@ etrap_spill_fixup_64bit: \
stwa %i6, [%g1 + %g0] ASI; \
stwa %i7, [%g1 + %g3] ASI; \
saved; \
- retry; nop; nop; \
+ retry; \
b,a,pt %xcc, spill_fixup_dax; \
b,a,pt %xcc, spill_fixup_mna; \
b,a,pt %xcc, spill_fixup;
#define SPILL_2_GENERIC_ETRAP \
etrap_user_spill_32bit: \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, etrap_user_spill_64bit; \
+ srl %sp, 0, %sp; \
stwa %l0, [%sp + 0x00] %asi; \
stwa %l1, [%sp + 0x04] %asi; \
stwa %l2, [%sp + 0x08] %asi; \
@@ -427,7 +431,7 @@ etrap_user_spill_32bit: \
ba,pt %xcc, etrap_save; \
wrpr %g1, %cwp; \
nop; nop; nop; nop; \
- nop; nop; nop; nop; \
+ nop; nop; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit;
@@ -592,7 +596,9 @@ user_rtt_fill_64bit: \
/* Normal 32bit fill */
#define FILL_2_GENERIC(ASI) \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, (. - (128 + 4)); \
+ srl %sp, 0, %sp; \
lduwa [%sp + %g0] ASI, %l0; \
mov 0x04, %g2; \
mov 0x08, %g3; \
@@ -616,14 +622,16 @@ user_rtt_fill_64bit: \
lduwa [%g1 + %g3] ASI, %i6; \
lduwa [%g1 + %g5] ASI, %i7; \
restored; \
- retry; nop; nop; nop; nop; \
+ retry; nop; nop; \
b,a,pt %xcc, fill_fixup_dax; \
b,a,pt %xcc, fill_fixup_mna; \
b,a,pt %xcc, fill_fixup;
#define FILL_2_GENERIC_RTRAP \
user_rtt_fill_32bit: \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, user_rtt_fill_64bit; \
+ srl %sp, 0, %sp; \
lduwa [%sp + 0x00] %asi, %l0; \
lduwa [%sp + 0x04] %asi, %l1; \
lduwa [%sp + 0x08] %asi, %l2; \
@@ -643,7 +651,7 @@ user_rtt_fill_32bit: \
ba,pt %xcc, user_rtt_pre_restore; \
restored; \
nop; nop; nop; nop; nop; \
- nop; nop; nop; nop; nop; \
+ nop; nop; nop; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup;
diff --git a/trunk/arch/sparc/include/uapi/asm/unistd.h b/trunk/arch/sparc/include/uapi/asm/unistd.h
index 8974ef7ae920..cac719d1bc5c 100644
--- a/trunk/arch/sparc/include/uapi/asm/unistd.h
+++ b/trunk/arch/sparc/include/uapi/asm/unistd.h
@@ -405,8 +405,13 @@
#define __NR_setns 337
#define __NR_process_vm_readv 338
#define __NR_process_vm_writev 339
+#define __NR_kern_features 340
+#define __NR_kcmp 341
-#define NR_syscalls 340
+#define NR_syscalls 342
+
+/* Bitmask values returned from kern_features system call. */
+#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/trunk/arch/sparc/kernel/entry.h b/trunk/arch/sparc/kernel/entry.h
index 0c218e4c0881..cc3c5cb47cda 100644
--- a/trunk/arch/sparc/kernel/entry.h
+++ b/trunk/arch/sparc/kernel/entry.h
@@ -59,6 +59,13 @@ struct popc_6insn_patch_entry {
extern struct popc_6insn_patch_entry __popc_6insn_patch,
__popc_6insn_patch_end;
+struct pause_patch_entry {
+ unsigned int addr;
+ unsigned int insns[3];
+};
+extern struct pause_patch_entry __pause_3insn_patch,
+ __pause_3insn_patch_end;
+
extern void __init per_cpu_patch(void);
extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
struct sun4v_1insn_patch_entry *);
diff --git a/trunk/arch/sparc/kernel/leon_kernel.c b/trunk/arch/sparc/kernel/leon_kernel.c
index f8b6eee40bde..87f60ee65433 100644
--- a/trunk/arch/sparc/kernel/leon_kernel.c
+++ b/trunk/arch/sparc/kernel/leon_kernel.c
@@ -56,11 +56,13 @@ static inline unsigned int leon_eirq_get(int cpu)
static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int eirq;
+ struct irq_bucket *p;
int cpu = sparc_leon3_cpuid();
eirq = leon_eirq_get(cpu);
- if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */
- generic_handle_irq(irq_map[eirq]->irq);
+ p = irq_map[eirq];
+ if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
+ generic_handle_irq(p->irq);
}
/* The extended IRQ controller has been found, this function registers it */
diff --git a/trunk/arch/sparc/kernel/perf_event.c b/trunk/arch/sparc/kernel/perf_event.c
index 885a8af74064..b5c38faa4ead 100644
--- a/trunk/arch/sparc/kernel/perf_event.c
+++ b/trunk/arch/sparc/kernel/perf_event.c
@@ -1762,15 +1762,25 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
- struct sparc_stackf32 *usf, sf;
unsigned long pc;
- usf = (struct sparc_stackf32 *) ufp;
- if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
- break;
+ if (thread32_stack_is_64bit(ufp)) {
+ struct sparc_stackf *usf, sf;
- pc = sf.callers_pc;
- ufp = (unsigned long)sf.fp;
+ ufp += STACK_BIAS;
+ usf = (struct sparc_stackf *) ufp;
+ if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+ break;
+ pc = sf.callers_pc & 0xffffffff;
+ ufp = ((unsigned long) sf.fp) & 0xffffffff;
+ } else {
+ struct sparc_stackf32 *usf, sf;
+ usf = (struct sparc_stackf32 *) ufp;
+ if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+ break;
+ pc = sf.callers_pc;
+ ufp = (unsigned long)sf.fp;
+ }
perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
diff --git a/trunk/arch/sparc/kernel/process_64.c b/trunk/arch/sparc/kernel/process_64.c
index d778248ef3f8..c6e0c2910043 100644
--- a/trunk/arch/sparc/kernel/process_64.c
+++ b/trunk/arch/sparc/kernel/process_64.c
@@ -452,13 +452,16 @@ void flush_thread(void)
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
+ bool stack_64bit = test_thread_64bit_stack(psp);
unsigned long fp, distance, rval;
- if (!(test_thread_flag(TIF_32BIT))) {
+ if (stack_64bit) {
csp += STACK_BIAS;
psp += STACK_BIAS;
__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
fp += STACK_BIAS;
+ if (test_thread_flag(TIF_32BIT))
+ fp &= 0xffffffff;
} else
__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
@@ -472,7 +475,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
rval = (csp - distance);
if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
rval = 0;
- else if (test_thread_flag(TIF_32BIT)) {
+ else if (!stack_64bit) {
if (put_user(((u32)csp),
&(((struct reg_window32 __user *)rval)->ins[6])))
rval = 0;
@@ -507,18 +510,18 @@ void synchronize_user_stack(void)
flush_user_windows();
if ((window = get_thread_wsaved()) != 0) {
- int winsize = sizeof(struct reg_window);
- int bias = 0;
-
- if (test_thread_flag(TIF_32BIT))
- winsize = sizeof(struct reg_window32);
- else
- bias = STACK_BIAS;
-
window -= 1;
do {
- unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
+ int winsize = sizeof(struct reg_window);
+ unsigned long sp;
+
+ sp = t->rwbuf_stkptrs[window];
+
+ if (test_thread_64bit_stack(sp))
+ sp += STACK_BIAS;
+ else
+ winsize = sizeof(struct reg_window32);
if (!copy_to_user((char __user *)sp, rwin, winsize)) {
shift_window_buffer(window, get_thread_wsaved() - 1, t);
@@ -544,13 +547,6 @@ void fault_in_user_windows(void)
{
struct thread_info *t = current_thread_info();
unsigned long window;
- int winsize = sizeof(struct reg_window);
- int bias = 0;
-
- if (test_thread_flag(TIF_32BIT))
- winsize = sizeof(struct reg_window32);
- else
- bias = STACK_BIAS;
flush_user_windows();
window = get_thread_wsaved();
@@ -558,8 +554,16 @@ void fault_in_user_windows(void)
if (likely(window != 0)) {
window -= 1;
do {
- unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
+ int winsize = sizeof(struct reg_window);
+ unsigned long sp;
+
+ sp = t->rwbuf_stkptrs[window];
+
+ if (test_thread_64bit_stack(sp))
+ sp += STACK_BIAS;
+ else
+ winsize = sizeof(struct reg_window32);
if (unlikely(sp & 0x7UL))
stack_unaligned(sp);
diff --git a/trunk/arch/sparc/kernel/ptrace_64.c b/trunk/arch/sparc/kernel/ptrace_64.c
index 484dabac7045..7ff45e4ba681 100644
--- a/trunk/arch/sparc/kernel/ptrace_64.c
+++ b/trunk/arch/sparc/kernel/ptrace_64.c
@@ -151,7 +151,7 @@ static int regwindow64_get(struct task_struct *target,
{
unsigned long rw_addr = regs->u_regs[UREG_I6];
- if (test_tsk_thread_flag(current, TIF_32BIT)) {
+ if (!test_thread_64bit_stack(rw_addr)) {
struct reg_window32 win32;
int i;
@@ -176,7 +176,7 @@ static int regwindow64_set(struct task_struct *target,
{
unsigned long rw_addr = regs->u_regs[UREG_I6];
- if (test_tsk_thread_flag(current, TIF_32BIT)) {
+ if (!test_thread_64bit_stack(rw_addr)) {
struct reg_window32 win32;
int i;
diff --git a/trunk/arch/sparc/kernel/setup_64.c b/trunk/arch/sparc/kernel/setup_64.c
index 0800e71d8a88..0eaf0059aaef 100644
--- a/trunk/arch/sparc/kernel/setup_64.c
+++ b/trunk/arch/sparc/kernel/setup_64.c
@@ -316,6 +316,25 @@ static void __init popc_patch(void)
}
}
+static void __init pause_patch(void)
+{
+ struct pause_patch_entry *p;
+
+ p = &__pause_3insn_patch;
+ while (p < &__pause_3insn_patch_end) {
+ unsigned long i, addr = p->addr;
+
+ for (i = 0; i < 3; i++) {
+ *(unsigned int *) (addr + (i * 4)) = p->insns[i];
+ wmb();
+ __asm__ __volatile__("flush %0"
+ : : "r" (addr + (i * 4)));
+ }
+
+ p++;
+ }
+}
+
#ifdef CONFIG_SMP
void __init boot_cpu_id_too_large(int cpu)
{
@@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void)
if (sparc64_elf_hwcap & AV_SPARC_POPC)
popc_patch();
+ if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
+ pause_patch();
}
void __init setup_arch(char **cmdline_p)
diff --git a/trunk/arch/sparc/kernel/signal_64.c b/trunk/arch/sparc/kernel/signal_64.c
index 867de2f8189c..689e1ba62809 100644
--- a/trunk/arch/sparc/kernel/signal_64.c
+++ b/trunk/arch/sparc/kernel/signal_64.c
@@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
err |= restore_fpu_state(regs, fpu_save);
err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
- err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
-
- if (err)
+ if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
goto segv;
err |= __get_user(rwin_save, &sf->rwin_save);
diff --git a/trunk/arch/sparc/kernel/sys_sparc_64.c b/trunk/arch/sparc/kernel/sys_sparc_64.c
index 11c6c9603e71..878ef3d5fec5 100644
--- a/trunk/arch/sparc/kernel/sys_sparc_64.c
+++ b/trunk/arch/sparc/kernel/sys_sparc_64.c
@@ -751,3 +751,8 @@ int kernel_execve(const char *filename,
: "cc");
return __res;
}
+
+asmlinkage long sys_kern_features(void)
+{
+ return KERN_FEATURE_MIXED_MODE_STACK;
+}
diff --git a/trunk/arch/sparc/kernel/systbls_32.S b/trunk/arch/sparc/kernel/systbls_32.S
index 63402f9e9f51..5147f574f125 100644
--- a/trunk/arch/sparc/kernel/systbls_32.S
+++ b/trunk/arch/sparc/kernel/systbls_32.S
@@ -85,3 +85,4 @@ sys_call_table:
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+/*340*/ .long sys_ni_syscall, sys_kcmp
diff --git a/trunk/arch/sparc/kernel/systbls_64.S b/trunk/arch/sparc/kernel/systbls_64.S
index 3a58e0d66f51..1c9af9fa38e9 100644
--- a/trunk/arch/sparc/kernel/systbls_64.S
+++ b/trunk/arch/sparc/kernel/systbls_64.S
@@ -86,6 +86,7 @@ sys_call_table32:
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
+/*340*/ .word sys_kern_features, sys_kcmp
#endif /* CONFIG_COMPAT */
@@ -163,3 +164,4 @@ sys_call_table:
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+/*340*/ .word sys_kern_features, sys_kcmp
diff --git a/trunk/arch/sparc/kernel/unaligned_64.c b/trunk/arch/sparc/kernel/unaligned_64.c
index f81d038f7340..8201c25e7669 100644
--- a/trunk/arch/sparc/kernel/unaligned_64.c
+++ b/trunk/arch/sparc/kernel/unaligned_64.c
@@ -113,21 +113,24 @@ static inline long sign_extend_imm13(long imm)
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
- unsigned long value;
+ unsigned long value, fp;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
+
+ fp = regs->u_regs[UREG_FP];
+
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
- win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window *)(fp + STACK_BIAS);
value = win->locals[reg - 16];
- } else if (test_thread_flag(TIF_32BIT)) {
+ } else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
- win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
- win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window __user *)(fp + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
@@ -135,19 +138,24 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
+ unsigned long fp;
+
if (reg < 16)
return &regs->u_regs[reg];
+
+ fp = regs->u_regs[UREG_FP];
+
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
- win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
- } else if (test_thread_flag(TIF_32BIT)) {
+ } else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 *win32;
- win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
return (unsigned long *)&win32->locals[reg - 16];
} else {
struct reg_window *win;
- win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
}
}
@@ -392,13 +400,15 @@ int handle_popc(u32 insn, struct pt_regs *regs)
if (rd)
regs->u_regs[rd] = ret;
} else {
- if (test_thread_flag(TIF_32BIT)) {
+ unsigned long fp = regs->u_regs[UREG_FP];
+
+ if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
- win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
put_user(ret, &win32->locals[rd - 16]);
} else {
struct reg_window __user *win;
- win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window __user *)(fp + STACK_BIAS);
put_user(ret, &win->locals[rd - 16]);
}
}
@@ -554,7 +564,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
- } else if (test_thread_flag(TIF_32BIT)) {
+ } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);
diff --git a/trunk/arch/sparc/kernel/visemul.c b/trunk/arch/sparc/kernel/visemul.c
index 08e074b7eb6a..c096c624ac4d 100644
--- a/trunk/arch/sparc/kernel/visemul.c
+++ b/trunk/arch/sparc/kernel/visemul.c
@@ -149,21 +149,24 @@ static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
- unsigned long value;
+ unsigned long value, fp;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
+
+ fp = regs->u_regs[UREG_FP];
+
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
- win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window *)(fp + STACK_BIAS);
value = win->locals[reg - 16];
- } else if (test_thread_flag(TIF_32BIT)) {
+ } else if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
- win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
- win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window __user *)(fp + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
@@ -172,16 +175,18 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
struct pt_regs *regs)
{
+ unsigned long fp = regs->u_regs[UREG_FP];
+
BUG_ON(reg < 16);
BUG_ON(regs->tstate & TSTATE_PRIV);
- if (test_thread_flag(TIF_32BIT)) {
+ if (!test_thread_64bit_stack(fp)) {
struct reg_window32 __user *win32;
- win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+ win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
return (unsigned long __user *)&win32->locals[reg - 16];
} else {
struct reg_window __user *win;
- win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+ win = (struct reg_window __user *)(fp + STACK_BIAS);
return &win->locals[reg - 16];
}
}
@@ -204,7 +209,7 @@ static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd)
} else {
unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);
- if (test_thread_flag(TIF_32BIT))
+ if (!test_thread_64bit_stack(regs->u_regs[UREG_FP]))
__put_user((u32)val, (u32 __user *)rd_user);
else
__put_user(val, rd_user);
diff --git a/trunk/arch/sparc/kernel/vmlinux.lds.S b/trunk/arch/sparc/kernel/vmlinux.lds.S
index 89c2c29f154b..0bacceb19150 100644
--- a/trunk/arch/sparc/kernel/vmlinux.lds.S
+++ b/trunk/arch/sparc/kernel/vmlinux.lds.S
@@ -132,6 +132,11 @@ SECTIONS
*(.popc_6insn_patch)
__popc_6insn_patch_end = .;
}
+ .pause_3insn_patch : {
+ __pause_3insn_patch = .;
+ *(.pause_3insn_patch)
+ __pause_3insn_patch_end = .;
+ }
PERCPU_SECTION(SMP_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
diff --git a/trunk/arch/sparc/kernel/winfixup.S b/trunk/arch/sparc/kernel/winfixup.S
index a6b0863c27df..1e67ce958369 100644
--- a/trunk/arch/sparc/kernel/winfixup.S
+++ b/trunk/arch/sparc/kernel/winfixup.S
@@ -43,6 +43,8 @@ spill_fixup_mna:
spill_fixup_dax:
TRAP_LOAD_THREAD_REG(%g6, %g1)
ldx [%g6 + TI_FLAGS], %g1
+ andcc %sp, 0x1, %g0
+ movne %icc, 0, %g1
andcc %g1, _TIF_32BIT, %g0
ldub [%g6 + TI_WSAVED], %g1
sll %g1, 3, %g3
diff --git a/trunk/arch/sparc/lib/atomic_64.S b/trunk/arch/sparc/lib/atomic_64.S
index 4d502da3de78..85c233d0a340 100644
--- a/trunk/arch/sparc/lib/atomic_64.S
+++ b/trunk/arch/sparc/lib/atomic_64.S
@@ -1,6 +1,6 @@
/* atomic.S: These things are too big to do inline.
*
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1999, 2007, 2012 David S. Miller (davem@davemloft.net)
*/
#include
@@ -117,3 +117,17 @@ ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
sub %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_sub_ret)
+
+ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: ldx [%o0], %g1
+ brlez,pn %g1, 3f
+ sub %g1, 1, %g7
+ casx [%o0], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
+ nop
+3: retl
+ sub %g1, 1, %o0
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic64_dec_if_positive)
diff --git a/trunk/arch/sparc/lib/ksyms.c b/trunk/arch/sparc/lib/ksyms.c
index ee31b884c61b..0c4e35e522fa 100644
--- a/trunk/arch/sparc/lib/ksyms.c
+++ b/trunk/arch/sparc/lib/ksyms.c
@@ -116,6 +116,7 @@ EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic64_add_ret);
EXPORT_SYMBOL(atomic64_sub);
EXPORT_SYMBOL(atomic64_sub_ret);
+EXPORT_SYMBOL(atomic64_dec_if_positive);
/* Atomic bit operations. */
EXPORT_SYMBOL(test_and_set_bit);
diff --git a/trunk/arch/sparc/math-emu/math_64.c b/trunk/arch/sparc/math-emu/math_64.c
index 1704068da928..034aadbff036 100644
--- a/trunk/arch/sparc/math-emu/math_64.c
+++ b/trunk/arch/sparc/math-emu/math_64.c
@@ -320,7 +320,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
XR = 0;
else if (freg < 16)
XR = regs->u_regs[freg];
- else if (test_thread_flag(TIF_32BIT)) {
+ else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
struct reg_window32 __user *win32;
flushw_user ();
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
diff --git a/trunk/arch/unicore32/Kconfig b/trunk/arch/unicore32/Kconfig
index e5c5473e69ce..c4fbb21e802b 100644
--- a/trunk/arch/unicore32/Kconfig
+++ b/trunk/arch/unicore32/Kconfig
@@ -16,6 +16,8 @@ config UNICORE32
select ARCH_WANT_FRAME_POINTERS
select GENERIC_IOMAP
select MODULES_USE_ELF_REL
+ select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
help
UniCore-32 is 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip
@@ -64,6 +66,9 @@ config GENERIC_CALIBRATE_DELAY
config ARCH_MAY_HAVE_PC_FDC
bool
+config ZONE_DMA
+ def_bool y
+
config NEED_DMA_MAP_STATE
def_bool y
@@ -216,7 +221,7 @@ config PUV3_GPIO
bool
depends on !ARCH_FPGA
select GENERIC_GPIO
- select GPIO_SYSFS if EXPERIMENTAL
+ select GPIO_SYSFS
default y
if PUV3_NB0916
diff --git a/trunk/arch/unicore32/include/asm/Kbuild b/trunk/arch/unicore32/include/asm/Kbuild
index c910c9857e11..601e92f18af6 100644
--- a/trunk/arch/unicore32/include/asm/Kbuild
+++ b/trunk/arch/unicore32/include/asm/Kbuild
@@ -1,4 +1,3 @@
-include include/asm-generic/Kbuild.asm
generic-y += atomic.h
generic-y += auxvec.h
diff --git a/trunk/arch/unicore32/include/asm/bug.h b/trunk/arch/unicore32/include/asm/bug.h
index b1ff8cadb086..93a56f3e2344 100644
--- a/trunk/arch/unicore32/include/asm/bug.h
+++ b/trunk/arch/unicore32/include/asm/bug.h
@@ -19,9 +19,4 @@ extern void die(const char *msg, struct pt_regs *regs, int err);
extern void uc32_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, unsigned long err, unsigned long trap);
-extern asmlinkage void __backtrace(void);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
-
-extern void __show_regs(struct pt_regs *);
-
#endif /* __UNICORE_BUG_H__ */
diff --git a/trunk/arch/unicore32/include/asm/cmpxchg.h b/trunk/arch/unicore32/include/asm/cmpxchg.h
index df4d5acfd19f..8e797ad4fa24 100644
--- a/trunk/arch/unicore32/include/asm/cmpxchg.h
+++ b/trunk/arch/unicore32/include/asm/cmpxchg.h
@@ -35,7 +35,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
: "memory", "cc");
break;
default:
- ret = __xchg_bad_pointer();
+ __xchg_bad_pointer();
}
return ret;
diff --git a/trunk/arch/unicore32/include/asm/kvm_para.h b/trunk/arch/unicore32/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/trunk/arch/unicore32/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include
diff --git a/trunk/arch/unicore32/include/asm/processor.h b/trunk/arch/unicore32/include/asm/processor.h
index 14382cb09657..4eaa42167667 100644
--- a/trunk/arch/unicore32/include/asm/processor.h
+++ b/trunk/arch/unicore32/include/asm/processor.h
@@ -72,11 +72,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
-/*
- * Create a new kernel thread
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/trunk/arch/unicore32/include/asm/ptrace.h b/trunk/arch/unicore32/include/asm/ptrace.h
index b9caf9b0997b..726749dab52f 100644
--- a/trunk/arch/unicore32/include/asm/ptrace.h
+++ b/trunk/arch/unicore32/include/asm/ptrace.h
@@ -12,80 +12,10 @@
#ifndef __UNICORE_PTRACE_H__
#define __UNICORE_PTRACE_H__
-#define PTRACE_GET_THREAD_AREA 22
-
-/*
- * PSR bits
- */
-#define USER_MODE 0x00000010
-#define REAL_MODE 0x00000011
-#define INTR_MODE 0x00000012
-#define PRIV_MODE 0x00000013
-#define ABRT_MODE 0x00000017
-#define EXTN_MODE 0x0000001b
-#define SUSR_MODE 0x0000001f
-#define MODE_MASK 0x0000001f
-#define PSR_R_BIT 0x00000040
-#define PSR_I_BIT 0x00000080
-#define PSR_V_BIT 0x10000000
-#define PSR_C_BIT 0x20000000
-#define PSR_Z_BIT 0x40000000
-#define PSR_S_BIT 0x80000000
-
-/*
- * Groups of PSR bits
- */
-#define PSR_f 0xff000000 /* Flags */
-#define PSR_c 0x000000ff /* Control */
+#include
#ifndef __ASSEMBLY__
-/*
- * This struct defines the way the registers are stored on the
- * stack during a system call. Note that sizeof(struct pt_regs)
- * has to be a multiple of 8.
- */
-struct pt_regs {
- unsigned long uregs[34];
-};
-
-#define UCreg_asr uregs[32]
-#define UCreg_pc uregs[31]
-#define UCreg_lr uregs[30]
-#define UCreg_sp uregs[29]
-#define UCreg_ip uregs[28]
-#define UCreg_fp uregs[27]
-#define UCreg_26 uregs[26]
-#define UCreg_25 uregs[25]
-#define UCreg_24 uregs[24]
-#define UCreg_23 uregs[23]
-#define UCreg_22 uregs[22]
-#define UCreg_21 uregs[21]
-#define UCreg_20 uregs[20]
-#define UCreg_19 uregs[19]
-#define UCreg_18 uregs[18]
-#define UCreg_17 uregs[17]
-#define UCreg_16 uregs[16]
-#define UCreg_15 uregs[15]
-#define UCreg_14 uregs[14]
-#define UCreg_13 uregs[13]
-#define UCreg_12 uregs[12]
-#define UCreg_11 uregs[11]
-#define UCreg_10 uregs[10]
-#define UCreg_09 uregs[9]
-#define UCreg_08 uregs[8]
-#define UCreg_07 uregs[7]
-#define UCreg_06 uregs[6]
-#define UCreg_05 uregs[5]
-#define UCreg_04 uregs[4]
-#define UCreg_03 uregs[3]
-#define UCreg_02 uregs[2]
-#define UCreg_01 uregs[1]
-#define UCreg_00 uregs[0]
-#define UCreg_ORIG_00 uregs[33]
-
-#ifdef __KERNEL__
-
#define user_mode(regs) \
(processor_mode(regs) == USER_MODE)
@@ -125,9 +55,5 @@ static inline int valid_user_regs(struct pt_regs *regs)
#define instruction_pointer(regs) ((regs)->UCreg_pc)
-#endif /* __KERNEL__ */
-
#endif /* __ASSEMBLY__ */
-
#endif
-
diff --git a/trunk/arch/unicore32/include/uapi/asm/Kbuild b/trunk/arch/unicore32/include/uapi/asm/Kbuild
index baebb3da1d44..0514d7ad6855 100644
--- a/trunk/arch/unicore32/include/uapi/asm/Kbuild
+++ b/trunk/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,3 +1,10 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+header-y += byteorder.h
+header-y += kvm_para.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += unistd.h
+
+generic-y += kvm_para.h
diff --git a/trunk/arch/unicore32/include/asm/byteorder.h b/trunk/arch/unicore32/include/uapi/asm/byteorder.h
similarity index 100%
rename from trunk/arch/unicore32/include/asm/byteorder.h
rename to trunk/arch/unicore32/include/uapi/asm/byteorder.h
diff --git a/trunk/arch/unicore32/include/uapi/asm/ptrace.h b/trunk/arch/unicore32/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..187aa2e98a53
--- /dev/null
+++ b/trunk/arch/unicore32/include/uapi/asm/ptrace.h
@@ -0,0 +1,90 @@
+/*
+ * linux/arch/unicore32/include/asm/ptrace.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _UAPI__UNICORE_PTRACE_H__
+#define _UAPI__UNICORE_PTRACE_H__
+
+#define PTRACE_GET_THREAD_AREA 22
+
+/*
+ * PSR bits
+ */
+#define USER_MODE 0x00000010
+#define REAL_MODE 0x00000011
+#define INTR_MODE 0x00000012
+#define PRIV_MODE 0x00000013
+#define ABRT_MODE 0x00000017
+#define EXTN_MODE 0x0000001b
+#define SUSR_MODE 0x0000001f
+#define MODE_MASK 0x0000001f
+#define PSR_R_BIT 0x00000040
+#define PSR_I_BIT 0x00000080
+#define PSR_V_BIT 0x10000000
+#define PSR_C_BIT 0x20000000
+#define PSR_Z_BIT 0x40000000
+#define PSR_S_BIT 0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f 0xff000000 /* Flags */
+#define PSR_c 0x000000ff /* Control */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * stack during a system call. Note that sizeof(struct pt_regs)
+ * has to be a multiple of 8.
+ */
+struct pt_regs {
+ unsigned long uregs[34];
+};
+
+#define UCreg_asr uregs[32]
+#define UCreg_pc uregs[31]
+#define UCreg_lr uregs[30]
+#define UCreg_sp uregs[29]
+#define UCreg_ip uregs[28]
+#define UCreg_fp uregs[27]
+#define UCreg_26 uregs[26]
+#define UCreg_25 uregs[25]
+#define UCreg_24 uregs[24]
+#define UCreg_23 uregs[23]
+#define UCreg_22 uregs[22]
+#define UCreg_21 uregs[21]
+#define UCreg_20 uregs[20]
+#define UCreg_19 uregs[19]
+#define UCreg_18 uregs[18]
+#define UCreg_17 uregs[17]
+#define UCreg_16 uregs[16]
+#define UCreg_15 uregs[15]
+#define UCreg_14 uregs[14]
+#define UCreg_13 uregs[13]
+#define UCreg_12 uregs[12]
+#define UCreg_11 uregs[11]
+#define UCreg_10 uregs[10]
+#define UCreg_09 uregs[9]
+#define UCreg_08 uregs[8]
+#define UCreg_07 uregs[7]
+#define UCreg_06 uregs[6]
+#define UCreg_05 uregs[5]
+#define UCreg_04 uregs[4]
+#define UCreg_03 uregs[3]
+#define UCreg_02 uregs[2]
+#define UCreg_01 uregs[1]
+#define UCreg_00 uregs[0]
+#define UCreg_ORIG_00 uregs[33]
+
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI__UNICORE_PTRACE_H__ */
diff --git a/trunk/arch/unicore32/include/asm/sigcontext.h b/trunk/arch/unicore32/include/uapi/asm/sigcontext.h
similarity index 100%
rename from trunk/arch/unicore32/include/asm/sigcontext.h
rename to trunk/arch/unicore32/include/uapi/asm/sigcontext.h
diff --git a/trunk/arch/unicore32/include/asm/unistd.h b/trunk/arch/unicore32/include/uapi/asm/unistd.h
similarity index 92%
rename from trunk/arch/unicore32/include/asm/unistd.h
rename to trunk/arch/unicore32/include/uapi/asm/unistd.h
index 2abcf61c615d..d18a3be89b38 100644
--- a/trunk/arch/unicore32/include/asm/unistd.h
+++ b/trunk/arch/unicore32/include/uapi/asm/unistd.h
@@ -12,3 +12,4 @@
/* Use the standard ABI for syscalls. */
#include
+#define __ARCH_WANT_SYS_EXECVE
diff --git a/trunk/arch/unicore32/kernel/entry.S b/trunk/arch/unicore32/kernel/entry.S
index dcb87ab19ddd..7049350c790f 100644
--- a/trunk/arch/unicore32/kernel/entry.S
+++ b/trunk/arch/unicore32/kernel/entry.S
@@ -573,17 +573,16 @@ ENDPROC(ret_to_user)
*/
ENTRY(ret_from_fork)
b.l schedule_tail
- get_thread_info tsk
- ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing
- mov why, #1
- cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
- beq ret_slow_syscall
- mov r1, sp
- mov r0, #1 @ trace exit [IP = 1]
- b.l syscall_trace
b ret_slow_syscall
ENDPROC(ret_from_fork)
+ENTRY(ret_from_kernel_thread)
+ b.l schedule_tail
+ mov r0, r5
+ adr lr, ret_slow_syscall
+ mov pc, r4
+ENDPROC(ret_from_kernel_thread)
+
/*=============================================================================
* SWI handler
*-----------------------------------------------------------------------------
@@ -669,11 +668,6 @@ __cr_alignment:
#endif
.ltorg
-ENTRY(sys_execve)
- add r3, sp, #S_OFF
- b __sys_execve
-ENDPROC(sys_execve)
-
ENTRY(sys_clone)
add ip, sp, #S_OFF
stw ip, [sp+], #4
diff --git a/trunk/arch/unicore32/kernel/process.c b/trunk/arch/unicore32/kernel/process.c
index b008586dad75..a8fe265ce2c0 100644
--- a/trunk/arch/unicore32/kernel/process.c
+++ b/trunk/arch/unicore32/kernel/process.c
@@ -258,6 +258,7 @@ void release_thread(struct task_struct *dead_task)
}
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
@@ -266,17 +267,22 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
- *childregs = *regs;
- childregs->UCreg_00 = 0;
- childregs->UCreg_sp = stack_start;
-
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
thread->cpu_context.sp = (unsigned long)childregs;
- thread->cpu_context.pc = (unsigned long)ret_from_fork;
-
- if (clone_flags & CLONE_SETTLS)
- childregs->UCreg_16 = regs->UCreg_03;
+ if (unlikely(!regs)) {
+ thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
+ thread->cpu_context.r4 = stack_start;
+ thread->cpu_context.r5 = stk_sz;
+ memset(childregs, 0, sizeof(struct pt_regs));
+ } else {
+ thread->cpu_context.pc = (unsigned long)ret_from_fork;
+ *childregs = *regs;
+ childregs->UCreg_00 = 0;
+ childregs->UCreg_sp = stack_start;
+ if (clone_flags & CLONE_SETTLS)
+ childregs->UCreg_16 = regs->UCreg_03;
+ }
return 0;
}
@@ -305,42 +311,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp)
}
EXPORT_SYMBOL(dump_fpu);
-/*
- * Shuffle the argument into the correct register before calling the
- * thread function. r1 is the thread argument, r2 is the pointer to
- * the thread function, and r3 points to the exit function.
- */
-asm(".pushsection .text\n"
-" .align\n"
-" .type kernel_thread_helper, #function\n"
-"kernel_thread_helper:\n"
-" mov.a asr, r7\n"
-" mov r0, r4\n"
-" mov lr, r6\n"
-" mov pc, r5\n"
-" .size kernel_thread_helper, . - kernel_thread_helper\n"
-" .popsection");
-
-/*
- * Create a kernel thread.
- */
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof(regs));
-
- regs.UCreg_04 = (unsigned long)arg;
- regs.UCreg_05 = (unsigned long)fn;
- regs.UCreg_06 = (unsigned long)do_exit;
- regs.UCreg_07 = PRIV_MODE;
- regs.UCreg_pc = (unsigned long)kernel_thread_helper;
- regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT;
-
- return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
diff --git a/trunk/arch/unicore32/kernel/setup.h b/trunk/arch/unicore32/kernel/setup.h
index f23955028a18..30f749da8f73 100644
--- a/trunk/arch/unicore32/kernel/setup.h
+++ b/trunk/arch/unicore32/kernel/setup.h
@@ -30,4 +30,10 @@ extern char __vectors_start[], __vectors_end[];
extern void kernel_thread_helper(void);
extern void __init early_signal_init(void);
+
+extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+extern void __show_regs(struct pt_regs *);
+
#endif
diff --git a/trunk/arch/unicore32/kernel/sys.c b/trunk/arch/unicore32/kernel/sys.c
index fabdee96110b..9680134b31f0 100644
--- a/trunk/arch/unicore32/kernel/sys.c
+++ b/trunk/arch/unicore32/kernel/sys.c
@@ -42,69 +42,6 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
parent_tid, child_tid);
}
-/* sys_execve() executes a new program.
- * This is called indirectly via a small wrapper
- */
-asmlinkage long __sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
- struct pt_regs *regs)
-{
- int error;
- struct filename *fn;
-
- fn = getname(filename);
- error = PTR_ERR(fn);
- if (IS_ERR(fn))
- goto out;
- error = do_execve(fn->name, argv, envp, regs);
- putname(fn);
-out:
- return error;
-}
-
-int kernel_execve(const char *filename,
- const char *const argv[],
- const char *const envp[])
-{
- struct pt_regs regs;
- int ret;
-
- memset(&regs, 0, sizeof(struct pt_regs));
- ret = do_execve(filename,
- (const char __user *const __user *)argv,
- (const char __user *const __user *)envp, &regs);
- if (ret < 0)
- goto out;
-
- /*
- * Save argc to the register structure for userspace.
- */
- regs.UCreg_00 = ret;
-
- /*
- * We were successful. We won't be returning to our caller, but
- * instead to user space by manipulating the kernel stack.
- */
- asm("add r0, %0, %1\n\t"
- "mov r1, %2\n\t"
- "mov r2, %3\n\t"
- "mov r22, #0\n\t" /* not a syscall */
- "mov r23, %0\n\t" /* thread structure */
- "b.l memmove\n\t" /* copy regs to top of stack */
- "mov sp, r0\n\t" /* reposition stack pointer */
- "b ret_to_user"
- :
- : "r" (current_thread_info()),
- "Ir" (THREAD_START_SP - sizeof(regs)),
- "r" (®s),
- "Ir" (sizeof(regs))
- : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
-
- out:
- return ret;
-}
-
/* Note: used by the compat code even in 64-bit Linux. */
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
diff --git a/trunk/arch/unicore32/mm/fault.c b/trunk/arch/unicore32/mm/fault.c
index 2eeb9c04cab0..f9b5c10bccee 100644
--- a/trunk/arch/unicore32/mm/fault.c
+++ b/trunk/arch/unicore32/mm/fault.c
@@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
}
static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
- struct task_struct *tsk)
+ unsigned int flags, struct task_struct *tsk)
{
struct vm_area_struct *vma;
int fault;
@@ -194,14 +194,7 @@ static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the fault.
*/
- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
- (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR))
- return fault;
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
return fault;
check_stack:
@@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
@@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!user_mode(regs)
&& !search_exception_tables(regs->UCreg_pc))
goto no_context;
+retry:
down_read(&mm->mmap_sem);
} else {
/*
@@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
#endif
}
- fault = __do_pf(mm, addr, fsr, tsk);
+ fault = __do_pf(mm, addr, fsr, flags, tsk);
+
+ /* If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_sem because
+ * it would already be released in __lock_page_or_retry in
+ * mm/filemap.c. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return 0;
+
+ if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
+ }
+ }
+
up_read(&mm->mmap_sem);
/*
diff --git a/trunk/arch/x86/boot/compressed/eboot.c b/trunk/arch/x86/boot/compressed/eboot.c
index c760e073963e..e87b0cac14b5 100644
--- a/trunk/arch/x86/boot/compressed/eboot.c
+++ b/trunk/arch/x86/boot/compressed/eboot.c
@@ -12,6 +12,8 @@
#include
#include
+#undef memcpy /* Use memcpy from misc.c */
+
#include "eboot.h"
static efi_system_table_t *sys_table;
diff --git a/trunk/arch/x86/boot/header.S b/trunk/arch/x86/boot/header.S
index 2a017441b8b2..8c132a625b94 100644
--- a/trunk/arch/x86/boot/header.S
+++ b/trunk/arch/x86/boot/header.S
@@ -476,6 +476,3 @@ die:
setup_corrupt:
.byte 7
.string "No setup signature found...\n"
-
- .data
-dummy: .long 0
diff --git a/trunk/arch/x86/include/asm/ptrace.h b/trunk/arch/x86/include/asm/ptrace.h
index dcfde52979c3..19f16ebaf4fa 100644
--- a/trunk/arch/x86/include/asm/ptrace.h
+++ b/trunk/arch/x86/include/asm/ptrace.h
@@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
}
#endif
-/*
- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps. The previous stack will be directly underneath the saved
- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- *
- * This is valid only for kernel mode traps.
- */
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
#ifdef CONFIG_X86_32
- return (unsigned long)(&regs->sp);
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
return regs->sp;
-#endif
}
+#endif
#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
diff --git a/trunk/arch/x86/include/asm/xen/hypercall.h b/trunk/arch/x86/include/asm/xen/hypercall.h
index 59c226d120cd..c20d1ce62dc6 100644
--- a/trunk/arch/x86/include/asm/xen/hypercall.h
+++ b/trunk/arch/x86/include/asm/xen/hypercall.h
@@ -359,18 +359,14 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
return _hypercall4(int, update_va_mapping, va,
new_val.pte, new_val.pte >> 32, flags);
}
+extern int __must_check xen_event_channel_op_compat(int, void *);
static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
int rc = _hypercall2(int, event_channel_op, cmd, arg);
- if (unlikely(rc == -ENOSYS)) {
- struct evtchn_op op;
- op.cmd = cmd;
- memcpy(&op.u, arg, sizeof(op.u));
- rc = _hypercall1(int, event_channel_op_compat, &op);
- memcpy(arg, &op.u, sizeof(op.u));
- }
+ if (unlikely(rc == -ENOSYS))
+ rc = xen_event_channel_op_compat(cmd, arg);
return rc;
}
@@ -386,17 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
return _hypercall3(int, console_io, cmd, count, str);
}
+extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
+
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
int rc = _hypercall2(int, physdev_op, cmd, arg);
- if (unlikely(rc == -ENOSYS)) {
- struct physdev_op op;
- op.cmd = cmd;
- memcpy(&op.u, arg, sizeof(op.u));
- rc = _hypercall1(int, physdev_op_compat, &op);
- memcpy(arg, &op.u, sizeof(op.u));
- }
+ if (unlikely(rc == -ENOSYS))
+ rc = HYPERVISOR_physdev_op_compat(cmd, arg);
return rc;
}
diff --git a/trunk/arch/x86/kernel/cpu/amd.c b/trunk/arch/x86/kernel/cpu/amd.c
index f7e98a2c0d12..1b7d1656a042 100644
--- a/trunk/arch/x86/kernel/cpu/amd.c
+++ b/trunk/arch/x86/kernel/cpu/amd.c
@@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
}
}
+ /*
+ * The way access filter has a performance penalty on some workloads.
+ * Disable it on the affected CPUs.
+ */
+ if ((c->x86 == 0x15) &&
+ (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+ u64 val;
+
+ if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+ val |= 0x1E;
+ wrmsrl_safe(0xc0011021, val);
+ }
+ }
+
cpu_detect_cache_sizes(c);
/* Multi core CPU? */
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 698b6ec12e0f..1ac581f38dfa 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -6,7 +6,7 @@
*
* Written by Jacob Shin - AMD, Inc.
*
- * Support: borislav.petkov@amd.com
+ * Maintained by: Borislav Petkov
*
* April 2006
* - added support for AMD Family 0x10 processors
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 5f88abf07e9c..4f9a3cbfc4a3 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -285,34 +285,39 @@ void cmci_clear(void)
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
+static long cmci_rediscover_work_func(void *arg)
+{
+ int banks;
+
+ /* Recheck banks in case CPUs don't all have the same */
+ if (cmci_supported(&banks))
+ cmci_discover(banks);
+
+ return 0;
+}
+
/*
* After a CPU went down cycle through all the others and rediscover
* Must run in process context.
*/
void cmci_rediscover(int dying)
{
- int banks;
- int cpu;
- cpumask_var_t old;
+ int cpu, banks;
if (!cmci_supported(&banks))
return;
- if (!alloc_cpumask_var(&old, GFP_KERNEL))
- return;
- cpumask_copy(old, &current->cpus_allowed);
for_each_online_cpu(cpu) {
if (cpu == dying)
continue;
- if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+
+ if (cpu == smp_processor_id()) {
+ cmci_rediscover_work_func(NULL);
continue;
- /* Recheck banks in case CPUs don't all have the same */
- if (cmci_supported(&banks))
- cmci_discover(banks);
- }
+ }
- set_cpus_allowed_ptr(current, old);
- free_cpumask_var(old);
+ work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
+ }
}
/*
diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S
index b51b2c7ee51f..1328fe49a3f1 100644
--- a/trunk/arch/x86/kernel/entry_64.S
+++ b/trunk/arch/x86/kernel/entry_64.S
@@ -995,8 +995,8 @@ END(interrupt)
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
- ASM_CLAC
XCPT_FRAME
+ ASM_CLAC
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ
/* 0(%rsp): old_rsp-ARGOFFSET */
@@ -1135,8 +1135,8 @@ END(common_interrupt)
*/
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
- ASM_CLAC
INTR_FRAME
+ ASM_CLAC
pushq_cfi $~(\num)
.Lcommon_\sym:
interrupt \do_sym
@@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \
*/
.macro zeroentry sym do_sym
ENTRY(\sym)
- ASM_CLAC
INTR_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
subq $ORIG_RAX-R15, %rsp
@@ -1208,8 +1208,8 @@ END(\sym)
.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
- ASM_CLAC
INTR_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
subq $ORIG_RAX-R15, %rsp
@@ -1227,8 +1227,8 @@ END(\sym)
#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
- ASM_CLAC
INTR_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
subq $ORIG_RAX-R15, %rsp
@@ -1247,8 +1247,8 @@ END(\sym)
.macro errorentry sym do_sym
ENTRY(\sym)
- ASM_CLAC
XCPT_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1266,8 +1266,8 @@ END(\sym)
/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
- ASM_CLAC
XCPT_FRAME
+ ASM_CLAC
PARAVIRT_ADJUST_EXCEPTION_FRAME
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
diff --git a/trunk/arch/x86/kernel/microcode_amd.c b/trunk/arch/x86/kernel/microcode_amd.c
index 7720ff5a9ee2..efdec7cd8e01 100644
--- a/trunk/arch/x86/kernel/microcode_amd.c
+++ b/trunk/arch/x86/kernel/microcode_amd.c
@@ -8,8 +8,8 @@
* Tigran Aivazian
*
* Maintainers:
- * Andreas Herrmann
- * Borislav Petkov
+ * Andreas Herrmann
+ * Borislav Petkov
*
* This driver allows to upgrade microcode on F10h AMD
* CPUs and later.
@@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
switch (c->x86) {
case 0x14:
@@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
case 0x15:
max_size = F15H_MPB_MAX_SIZE;
break;
+ case 0x16:
+ max_size = F16H_MPB_MAX_SIZE;
+ break;
default:
max_size = F1XH_MPB_MAX_SIZE;
break;
diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c
index b00b33a18390..5e0596b0632e 100644
--- a/trunk/arch/x86/kernel/ptrace.c
+++ b/trunk/arch/x86/kernel/ptrace.c
@@ -22,6 +22,7 @@
#include
#include
#include
+#include
#include
#include
@@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value)
#define FLAG_MASK FLAG_MASK_32
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps. The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+ unsigned long sp = (unsigned long)&regs->sp;
+ struct thread_info *tinfo;
+
+ if (context == (sp & ~(THREAD_SIZE - 1)))
+ return sp;
+
+ tinfo = (struct thread_info *)context;
+ if (tinfo->previous_esp)
+ return tinfo->previous_esp;
+
+ return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
+
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
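kernel_stack_pointer() relies on the fact that a task's kernel stack occupies a THREAD_SIZE-aligned block, so masking any address inside it with ~(THREAD_SIZE - 1) yields the block base. A standalone demonstration of that alignment test (the THREAD_SIZE value and addresses below are only illustrative):

    /* Two addresses are on the same kernel stack iff masking both with
     * ~(THREAD_SIZE - 1) gives the same base.  8 KiB is illustrative. */
    #include <stdio.h>

    #define THREAD_SIZE (8 * 1024UL)

    static int same_stack(unsigned long a, unsigned long b)
    {
            return (a & ~(THREAD_SIZE - 1)) == (b & ~(THREAD_SIZE - 1));
    }

    int main(void)
    {
            unsigned long regs    = 0xc012e010UL;        /* pretend &regs->sp   */
            unsigned long inside  = regs + 0x40;         /* still on this stack */
            unsigned long outside = regs + THREAD_SIZE;  /* next block          */

            printf("inside:  %d\n", same_stack(regs, inside));  /* prints 1 */
            printf("outside: %d\n", same_stack(regs, outside)); /* prints 0 */
            return 0;
    }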
diff --git a/trunk/arch/x86/kvm/cpuid.h b/trunk/arch/x86/kvm/cpuid.h
index a10e46016851..58fc51488828 100644
--- a/trunk/arch/x86/kvm/cpuid.h
+++ b/trunk/arch/x86/kvm/cpuid.h
@@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
+ if (!static_cpu_has(X86_FEATURE_XSAVE))
+ return 0;
+
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c
index ad6b1dd06f8b..f85815945fc6 100644
--- a/trunk/arch/x86/kvm/vmx.c
+++ b/trunk/arch/x86/kvm/vmx.c
@@ -6549,19 +6549,22 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
}
}
- exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
/* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() &&
best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
guest_cpuid_has_pcid(vcpu)) {
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control);
} else {
- exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
- exec_control);
+ if (cpu_has_secondary_exec_ctrls()) {
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ exec_control);
+ }
if (best)
best->ebx &= ~bit(X86_FEATURE_INVPCID);
}
diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c
index 224a7e78cb6c..4f7641756be2 100644
--- a/trunk/arch/x86/kvm/x86.c
+++ b/trunk/arch/x86/kvm/x86.c
@@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
int pending_vec, max_bits, idx;
struct desc_ptr dt;
+ if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+ return -EINVAL;
+
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);
diff --git a/trunk/arch/x86/mm/tlb.c b/trunk/arch/x86/mm/tlb.c
index 0777f042e400..60f926cd8b0e 100644
--- a/trunk/arch/x86/mm/tlb.c
+++ b/trunk/arch/x86/mm/tlb.c
@@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
}
if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
- || vmflag == VM_HUGETLB) {
+ || vmflag & VM_HUGETLB) {
local_flush_tlb();
goto flush_all;
}
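vm_flags is a bit mask, so the old equality test only fired when VM_HUGETLB was the sole flag set on the VMA, which in practice it never is; the bitwise AND is the correct membership test. A tiny standalone illustration (the flag values below are made up, not the kernel's):

    #include <stdio.h>

    #define VM_READ    0x0001UL   /* illustrative values only */
    #define VM_WRITE   0x0002UL
    #define VM_HUGETLB 0x0040UL

    int main(void)
    {
            unsigned long vmflag = VM_READ | VM_WRITE | VM_HUGETLB;

            printf("== test: %d\n", vmflag == VM_HUGETLB);        /* 0: huge page missed */
            printf("&  test: %d\n", (vmflag & VM_HUGETLB) != 0);  /* 1: huge page seen   */
            return 0;
    }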
diff --git a/trunk/arch/x86/pci/ce4100.c b/trunk/arch/x86/pci/ce4100.c
index 41bd2a2d2c50..b914e20b5a00 100644
--- a/trunk/arch/x86/pci/ce4100.c
+++ b/trunk/arch/x86/pci/ce4100.c
@@ -115,6 +115,16 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value)
reg_read(reg, value);
}
+static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
+ /* force interrupt pin value to 0 */
+ *value = reg->sim_reg.value & 0xfff00ff;
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+}
+
static struct sim_dev_reg bus1_fixups[] = {
DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write)
DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write)
@@ -144,6 +154,7 @@ static struct sim_dev_reg bus1_fixups[] = {
DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write)
DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write)
+ DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write)
DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write)
@@ -161,8 +172,10 @@ static struct sim_dev_reg bus1_fixups[] = {
DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write)
DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write)
+ DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write)
+ DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
};
static void __init init_sim_regs(void)
diff --git a/trunk/arch/x86/platform/ce4100/ce4100.c b/trunk/arch/x86/platform/ce4100/ce4100.c
index 4c61b52191eb..92525cb8e54c 100644
--- a/trunk/arch/x86/platform/ce4100/ce4100.c
+++ b/trunk/arch/x86/platform/ce4100/ce4100.c
@@ -21,12 +21,25 @@
#include
#include
#include
+#include
static int ce4100_i8042_detect(void)
{
return 0;
}
+/*
+ * The CE4100 platform has an internal 8051 Microcontroller which is
+ * responsible for signaling to the external Power Management Unit the
+ * intention to reset, reboot or power off the system. This 8051 device has
+ * its command register mapped at I/O port 0xcf9 and the value 0x4 is used
+ * to power off the system.
+ */
+static void ce4100_power_off(void)
+{
+ outb(0x4, 0xcf9);
+}
+
#ifdef CONFIG_SERIAL_8250
static unsigned int mem_serial_in(struct uart_port *p, int offset)
@@ -139,8 +152,19 @@ void __init x86_ce4100_early_setup(void)
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.pci.init = ce4100_pci_init;
+ /*
+ * By default, the reboot method is ACPI which is supported by the
+ * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue;
+ * the bootloader will, however, issue a system power off instead of a
+ * reboot. By using BOOT_KBD we ensure a proper system reboot as
+ * expected.
+ */
+ reboot_type = BOOT_KBD;
+
#ifdef CONFIG_X86_IO_APIC
x86_init.pci.init_irq = sdv_pci_init;
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
#endif
+
+ pm_power_off = ce4100_power_off;
}
diff --git a/trunk/block/blk-exec.c b/trunk/block/blk-exec.c
index 8b6dc5bd4dd0..f71eac35c1b9 100644
--- a/trunk/block/blk-exec.c
+++ b/trunk/block/blk-exec.c
@@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq_end_io_fn *done)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+ bool is_pm_resume;
WARN_ON(irqs_disabled());
rq->rq_disk = bd_disk;
rq->end_io = done;
+ /*
+ * need to check this before __blk_run_queue(), because rq can
+ * be freed before that returns.
+ */
+ is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
spin_lock_irq(q->queue_lock);
@@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
__elv_add_request(q, rq, where);
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
- if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+ if (is_pm_resume)
q->request_fn(q);
spin_unlock_irq(q->queue_lock);
}
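The pattern in this hunk is general: snapshot whatever you will need from an object before calling a function that may complete and free it. A small self-contained illustration with hypothetical names (not block-layer code):

    #include <stdio.h>
    #include <stdlib.h>

    struct job { int type; };

    /* Completes and frees the job; the caller must not touch it afterwards. */
    static void run_and_release(struct job *j)
    {
            free(j);
    }

    static void submit(struct job *j)
    {
            int is_special = (j->type == 1); /* snapshot while j is still valid */

            run_and_release(j);              /* j may already be gone here      */

            if (is_special)                  /* safe: uses the snapshot         */
                    printf("special post-processing\n");
    }

    int main(void)
    {
            struct job *j = malloc(sizeof(*j));

            if (!j)
                    return 1;
            j->type = 1;
            submit(j);
            return 0;
    }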
diff --git a/trunk/crypto/cryptd.c b/trunk/crypto/cryptd.c
index 671d4d6d14df..7bdd61b867c8 100644
--- a/trunk/crypto/cryptd.c
+++ b/trunk/crypto/cryptd.c
@@ -137,13 +137,18 @@ static void cryptd_queue_worker(struct work_struct *work)
struct crypto_async_request *req, *backlog;
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
- /* Only handle one request at a time to avoid hogging crypto
- * workqueue. preempt_disable/enable is used to prevent
- * being preempted by cryptd_enqueue_request() */
+ /*
+ * Only handle one request at a time to avoid hogging crypto workqueue.
+ * preempt_disable/enable is used to prevent being preempted by
+ * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
+ * cryptd_enqueue_request() being accessed from software interrupts.
+ */
+ local_bh_disable();
preempt_disable();
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
preempt_enable();
+ local_bh_enable();
if (!req)
return;
diff --git a/trunk/drivers/ata/ahci_platform.c b/trunk/drivers/ata/ahci_platform.c
index b1ae48054dc5..b7078afddb74 100644
--- a/trunk/drivers/ata/ahci_platform.c
+++ b/trunk/drivers/ata/ahci_platform.c
@@ -238,7 +238,7 @@ static int __devexit ahci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ahci_suspend(struct device *dev)
{
struct ahci_platform_data *pdata = dev_get_platdata(dev);
diff --git a/trunk/drivers/ata/libata-acpi.c b/trunk/drivers/ata/libata-acpi.c
index fd9ecf74e631..5b0ba3f20edc 100644
--- a/trunk/drivers/ata/libata-acpi.c
+++ b/trunk/drivers/ata/libata-acpi.c
@@ -1105,10 +1105,15 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
struct acpi_device *acpi_dev;
struct acpi_device_power_state *states;
- if (ap->flags & ATA_FLAG_ACPI_SATA)
- ata_dev = &ap->link.device[sdev->channel];
- else
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ if (!sata_pmp_attached(ap))
+ ata_dev = &ap->link.device[sdev->id];
+ else
+ ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id];
+ }
+ else {
ata_dev = &ap->link.device[sdev->id];
+ }
*handle = ata_dev_acpi_handle(ata_dev);
diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c
index 3cc7096cfda7..f46fbd3bd3fb 100644
--- a/trunk/drivers/ata/libata-core.c
+++ b/trunk/drivers/ata/libata-core.c
@@ -2942,6 +2942,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
if (xfer_mode == t->mode)
return t;
+
+ WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
+ __func__, xfer_mode);
+
return NULL;
}
diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c
index e3bda074fa12..a6df6a351d6e 100644
--- a/trunk/drivers/ata/libata-scsi.c
+++ b/trunk/drivers/ata/libata-scsi.c
@@ -1052,6 +1052,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
+ sdev->no_report_opcodes = 1;
+ sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
diff --git a/trunk/drivers/ata/pata_arasan_cf.c b/trunk/drivers/ata/pata_arasan_cf.c
index 26201ebef3ca..371fd2c698b7 100644
--- a/trunk/drivers/ata/pata_arasan_cf.c
+++ b/trunk/drivers/ata/pata_arasan_cf.c
@@ -317,6 +317,12 @@ static int cf_init(struct arasan_cf_dev *acdev)
return ret;
}
+ ret = clk_set_rate(acdev->clk, 166000000);
+ if (ret) {
+ dev_warn(acdev->host->dev, "clock set rate failed");
+ return ret;
+ }
+
spin_lock_irqsave(&acdev->host->lock, flags);
/* configure CF interface clock */
writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
@@ -908,7 +914,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int arasan_cf_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
diff --git a/trunk/drivers/ata/sata_highbank.c b/trunk/drivers/ata/sata_highbank.c
index 0d7c4c2cd26f..400bf1c3e982 100644
--- a/trunk/drivers/ata/sata_highbank.c
+++ b/trunk/drivers/ata/sata_highbank.c
@@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
-static int __init ahci_highbank_probe(struct platform_device *pdev)
+static int __devinit ahci_highbank_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
@@ -378,7 +378,7 @@ static int __devexit ahci_highbank_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
diff --git a/trunk/drivers/ata/sata_svw.c b/trunk/drivers/ata/sata_svw.c
index 44a4256533e1..08608de87e4e 100644
--- a/trunk/drivers/ata/sata_svw.c
+++ b/trunk/drivers/ata/sata_svw.c
@@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link,
return 0;
}
+static int k2_sata_softreset(struct ata_link *link,
+ unsigned int *class, unsigned long deadline)
+{
+ u8 dmactl;
+ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+ dmactl = readb(mmio + ATA_DMA_CMD);
+
+ /* Clear the start bit */
+ if (dmactl & ATA_DMA_START) {
+ dmactl &= ~ATA_DMA_START;
+ writeb(dmactl, mmio + ATA_DMA_CMD);
+ }
+
+ return ata_sff_softreset(link, class, deadline);
+}
+
+static int k2_sata_hardreset(struct ata_link *link,
+ unsigned int *class, unsigned long deadline)
+{
+ u8 dmactl;
+ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+ dmactl = readb(mmio + ATA_DMA_CMD);
+
+ /* Clear the start bit */
+ if (dmactl & ATA_DMA_START) {
+ dmactl &= ~ATA_DMA_START;
+ writeb(dmactl, mmio + ATA_DMA_CMD);
+ }
+
+ return sata_sff_hardreset(link, class, deadline);
+}
static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
@@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = {
static struct ata_port_operations k2_sata_ops = {
.inherits = &ata_bmdma_port_ops,
+ .softreset = k2_sata_softreset,
+ .hardreset = k2_sata_hardreset,
.sff_tf_load = k2_sata_tf_load,
.sff_tf_read = k2_sata_tf_read,
.sff_check_status = k2_stat_check_status,
diff --git a/trunk/drivers/base/platform.c b/trunk/drivers/base/platform.c
index 8727e9c5eea4..72c776f2a1f5 100644
--- a/trunk/drivers/base/platform.c
+++ b/trunk/drivers/base/platform.c
@@ -83,9 +83,16 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
+#ifdef CONFIG_SPARC
+ /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+ if (!dev || num >= dev->archdata.num_irqs)
+ return -ENXIO;
+ return dev->archdata.irqs[num];
+#else
struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
+#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);
diff --git a/trunk/drivers/base/power/qos.c b/trunk/drivers/base/power/qos.c
index 74a67e0019a2..fbbd4ed2edf2 100644
--- a/trunk/drivers/base/power/qos.c
+++ b/trunk/drivers/base/power/qos.c
@@ -451,7 +451,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
if (ancestor)
error = dev_pm_qos_add_request(ancestor, req, value);
- if (error)
+ if (error < 0)
req->dev = NULL;
return error;
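The tightened test matters if dev_pm_qos_add_request() can return a positive value for a non-error outcome (the change implies it can); treating any non-zero return as failure would then wrongly clear req->dev after a successful call. A minimal illustration of that convention, using a made-up helper:

    #include <stdio.h>

    /* Hypothetical: <0 error, 0 success, 1 success with nothing to change. */
    static int add_request(int already_satisfied)
    {
            return already_satisfied ? 1 : 0;
    }

    int main(void)
    {
            int ret = add_request(1);

            if (ret)                /* old test: treats 1 as a failure */
                    printf("old check: request wrongly rolled back\n");
            if (ret < 0)            /* new test: only real errors      */
                    printf("new check: rolled back\n");
            else
                    printf("new check: request kept\n");
            return 0;
    }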
diff --git a/trunk/drivers/block/aoe/aoecmd.c b/trunk/drivers/block/aoe/aoecmd.c
index 3804a0af3ef1..9fe4f1865558 100644
--- a/trunk/drivers/block/aoe/aoecmd.c
+++ b/trunk/drivers/block/aoe/aoecmd.c
@@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
- q->request_fn(q);
+ __blk_run_queue(q);
}
static void
diff --git a/trunk/drivers/block/floppy.c b/trunk/drivers/block/floppy.c
index 1c49d7173966..2ddd64a9ffde 100644
--- a/trunk/drivers/block/floppy.c
+++ b/trunk/drivers/block/floppy.c
@@ -4330,6 +4330,7 @@ static int __init do_floppy_init(void)
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
+ destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
if (!disks[drive])
break;
@@ -4340,7 +4341,6 @@ static int __init do_floppy_init(void)
}
put_disk(disks[drive]);
}
- destroy_workqueue(floppy_wq);
return err;
}
@@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void)
unregister_blkdev(FLOPPY_MAJOR, "fd");
platform_driver_unregister(&floppy_driver);
+ destroy_workqueue(floppy_wq);
+
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
@@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void)
cancel_delayed_work_sync(&fd_timeout);
cancel_delayed_work_sync(&fd_timer);
- destroy_workqueue(floppy_wq);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c
index adc6f36564cf..9694dd99bbbc 100644
--- a/trunk/drivers/block/mtip32xx/mtip32xx.c
+++ b/trunk/drivers/block/mtip32xx/mtip32xx.c
@@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data)
struct mtip_cmd *command;
int tag, cmdto_cnt = 0;
unsigned int bit, group;
- unsigned int num_command_slots = port->dd->slot_groups * 32;
+ unsigned int num_command_slots;
unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
if (unlikely(!port))
@@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data)
}
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+ num_command_slots = port->dd->slot_groups * 32;
for (tag = 0; tag < num_command_slots; tag++) {
/*
@@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
fis.device);
/* check for erase mode support during secure erase.*/
- if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
- && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+ if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
+ (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
erasemode = 1;
}
@@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* return value
* None
*/
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
int nsect, int nents, int tag, void *callback,
void *data, int dir)
{
@@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
struct mtip_port *port = dd->port;
struct mtip_cmd *command = &port->commands[tag];
int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ u64 start = sector;
/* Map the scatter list for DMA access */
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
@@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
fis->opts = 1 << 7;
fis->command =
(dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
- *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
- *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+ fis->lba_low = start & 0xFF;
+ fis->lba_mid = (start >> 8) & 0xFF;
+ fis->lba_hi = (start >> 16) & 0xFF;
+ fis->lba_low_ex = (start >> 24) & 0xFF;
+ fis->lba_mid_ex = (start >> 32) & 0xFF;
+ fis->lba_hi_ex = (start >> 40) & 0xFF;
fis->device = 1 << 6;
fis->features = nsect & 0xFF;
fis->features_ex = (nsect >> 8) & 0xFF;
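The replaced lines stored the 48-bit LBA with two unaligned 32-bit stores into byte-sized FIS fields; the new code writes each byte explicitly, one shift per field. A standalone round-trip check of that split (field names follow the hunk, but the struct below models only the six LBA bytes, not the full FIS):

    #include <stdint.h>
    #include <stdio.h>

    struct lba_fields {
            uint8_t lba_low, lba_mid, lba_hi;
            uint8_t lba_low_ex, lba_mid_ex, lba_hi_ex;
    };

    static void split(uint64_t start, struct lba_fields *f)
    {
            f->lba_low    = start & 0xFF;
            f->lba_mid    = (start >> 8) & 0xFF;
            f->lba_hi     = (start >> 16) & 0xFF;
            f->lba_low_ex = (start >> 24) & 0xFF;
            f->lba_mid_ex = (start >> 32) & 0xFF;
            f->lba_hi_ex  = (start >> 40) & 0xFF;
    }

    static uint64_t join(const struct lba_fields *f)
    {
            return (uint64_t)f->lba_low            |
                   ((uint64_t)f->lba_mid    << 8)  |
                   ((uint64_t)f->lba_hi     << 16) |
                   ((uint64_t)f->lba_low_ex << 24) |
                   ((uint64_t)f->lba_mid_ex << 32) |
                   ((uint64_t)f->lba_hi_ex  << 40);
    }

    int main(void)
    {
            uint64_t lba = 0xA1B2C3D4E5F6ULL & 0xFFFFFFFFFFFFULL; /* 48 bits */
            struct lba_fields f;

            split(lba, &f);
            printf("%s\n", join(&f) == lba ? "round-trip OK" : "mismatch");
            return 0;
    }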
diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.h b/trunk/drivers/block/mtip32xx/mtip32xx.h
index 5f4a917bd8bb..b1742640556a 100644
--- a/trunk/drivers/block/mtip32xx/mtip32xx.h
+++ b/trunk/drivers/block/mtip32xx/mtip32xx.h
@@ -34,7 +34,7 @@
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
/* check for erase mode support during secure erase */
-#define MTIP_SEC_ERASE_MODE 0x3
+#define MTIP_SEC_ERASE_MODE 0x2
/* # of times to retry timed out/failed IOs */
#define MTIP_MAX_RETRIES 2
@@ -155,14 +155,14 @@ enum {
MTIP_DDF_REBUILD_FAILED_BIT = 8,
};
-__packed struct smart_attr{
+struct smart_attr {
u8 attr_id;
u16 flags;
u8 cur;
u8 worst;
u32 data;
u8 res[3];
-};
+} __packed;
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
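The smart_attr change matters because, at least with GCC, an attribute written before the struct keyword in a bare definition (no declarator) is ignored with a warning, so the old form left the structure padded; placing __packed after the closing brace applies it to the type. A minimal standalone comparison, assuming GCC-style attributes and typical x86 padding:

    #include <stdio.h>

    __attribute__((packed)) struct padded {      /* attribute ignored here */
            unsigned char  a;
            unsigned short b;
            unsigned int   c;
    };

    struct packed_ok {
            unsigned char  a;
            unsigned short b;
            unsigned int   c;
    } __attribute__((packed));                   /* applies to the type    */

    int main(void)
    {
            printf("prefix attribute:  %zu bytes\n", sizeof(struct padded));    /* 8 */
            printf("postfix attribute: %zu bytes\n", sizeof(struct packed_ok)); /* 7 */
            return 0;
    }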
diff --git a/trunk/drivers/bluetooth/ath3k.c b/trunk/drivers/bluetooth/ath3k.c
index fc2de5528dcc..b00000e8aef6 100644
--- a/trunk/drivers/bluetooth/ath3k.c
+++ b/trunk/drivers/bluetooth/ath3k.c
@@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0489, 0xE03D) },
+ { USB_DEVICE(0x0489, 0xE027) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c
index debda27df9b0..ee82f2fb65f0 100644
--- a/trunk/drivers/bluetooth/btusb.c
+++ b/trunk/drivers/bluetooth/btusb.c
@@ -124,6 +124,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
diff --git a/trunk/drivers/bus/omap-ocp2scp.c b/trunk/drivers/bus/omap-ocp2scp.c
index ff63560b8467..0c48b0e05ed6 100644
--- a/trunk/drivers/bus/omap-ocp2scp.c
+++ b/trunk/drivers/bus/omap-ocp2scp.c
@@ -22,6 +22,26 @@
#include
#include
#include
+#include
+
+/**
+ * _count_resources - count for the number of resources
+ * @res: struct resource *
+ *
+ * Count and return the number of resources populated for the device that is
+ * connected to ocp2scp.
+ */
+static unsigned _count_resources(struct resource *res)
+{
+ int cnt = 0;
+
+ while (res->start != res->end) {
+ cnt++;
+ res++;
+ }
+
+ return cnt;
+}
static int ocp2scp_remove_devices(struct device *dev, void *c)
{
@@ -34,20 +54,62 @@ static int ocp2scp_remove_devices(struct device *dev, void *c)
static int __devinit omap_ocp2scp_probe(struct platform_device *pdev)
{
- int ret;
- struct device_node *np = pdev->dev.of_node;
+ int ret;
+ unsigned res_cnt, i;
+ struct device_node *np = pdev->dev.of_node;
+ struct platform_device *pdev_child;
+ struct omap_ocp2scp_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_ocp2scp_dev *dev;
if (np) {
ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
if (ret) {
- dev_err(&pdev->dev, "failed to add resources for ocp2scp child\n");
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
goto err0;
}
+ } else if (pdata) {
+ for (i = 0, dev = *pdata->devices; i < pdata->dev_cnt; i++,
+ dev++) {
+ res_cnt = _count_resources(dev->res);
+
+ pdev_child = platform_device_alloc(dev->drv_name,
+ PLATFORM_DEVID_AUTO);
+ if (!pdev_child) {
+ dev_err(&pdev->dev,
+ "failed to allocate mem for ocp2scp child\n");
+ goto err0;
+ }
+
+ ret = platform_device_add_resources(pdev_child,
+ dev->res, res_cnt);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
+ goto err1;
+ }
+
+ pdev_child->dev.parent = &pdev->dev;
+
+ ret = platform_device_add(pdev_child);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register ocp2scp child device\n");
+ goto err1;
+ }
+ }
+ } else {
+ dev_err(&pdev->dev, "OCP2SCP initialized without plat data\n");
+ return -EINVAL;
}
+
pm_runtime_enable(&pdev->dev);
return 0;
+err1:
+ platform_device_put(pdev_child);
+
err0:
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h
index 1042c1b90376..6ec0fff79bc2 100644
--- a/trunk/drivers/char/agp/intel-agp.h
+++ b/trunk/drivers/char/agp/intel-agp.h
@@ -62,6 +62,12 @@
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
+/* GT PTE cache control fields */
+#define GEN6_PTE_UNCACHED 0x00000002
+#define HSW_PTE_UNCACHED 0x00000000
+#define GEN6_PTE_LLC 0x00000004
+#define GEN6_PTE_LLC_MLC 0x00000006
+#define GEN6_PTE_GFDT 0x00000008
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
@@ -91,6 +97,7 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
+#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -141,6 +148,29 @@
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+#define SNB_GTT_SIZE_0M (0 << 8)
+#define SNB_GTT_SIZE_1M (1 << 8)
+#define SNB_GTT_SIZE_2M (2 << 8)
+#define SNB_GTT_SIZE_MASK (3 << 8)
+
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
@@ -189,5 +219,66 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif
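The SNB_GMCH_GMS_STOLEN_* values above are a plain linear encoding: bits 7:3 of the GMCH control word hold a multiplier of 32 MB. That observation is not in the patch itself, but it makes the long decode switch added to intel-gtt.c easy to sanity-check standalone:

    #include <stdio.h>

    #define SNB_GMCH_GMS_STOLEN_MASK 0xF8

    static unsigned int snb_stolen_mb(unsigned int gmch_ctl)
    {
            return ((gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) >> 3) * 32;
    }

    int main(void)
    {
            printf("%u MB\n", snb_stolen_mb(1 << 3));    /* 32,  ..._STOLEN_32M  */
            printf("%u MB\n", snb_stolen_mb(8 << 3));    /* 256, ..._STOLEN_256M */
            printf("%u MB\n", snb_stolen_mb(0x10 << 3)); /* 512, ..._STOLEN_512M */
            return 0;
    }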
diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c
index dbd901e94ea6..38390f7c6ab6 100644
--- a/trunk/drivers/char/agp/intel-gtt.c
+++ b/trunk/drivers/char/agp/intel-gtt.c
@@ -367,6 +367,62 @@ static unsigned int intel_gtt_stolen_size(void)
stolen_size = 0;
break;
}
+ } else if (INTEL_GTT_GEN == 6) {
+ /*
+ * SandyBridge has new memory control reg at 0x50.w
+ */
+ u16 snb_gmch_ctl;
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ case SNB_GMCH_GMS_STOLEN_32M:
+ stolen_size = MB(32);
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ stolen_size = MB(64);
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ stolen_size = MB(96);
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ stolen_size = MB(128);
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ stolen_size = MB(160);
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ stolen_size = MB(192);
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ stolen_size = MB(224);
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ stolen_size = MB(256);
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ stolen_size = MB(288);
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ stolen_size = MB(320);
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ stolen_size = MB(352);
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ stolen_size = MB(384);
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ stolen_size = MB(416);
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ stolen_size = MB(448);
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ stolen_size = MB(480);
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ stolen_size = MB(512);
+ break;
+ }
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -500,9 +556,29 @@ static unsigned int i965_gtt_total_entries(void)
static unsigned int intel_gtt_total_entries(void)
{
+ int size;
+
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
return i965_gtt_total_entries();
- else {
+ else if (INTEL_GTT_GEN == 6) {
+ u16 snb_gmch_ctl;
+
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+ default:
+ case SNB_GTT_SIZE_0M:
+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+ size = MB(0);
+ break;
+ case SNB_GTT_SIZE_1M:
+ size = MB(1);
+ break;
+ case SNB_GTT_SIZE_2M:
+ size = MB(2);
+ break;
+ }
+ return size/4;
+ } else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
@@ -702,6 +778,9 @@ bool intel_enable_gtt(void)
{
u8 __iomem *reg;
+ if (INTEL_GTT_GEN >= 6)
+ return true;
+
if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
@@ -1070,6 +1149,85 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
+static bool gen6_check_flags(unsigned int flags)
+{
+ return true;
+}
+
+static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+ u32 pte_flags;
+
+ if (type_mask == AGP_USER_MEMORY)
+ pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+ pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ } else { /* set 'normal'/'cached' to LLC by default */
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ }
+
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+ u32 pte_flags;
+
+ if (type_mask == AGP_USER_MEMORY)
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+ pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ } else { /* set 'normal'/'cached' to LLC by default */
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ }
+
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+ u32 pte_flags;
+
+ if (type_mask == AGP_USER_MEMORY)
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ else {
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ }
+
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+
+ writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
+}
+
+static void gen6_cleanup(void)
+{
+}
+
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -1091,29 +1249,41 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
- u32 reg_addr, gtt_addr;
+ u32 reg_addr;
int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
+ if (INTEL_GTT_GEN >= 7)
+ size = MB(2);
+
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
- switch (INTEL_GTT_GEN) {
- case 3:
+ if (INTEL_GTT_GEN == 3) {
+ u32 gtt_addr;
+
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
- break;
- case 5:
- intel_private.gtt_bus_addr = reg_addr + MB(2);
- break;
- default:
- intel_private.gtt_bus_addr = reg_addr + KB(512);
- break;
+ } else {
+ u32 gtt_offset;
+
+ switch (INTEL_GTT_GEN) {
+ case 5:
+ case 6:
+ case 7:
+ gtt_offset = MB(2);
+ break;
+ case 4:
+ default:
+ gtt_offset = KB(512);
+ break;
+ }
+ intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
if (needs_idle_maps())
@@ -1225,6 +1395,32 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
+static const struct intel_gtt_driver sandybridge_gtt_driver = {
+ .gen = 6,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = gen6_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver haswell_gtt_driver = {
+ .gen = 6,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = haswell_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
+static const struct intel_gtt_driver valleyview_gtt_driver = {
+ .gen = 7,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = valleyview_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1305,6 +1501,106 @@ static const struct intel_gtt_driver_description {
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", &ironlake_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
+ "Sandybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
+ "ValleyView", &valleyview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1390,7 +1686,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-struct intel_gtt *intel_gtt_get(void)
+const struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
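The gen6/Haswell/ValleyView write_entry helpers above all rely on the same trick noted in their comments: a 40-bit physical page address is squeezed into a 32-bit PTE by folding bits 39:32 into PTE bits 11:4 ("addr |= (addr >> 28) & 0xff0"). A standalone round-trip of that packing, with the flag bits left out for clarity:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t gen6_pte_pack(uint64_t addr)             /* addr is 4K aligned */
    {
            return (uint32_t)(addr | ((addr >> 28) & 0xff0));
    }

    static uint64_t gen6_pte_unpack(uint32_t pte)
    {
            return (uint64_t)(pte & 0xfffff000) |            /* bits 31:12 in place  */
                   ((uint64_t)((pte >> 4) & 0xff) << 32);    /* bits 39:32 from 11:4 */
    }

    int main(void)
    {
            uint64_t addr = 0xAB12345000ULL;                 /* 40-bit, page aligned */
            uint32_t pte  = gen6_pte_pack(addr);

            printf("%s\n", gen6_pte_unpack(pte) == addr ? "round-trip OK" : "mismatch");
            return 0;
    }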
diff --git a/trunk/drivers/clk/ux500/u8500_clk.c b/trunk/drivers/clk/ux500/u8500_clk.c
index ca4a25ed844c..e2c17d187d98 100644
--- a/trunk/drivers/clk/ux500/u8500_clk.c
+++ b/trunk/drivers/clk/ux500/u8500_clk.c
@@ -40,7 +40,7 @@ void u8500_clk_init(void)
CLK_IS_ROOT|CLK_IGNORE_UNUSED,
32768);
clk_register_clkdev(clk, "clk32k", NULL);
- clk_register_clkdev(clk, NULL, "rtc-pl031");
+ clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
/* PRCMU clocks */
fw_version = prcmu_get_fw_version();
@@ -228,10 +228,17 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE,
BIT(2), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
+
clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE,
BIT(3), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp0");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
+
clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE,
BIT(4), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp1");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE,
BIT(5), 0);
@@ -239,6 +246,7 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE,
BIT(6), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE,
BIT(7), 0);
@@ -246,6 +254,7 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE,
BIT(8), 0);
+ clk_register_clkdev(clk, "apb_pclk", "slimbus0");
clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE,
BIT(9), 0);
@@ -255,11 +264,16 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE,
BIT(10), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
+
clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE,
BIT(11), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp3");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE,
BIT(0), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE,
BIT(1), 0);
@@ -279,12 +293,13 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE,
BIT(5), 0);
+ clk_register_clkdev(clk, "apb_pclk", "msp2");
+ clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi1");
-
clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE,
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi3");
@@ -316,10 +331,15 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE,
BIT(1), 0);
+ clk_register_clkdev(clk, "apb_pclk", "ssp0");
+
clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE,
BIT(2), 0);
+ clk_register_clkdev(clk, "apb_pclk", "ssp1");
+
clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE,
BIT(3), 0);
+ clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE,
BIT(4), 0);
@@ -401,10 +421,17 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.1");
+
clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp0");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
+
clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp1");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE);
@@ -412,17 +439,25 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.2");
+
clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
- /* FIXME: Redefinition of BIT(3). */
+ U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "slimbus0");
+
clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.4");
+
clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp3");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
/* Periph2 */
clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.3");
clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE);
@@ -430,6 +465,8 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "msp2");
+ clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE);
@@ -450,10 +487,15 @@ void u8500_clk_init(void)
/* Periph3 */
clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "ssp0");
+
clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "ssp1");
+
clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE);
+ clk_register_clkdev(clk, NULL, "nmk-i2c.0");
clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE);
diff --git a/trunk/drivers/edac/amd64_edac.h b/trunk/drivers/edac/amd64_edac.h
index 8d4804732bac..8c4139647efc 100644
--- a/trunk/drivers/edac/amd64_edac.h
+++ b/trunk/drivers/edac/amd64_edac.h
@@ -33,7 +33,7 @@
* detection. The mods to Rev F required more family
* information detection.
*
- * Changes/Fixes by Borislav Petkov :
+ * Changes/Fixes by Borislav Petkov :
* - misc fixes and code cleanups
*
* This module is based on the following documents
diff --git a/trunk/drivers/edac/edac_stub.c b/trunk/drivers/edac/edac_stub.c
index 6c86f6e54558..351945fa2ecd 100644
--- a/trunk/drivers/edac/edac_stub.c
+++ b/trunk/drivers/edac/edac_stub.c
@@ -5,7 +5,7 @@
*
* 2007 (c) MontaVista Software, Inc.
* 2010 (c) Advanced Micro Devices Inc.
- * Borislav Petkov
+ * Borislav Petkov
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/trunk/drivers/edac/mce_amd_inj.c b/trunk/drivers/edac/mce_amd_inj.c
index 66b5151c1080..2ae78f20cc28 100644
--- a/trunk/drivers/edac/mce_amd_inj.c
+++ b/trunk/drivers/edac/mce_amd_inj.c
@@ -6,7 +6,7 @@
* This file may be distributed under the terms of the GNU General Public
* License version 2.
*
- * Copyright (c) 2010: Borislav Petkov
+ * Copyright (c) 2010: Borislav Petkov
* Advanced Micro Devices Inc.
*/
@@ -168,6 +168,6 @@ module_init(edac_init_mce_inject);
module_exit(edac_exit_mce_inject);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov ");
+MODULE_AUTHOR("Borislav Petkov ");
MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding");
diff --git a/trunk/drivers/firewire/sbp2.c b/trunk/drivers/firewire/sbp2.c
index 1162d6b3bf85..bb1b392f5cda 100644
--- a/trunk/drivers/firewire/sbp2.c
+++ b/trunk/drivers/firewire/sbp2.c
@@ -1546,6 +1546,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
struct sbp2_logical_unit *lu = sdev->hostdata;
sdev->use_10_for_rw = 1;
+ sdev->no_report_opcodes = 1;
+ sdev->no_write_same = 1;
if (sbp2_param_exclusive_login)
sdev->manage_start_stop = 1;
diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig
index d055cee36942..47150f5ded04 100644
--- a/trunk/drivers/gpio/Kconfig
+++ b/trunk/drivers/gpio/Kconfig
@@ -47,7 +47,7 @@ if GPIOLIB
config OF_GPIO
def_bool y
- depends on OF && !SPARC
+ depends on OF
config DEBUG_GPIO
bool "Debug GPIO calls"
@@ -466,7 +466,7 @@ config GPIO_ADP5588_IRQ
config GPIO_ADNP
tristate "Avionic Design N-bit GPIO expander"
- depends on I2C && OF
+ depends on I2C && OF_GPIO
help
This option enables support for N GPIOs found on Avionic Design
I2C GPIO expanders. The register space will be extended by powers
diff --git a/trunk/drivers/gpio/gpio-mcp23s08.c b/trunk/drivers/gpio/gpio-mcp23s08.c
index 0f425189de11..ce1c84760076 100644
--- a/trunk/drivers/gpio/gpio-mcp23s08.c
+++ b/trunk/drivers/gpio/gpio-mcp23s08.c
@@ -77,7 +77,7 @@ struct mcp23s08_driver_data {
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg)
{
@@ -399,7 +399,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
break;
#endif /* CONFIG_SPI_MASTER */
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
case MCP_TYPE_008:
mcp->ops = &mcp23008_ops;
mcp->chip.ngpio = 8;
@@ -473,7 +473,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
static int __devinit mcp230xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
diff --git a/trunk/drivers/gpio/gpio-mvebu.c b/trunk/drivers/gpio/gpio-mvebu.c
index cf7afb9eb61a..be65c0451ad5 100644
--- a/trunk/drivers/gpio/gpio-mvebu.c
+++ b/trunk/drivers/gpio/gpio-mvebu.c
@@ -92,6 +92,11 @@ static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
return mvchip->membase + GPIO_OUT_OFF;
}
+static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
+{
+ return mvchip->membase + GPIO_BLINK_EN_OFF;
+}
+
static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_IO_CONF_OFF;
@@ -206,6 +211,23 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
return (u >> pin) & 1;
}
+static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct mvebu_gpio_chip *mvchip =
+ container_of(chip, struct mvebu_gpio_chip, chip);
+ unsigned long flags;
+ u32 u;
+
+ spin_lock_irqsave(&mvchip->lock, flags);
+ u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
+ if (value)
+ u |= 1 << pin;
+ else
+ u &= ~(1 << pin);
+ writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
+ spin_unlock_irqrestore(&mvchip->lock, flags);
+}
+
static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
{
struct mvebu_gpio_chip *mvchip =
@@ -244,6 +266,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
if (ret)
return ret;
+ mvebu_gpio_blink(chip, pin, 0);
mvebu_gpio_set(chip, pin, value);
spin_lock_irqsave(&mvchip->lock, flags);
diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig
index 983201b450f1..18321b68b880 100644
--- a/trunk/drivers/gpu/drm/Kconfig
+++ b/trunk/drivers/gpu/drm/Kconfig
@@ -210,5 +210,3 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
-
-source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/trunk/drivers/gpu/drm/Makefile b/trunk/drivers/gpu/drm/Makefile
index 6f58c81cfcbc..2ff5cefe9ead 100644
--- a/trunk/drivers/gpu/drm/Makefile
+++ b/trunk/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o drm_pci.o \
+ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,11 +16,10 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
-drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -49,5 +48,4 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
-obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
diff --git a/trunk/drivers/gpu/drm/ast/ast_ttm.c b/trunk/drivers/gpu/drm/ast/ast_ttm.c
index 0a54f65a8ebb..1a026ac2dfb4 100644
--- a/trunk/drivers/gpu/drm/ast/ast_ttm.c
+++ b/trunk/drivers/gpu/drm/ast/ast_ttm.c
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
ttm_bo_type_device, &astbo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
NULL, ast_bo_ttm_destroy);
if (ret)
return ret;
diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c
index dcd1a8c029eb..101e423c8991 100644
--- a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,15 +35,12 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
};
-static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -52,18 +49,12 @@ static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
kfree(ap);
-
- return 0;
}
static int __devinit
cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int ret;
-
- ret = cirrus_kick_out_firmware_fb(pdev);
- if (ret)
- return ret;
+ cirrus_kick_out_firmware_fb(pdev);
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c b/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 90d770143cc2..bc83f835c830 100644
--- a/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
ttm_bo_type_device, &cirrusbo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
NULL, cirrus_bo_ttm_destroy);
if (ret)
return ret;
diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c
index f2d667b8bee2..ef1b22144d37 100644
--- a/trunk/drivers/gpu/drm/drm_crtc.c
+++ b/trunk/drivers/gpu/drm/drm_crtc.c
@@ -470,8 +470,10 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
+ if (crtc->gamma_store) {
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
+ }
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
@@ -553,17 +555,16 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
- connector->status = connector_status_unknown;
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.edid_property,
0);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.dpms_property, 0);
out:
@@ -2279,21 +2280,13 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
- unsigned int height = r->height / (i != 0 ? vsub : 1);
- unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
- if ((uint64_t) width * cpp > UINT_MAX)
- return -ERANGE;
-
- if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
- return -ERANGE;
-
- if (r->pitches[i] < width * cpp) {
+ if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
@@ -2330,11 +2323,6 @@ int drm_mode_addfb2(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- if (r->flags & ~DRM_MODE_FB_INTERLACED) {
- DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
- return -EINVAL;
- }
-
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
@@ -2928,6 +2916,27 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
+void drm_connector_attach_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t init_val)
+{
+ drm_object_attach_property(&connector->base, property, init_val);
+}
+EXPORT_SYMBOL(drm_connector_attach_property);
+
+int drm_connector_property_set_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t value)
+{
+ return drm_object_property_set_value(&connector->base, property, value);
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t *val)
+{
+ return drm_object_property_get_value(&connector->base, property, val);
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3164,17 +3173,15 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
/* Delete edid, when there is none. */
if (!edid) {
connector->edid_blob_ptr = NULL;
- ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
+ ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
return ret;
}
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
- if (!connector->edid_blob_ptr)
- return -EINVAL;
- ret = drm_object_property_set_value(&connector->base,
+ ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
@@ -3197,9 +3204,6 @@ static bool drm_property_change_is_valid(struct drm_property *property,
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
- } else if (property->flags & DRM_MODE_PROP_BLOB) {
- /* Only the driver knows */
- return true;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@@ -3241,7 +3245,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* store the property value if successful */
if (!ret)
- drm_object_property_set_value(&connector->base, property, value);
+ drm_connector_property_set_value(connector, property, value);
return ret;
}
@@ -3652,12 +3656,9 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- connector->status = connector_status_unknown;
-
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->funcs->reset)
connector->funcs->reset(connector);
- }
}
EXPORT_SYMBOL(drm_mode_config_reset);
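
Aside on the framebuffer_check() hunk: the checks being dropped exist because width * cpp and height * pitch are 32-bit products that can wrap silently, so the code promoted them to 64 bits before comparing against UINT_MAX. A minimal standalone illustration with hypothetical plane numbers:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint32_t width = 0x40000000, cpp = 8;    /* hypothetical plane geometry */

    /* the 32-bit product wraps around to a harmless-looking value... */
    printf("32-bit product: %" PRIu32 "\n", width * cpp);

    /* ...which is why the check promotes to 64 bits before comparing */
    if ((uint64_t)width * cpp > UINT32_MAX)
        printf("would be rejected with -ERANGE\n");
    return 0;
}
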
diff --git a/trunk/drivers/gpu/drm/drm_crtc_helper.c b/trunk/drivers/gpu/drm/drm_crtc_helper.c
index 7b2d378b2576..1227adf74dbc 100644
--- a/trunk/drivers/gpu/drm/drm_crtc_helper.c
+++ b/trunk/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,35 +39,6 @@
#include
#include
-/**
- * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
- * connector list
- * @dev: drm device to operate on
- *
- * Some userspace presumes that the first connected connector is the main
- * display, where it's supposed to display e.g. the login screen. For
- * laptops, this should be the main panel. Use this function to sort all
- * (eDP/LVDS) panels to the front of the connector list, instead of
- * painstakingly trying to initialize them in the right order.
- */
-void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
-{
- struct drm_connector *connector, *tmp;
- struct list_head panel_list;
-
- INIT_LIST_HEAD(&panel_list);
-
- list_for_each_entry_safe(connector, tmp,
- &dev->mode_config.connector_list, head) {
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- list_move_tail(&connector->head, &panel_list);
- }
-
- list_splice(&panel_list, &dev->mode_config.connector_list);
-}
-EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
-
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
@@ -93,21 +64,22 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
- * @connector: connector to probe
+ * @dev: DRM device
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Based on the helper callbacks implemented by @connector try to detect all
- * valid modes. Modes will first be added to the connector's probed_modes list,
- * then culled (based on validity and the @maxX, @maxY parameters) and put into
- * the normal modes list.
+ * Based on @dev's mode_config layout, scan all the connectors and try to detect
+ * modes on them. Modes will first be added to the connector's probed_modes
+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
+ * put into the normal modes list.
*
- * Intended to be used as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ * FIXME: take into account monitor limits
*
* RETURNS:
* Number of modes found on @connector.
@@ -137,13 +109,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
- }
-
- /* Re-enable polling in case the global poll config changed. */
- if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);
-
- dev->mode_config.poll_running = drm_kms_helper_poll;
+ }
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -358,24 +325,17 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
}
/**
- * drm_crtc_helper_set_mode - internal helper to set a mode
+ * drm_crtc_set_mode - set a mode
* @crtc: CRTC to program
* @mode: mode to use
- * @x: horizontal offset into the surface
- * @y: vertical offset into the surface
- * @old_fb: old framebuffer, for cleanup
+ * @x: width of mode
+ * @y: height of mode
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it. This is an internal
- * helper that drivers could e.g. use to update properties that require the
- * entire output pipe to be disabled and re-enabled in a new configuration. For
- * example for changing whether audio is enabled on a hdmi link or for changing
- * panel fitter or dither attributes. It is also called by the
- * drm_crtc_helper_set_config() helper function to drive the mode setting
- * sequence.
+ * to fixup or reject the mode prior to trying to set it.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
@@ -531,19 +491,20 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/**
* drm_crtc_helper_set_config - set a new config from userspace
- * @set: mode set configuration
+ * @crtc: CRTC to setup
+ * @crtc_info: user provided configuration
+ * @new_mode: new mode to set
+ * @connector_set: set of connectors for the new config
+ * @fb: new framebuffer
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Setup a new configuration, provided by the upper layers (either an ioctl call
- * from userspace or internally e.g. from the fbdev support code) in @set, and
- * enable it. This is the main helper functions for drivers that implement
- * kernel mode setting with the crtc helper functions and the assorted
- * ->prepare(), ->modeset() and ->commit() helper callbacks.
+ * Setup a new configuration, provided by the user in @crtc_info, and enable
+ * it.
*
* RETURNS:
- * Returns 0 on success, -ERRNO on failure.
+ * Zero. (FIXME)
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@@ -839,14 +800,12 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
- * drm_helper_connector_dpms() - connector dpms helper implementation
- * @connector: affected connector
- * @mode: DPMS mode
+ * drm_helper_connector_dpms
+ * @connector affected connector
+ * @mode DPMS mode
*
- * This is the main helper function provided by the crtc helper framework for
- * implementing the DPMS connector attribute. It computes the new desired DPMS
- * state for all encoders and crtcs in the output mesh and calls the ->dpms()
- * callback provided by the driver appropriately.
+ * Calls the low-level connector DPMS function, then
+ * calls appropriate encoder and crtc DPMS functions as well
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
@@ -959,15 +918,6 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
-void drm_kms_helper_hotplug_event(struct drm_device *dev)
-{
- /* send a uevent + call fbdev */
- drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
-}
-EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
-
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
@@ -983,22 +933,20 @@ static void output_poll_execute(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- /* Ignore forced connectors. */
- if (connector->force)
+ /* if this is HPD or polled don't check it -
+ TV out for instance */
+ if (!connector->polled)
continue;
- /* Ignore HPD capable connectors and connectors where we don't
- * want any hotplug detection at all for polling. */
- if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
- continue;
-
- repoll = true;
+ else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
+ repoll = true;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+ !(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
connector->status = connector->funcs->detect(connector, false);
@@ -1012,8 +960,12 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);
- if (changed)
- drm_kms_helper_hotplug_event(dev);
+ if (changed) {
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+ }
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -1036,8 +988,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
return;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT))
+ if (connector->polled)
poll = true;
}
@@ -1063,34 +1014,12 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
- struct drm_connector *connector;
- enum drm_connector_status old_status;
- bool changed = false;
-
if (!dev->mode_config.poll_enabled)
return;
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
- /* Only handle HPD capable connectors. */
- if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
- continue;
-
- old_status = connector->status;
-
- connector->status = connector->funcs->detect(connector, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
- connector->base.id,
- drm_get_connector_name(connector),
- old_status, connector->status);
- if (old_status != connector->status)
- changed = true;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-
- if (changed)
- drm_kms_helper_hotplug_event(dev);
+ /* kill timer and schedule immediate execution, this doesn't block */
+ cancel_delayed_work(&dev->mode_config.output_poll_work);
+ if (drm_kms_helper_poll)
+ schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
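
The output_poll_execute() and drm_kms_helper_poll_enable() hunks reduce to a per-connector decision driven by the polled flag bits: whether detect() should run from the poll worker at all, and whether another poll cycle needs to be scheduled. A standalone sketch of the decision as restored above, using the DRM_CONNECTOR_POLL_* bit values as defined in drm_crtc.h of that era (an assumption worth checking) and a made-up connector record:

#include <stdio.h>
#include <stdbool.h>

#define DRM_CONNECTOR_POLL_HPD        (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT    (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)

enum status { DISCONNECTED, CONNECTED };

struct fake_connector {
    unsigned polled;      /* poll capability bits */
    enum status status;   /* last known state */
};

/* mirrors the per-connector decision in the restored output_poll_execute():
 * returns true when detect() should be called, and sets *repoll when another
 * poll cycle is needed for this connector */
static bool should_detect(const struct fake_connector *c, bool *repoll)
{
    if (!c->polled)
        return false;
    if (c->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
        *repoll = true;
    if (c->status == CONNECTED &&
        !(c->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
        !(c->polled & DRM_CONNECTOR_POLL_HPD))
        return false;
    return true;
}

int main(void)
{
    struct fake_connector vga = { DRM_CONNECTOR_POLL_CONNECT |
                                  DRM_CONNECTOR_POLL_DISCONNECT, DISCONNECTED };
    struct fake_connector tv  = { 0, DISCONNECTED };
    bool repoll = false;

    printf("vga: detect=%d\n", should_detect(&vga, &repoll));  /* 1 */
    printf("tv:  detect=%d\n", should_detect(&tv, &repoll));   /* 0 */
    printf("repoll=%d\n", repoll);                              /* 1 */
    return 0;
}

Note that on this older path HPD-capable connectors are still detected from the poll worker, whereas the removed version skipped them and relied on drm_helper_hpd_irq_event() instead.
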
diff --git a/trunk/drivers/gpu/drm/drm_dp_helper.c b/trunk/drivers/gpu/drm/drm_dp_i2c_helper.c
similarity index 58%
rename from trunk/drivers/gpu/drm/drm_dp_helper.c
rename to trunk/drivers/gpu/drm/drm_dp_i2c_helper.c
index 89e196627160..7f246f212457 100644
--- a/trunk/drivers/gpu/drm/drm_dp_helper.c
+++ b/trunk/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -30,15 +30,6 @@
#include
#include
-/**
- * DOC: dp helpers
- *
- * These functions contain some common logic and helpers at various abstraction
- * levels to deal with Display Port sink devices and related things like DP aux
- * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
- * blocks, ...
- */
-
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -46,7 +37,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
-
+
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
@@ -191,6 +182,7 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
+
}
static int
@@ -202,23 +194,11 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
return 0;
}
-/**
- * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
- * @adapter: i2c adapter to register
- *
- * This registers an i2c adapter that uses the dp aux channel as its underlying
- * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
- * and store it in the algo_data member of the @adapter argument. This will be
- * used by the i2c over dp aux algorithm to drive the hardware.
- *
- * RETURNS:
- * 0 on success, -ERRNO on failure.
- */
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
-
+
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
@@ -226,123 +206,3 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
-
-/* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
-{
- return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_LANE0_1_STATUS + (lane >> 1);
- int s = (lane & 1) * 4;
- u8 l = dp_link_status(link_status, i);
- return (l >> s) & 0xf;
-}
-
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count)
-{
- u8 lane_align;
- u8 lane_status;
- int lane;
-
- lane_align = dp_link_status(link_status,
- DP_LANE_ALIGN_STATUS_UPDATED);
- if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return false;
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
- return false;
- }
- return true;
-}
-EXPORT_SYMBOL(drm_dp_channel_eq_ok);
-
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count)
-{
- int lane;
- u8 lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return false;
- }
- return true;
-}
-EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
-
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
- DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- u8 l = dp_link_status(link_status, i);
-
- return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
-
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
-{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
- int s = ((lane & 1) ?
- DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
- DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- u8 l = dp_link_status(link_status, i);
-
- return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
-
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
- udelay(100);
- else
- mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
-}
-EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
- udelay(400);
- else
- mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
-}
-EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
-
-u8 drm_dp_link_rate_to_bw_code(int link_rate)
-{
- switch (link_rate) {
- case 162000:
- default:
- return DP_LINK_BW_1_62;
- case 270000:
- return DP_LINK_BW_2_7;
- case 540000:
- return DP_LINK_BW_5_4;
- }
-}
-EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
-
-int drm_dp_bw_code_to_link_rate(u8 link_bw)
-{
- switch (link_bw) {
- case DP_LINK_BW_1_62:
- default:
- return 162000;
- case DP_LINK_BW_2_7:
- return 270000;
- case DP_LINK_BW_5_4:
- return 540000;
- }
-}
-EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
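
Most of the link-training helpers removed above decode packed DPCD fields; the lane status bytes, for instance, carry two lanes each, with the lane in the low or high nibble depending on its parity. A standalone sketch of that extraction, using a made-up status array instead of real DPCD reads:

#include <stdio.h>
#include <stdint.h>

/* two lanes share one status byte: even lanes in the low nibble, odd lanes
 * in the high nibble, as in the removed dp_get_lane_status() */
static uint8_t lane_status(const uint8_t *status, int lane)
{
    uint8_t byte = status[lane >> 1];
    int shift = (lane & 1) * 4;

    return (byte >> shift) & 0xf;
}

int main(void)
{
    uint8_t status[2] = { 0x77, 0x17 };   /* hypothetical 4-lane link status */
    int lane;

    for (lane = 0; lane < 4; lane++)
        printf("lane %d status: 0x%x\n", lane, lane_status(status, lane));
    return 0;
}
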
diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c
index 484c36a4b7a5..fadcd44ff196 100644
--- a/trunk/drivers/gpu/drm/drm_edid.c
+++ b/trunk/drivers/gpu/drm/drm_edid.c
@@ -307,9 +307,12 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
- if (memchr_inv(in_edid, 0, length))
- return false;
+ int i;
+ u32 *raw_edid = (u32 *)in_edid;
+ for (i = 0; i < length / 4; i++)
+ if (*(raw_edid + i) != 0)
+ return false;
return true;
}
@@ -1513,26 +1516,6 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
-/*
- * Looks for a CEA mode matching given drm_display_mode.
- * Returns its CEA Video ID code, or 0 if not found.
- */
-u8 drm_match_cea_mode(struct drm_display_mode *to_match)
-{
- struct drm_display_mode *cea_mode;
- u8 mode;
-
- for (mode = 0; mode < drm_num_cea_modes; mode++) {
- cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
-
- if (drm_mode_equal(to_match, cea_mode))
- return mode + 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_match_cea_mode);
-
-
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
@@ -1639,7 +1622,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_DEBUG_KMS("HDMI: DVI dual %d, "
+ DRM_LOG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index 954d175bd7fa..4d58d7e6af3f 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -27,8 +27,6 @@
* Dave Airlie
* Jesse Barnes
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include
#include
#include
@@ -45,15 +43,6 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
-/**
- * DOC: fbdev helpers
- *
- * The fb helper functions are useful to provide an fbdev on top of a drm kernel
- * mode setting driver. They can be used mostly independently from the crtc
- * helper functions used by many drivers to implement the kernel mode setting
- * interfaces.
- */
-
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -106,16 +95,10 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
if (mode->force) {
const char *s;
switch (mode->force) {
- case DRM_FORCE_OFF:
- s = "OFF";
- break;
- case DRM_FORCE_ON_DIGITAL:
- s = "ON - dig";
- break;
+ case DRM_FORCE_OFF: s = "OFF"; break;
+ case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
default:
- case DRM_FORCE_ON:
- s = "ON";
- break;
+ case DRM_FORCE_ON: s = "ON"; break;
}
DRM_INFO("forcing %s connector %s\n",
@@ -282,7 +265,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
if (panic_timeout < 0)
return 0;
- pr_err("panic occurred, switching back to text console\n");
+ printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -348,7 +331,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
dev->mode_config.dpms_property, dpms_mode);
}
}
@@ -450,7 +433,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
- pr_info("drm: unregistered panic notifier\n");
+ printk(KERN_INFO "drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -741,9 +724,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != sizes.surface_bpp)
+ if (preferred_bpp != sizes.surface_bpp) {
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
-
+ }
/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -811,16 +794,18 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
/* set the fb pointer */
- for (i = 0; i < fb_helper->crtc_count; i++)
+ for (i = 0; i < fb_helper->crtc_count; i++) {
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+ }
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
+ if (register_framebuffer(info) < 0) {
return -EINVAL;
+ }
- dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
} else {
drm_fb_helper_set_par(info);
@@ -829,7 +814,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- dev_info(fb_helper->dev->dev, "registered panic notifier\n");
+ printk(KERN_INFO "drm: registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1017,11 +1002,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
- if (strict)
+ if (strict) {
enable = connector->status == connector_status_connected;
- else
+ } else {
enable = connector->status != connector_status_disconnected;
-
+ }
return enable;
}
@@ -1206,8 +1191,9 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0)
+ if ((encoder->possible_crtcs & (1 << c)) == 0) {
continue;
+ }
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
@@ -1260,11 +1246,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
- if (!crtcs || !modes || !enabled) {
- DRM_ERROR("Memory allocation failed\n");
- goto out;
- }
-
drm_enable_connectors(fb_helper, enabled);
@@ -1303,7 +1284,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
}
}
-out:
kfree(crtcs);
kfree(modes);
kfree(enabled);
@@ -1311,14 +1291,12 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/**
* drm_helper_initial_config - setup a sane initial connector configuration
- * @fb_helper: fb_helper device struct
- * @bpp_sel: bpp value to use for the framebuffer configuration
+ * @dev: DRM device
*
* LOCKING:
- * Called at init time by the driver to set up the @fb_helper initial
- * configuration, must take the mode config lock.
+ * Called at init time, must take mode config lock.
*
- * Scans the CRTCs and connectors and tries to put together an initial setup.
+ * Scan the CRTCs and connectors and try to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
@@ -1341,9 +1319,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
/*
* we shouldn't end up with no modes here.
*/
- if (count == 0)
- dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
-
+ if (count == 0) {
+ printk(KERN_INFO "No connectors reported connected with modes\n");
+ }
drm_setup_crtcs(fb_helper);
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1352,7 +1330,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
- * probing all the outputs attached to the fb
+ * probing all the outputs attached to the fb.
* @fb_helper: the drm_fb_helper
*
* LOCKING:
diff --git a/trunk/drivers/gpu/drm/drm_fops.c b/trunk/drivers/gpu/drm/drm_fops.c
index 7ef1b673e1be..133b4132983e 100644
--- a/trunk/drivers/gpu/drm/drm_fops.c
+++ b/trunk/drivers/gpu/drm/drm_fops.c
@@ -121,6 +121,8 @@ int drm_open(struct inode *inode, struct file *filp)
int minor_id = iminor(inode);
struct drm_minor *minor;
int retcode = 0;
+ int need_setup = 0;
+ struct address_space *old_mapping;
minor = idr_find(&drm_minors_idr, minor_id);
if (!minor)
@@ -132,23 +134,37 @@ int drm_open(struct inode *inode, struct file *filp)
if (drm_device_is_unplugged(dev))
return -ENODEV;
+ if (!dev->open_count++)
+ need_setup = 1;
+ mutex_lock(&dev->struct_mutex);
+ old_mapping = dev->dev_mapping;
+ if (old_mapping == NULL)
+ dev->dev_mapping = &inode->i_data;
+ /* ihold ensures nobody can remove inode with our i_data */
+ ihold(container_of(dev->dev_mapping, struct inode, i_data));
+ inode->i_mapping = dev->dev_mapping;
+ filp->f_mapping = dev->dev_mapping;
+ mutex_unlock(&dev->struct_mutex);
+
retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- if (!dev->open_count++)
- retcode = drm_setup(dev);
- }
- if (!retcode) {
- mutex_lock(&dev->struct_mutex);
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = &inode->i_data;
- /* ihold ensures nobody can remove inode with our i_data */
- ihold(container_of(dev->dev_mapping, struct inode, i_data));
- inode->i_mapping = dev->dev_mapping;
- filp->f_mapping = dev->dev_mapping;
- mutex_unlock(&dev->struct_mutex);
+ if (retcode)
+ goto err_undo;
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+ if (need_setup) {
+ retcode = drm_setup(dev);
+ if (retcode)
+ goto err_undo;
}
+ return 0;
+err_undo:
+ mutex_lock(&dev->struct_mutex);
+ filp->f_mapping = old_mapping;
+ inode->i_mapping = old_mapping;
+ iput(container_of(dev->dev_mapping, struct inode, i_data));
+ dev->dev_mapping = old_mapping;
+ mutex_unlock(&dev->struct_mutex);
+ dev->open_count--;
return retcode;
}
EXPORT_SYMBOL(drm_open);
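
The drm_open() rework above is mostly first-open bookkeeping: setup runs only for the opener that takes open_count from 0 to 1, and every failure unwinds through err_undo so the count and the mapping are restored. A standalone sketch of just the counting part, with a fabricated device struct and an injected failure:

#include <stdio.h>

struct fake_dev {
    int open_count;
    int setup_done;
};

static int do_setup(struct fake_dev *dev, int fail)
{
    if (fail)
        return -1;
    dev->setup_done = 1;
    return 0;
}

/* setup only on the first open; roll the count back on failure, like the
 * err_undo label above, so a later open can retry the setup */
static int fake_open(struct fake_dev *dev, int fail_setup)
{
    int need_setup = 0;

    if (!dev->open_count++)
        need_setup = 1;

    if (need_setup && do_setup(dev, fail_setup)) {
        dev->open_count--;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct fake_dev dev = { 0, 0 };

    printf("first open (setup fails): %d, count=%d\n",
           fake_open(&dev, 1), dev.open_count);   /* -1, 0 */
    printf("retry (setup succeeds):   %d, count=%d\n",
           fake_open(&dev, 0), dev.open_count);   /*  0, 1 */
    return 0;
}
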
diff --git a/trunk/drivers/gpu/drm/drm_hashtab.c b/trunk/drivers/gpu/drm/drm_hashtab.c
index 80254547a3f8..c3745c4d46d8 100644
--- a/trunk/drivers/gpu/drm/drm_hashtab.c
+++ b/trunk/drivers/gpu/drm/drm_hashtab.c
@@ -67,8 +67,10 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, list, h_list, head)
+ hlist_for_each(list, h_list) {
+ entry = hlist_entry(list, struct drm_hash_item, head);
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+ }
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -81,7 +83,8 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, list, h_list, head) {
+ hlist_for_each(list, h_list) {
+ entry = hlist_entry(list, struct drm_hash_item, head);
if (entry->key == key)
return list;
if (entry->key > key)
@@ -90,24 +93,6 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
-static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, list, h_list, head) {
- if (entry->key == key)
- return list;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
@@ -120,7 +105,8 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each_entry(entry, list, h_list, head) {
+ hlist_for_each(list, h_list) {
+ entry = hlist_entry(list, struct drm_hash_item, head);
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -128,9 +114,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = list;
}
if (parent) {
- hlist_add_after_rcu(parent, &item->head);
+ hlist_add_after(parent, &item->head);
} else {
- hlist_add_head_rcu(&item->head, h_list);
+ hlist_add_head(&item->head, h_list);
}
return 0;
}
@@ -170,7 +156,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;
- list = drm_ht_find_key_rcu(ht, key);
+ list = drm_ht_find_key(ht, key);
if (!list)
return -EINVAL;
@@ -185,7 +171,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
- hlist_del_init_rcu(list);
+ hlist_del_init(list);
return 0;
}
return -EINVAL;
@@ -193,7 +179,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
- hlist_del_init_rcu(&item->head);
+ hlist_del_init(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
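
The hashtab hunks go back to open-coded hlist walks, but the insertion logic is the same in both versions: each bucket is kept sorted by key, duplicates are rejected, and the new item is linked in front of the first larger key. A standalone sketch, with a plain singly linked list standing in for the kernel hlist:

#include <stdio.h>

struct item {
    unsigned long key;
    struct item *next;
};

/* walk the bucket, reject duplicates, insert before the first larger key */
static int insert_sorted(struct item **head, struct item *item)
{
    struct item **pos = head;

    while (*pos) {
        if ((*pos)->key == item->key)
            return -1;              /* -EINVAL in the real code */
        if ((*pos)->key > item->key)
            break;
        pos = &(*pos)->next;
    }
    item->next = *pos;
    *pos = item;
    return 0;
}

int main(void)
{
    struct item a = { 30, NULL }, b = { 10, NULL }, c = { 20, NULL };
    struct item *head = NULL, *it;

    insert_sorted(&head, &a);
    insert_sorted(&head, &b);
    insert_sorted(&head, &c);
    printf("duplicate: %d\n", insert_sorted(&head, &c));   /* -1 */
    for (it = head; it; it = it->next)
        printf("key %lu\n", it->key);                      /* 10 20 30 */
    return 0;
}
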
diff --git a/trunk/drivers/gpu/drm/drm_ioctl.c b/trunk/drivers/gpu/drm/drm_ioctl.c
index e77bd8b57df2..23dd97506f28 100644
--- a/trunk/drivers/gpu/drm/drm_ioctl.c
+++ b/trunk/drivers/gpu/drm/drm_ioctl.c
@@ -287,9 +287,6 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
break;
- case DRM_CAP_TIMESTAMP_MONOTONIC:
- req->value = drm_timestamp_monotonic;
- break;
default:
return -EINVAL;
}
diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c
index 19c01ca3cc76..3a3d0ce891b9 100644
--- a/trunk/drivers/gpu/drm/drm_irq.c
+++ b/trunk/drivers/gpu/drm/drm_irq.c
@@ -106,7 +106,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
s64 diff_ns;
int vblrc;
struct timeval tvblank;
- int count = DRM_TIMESTAMP_MAXRETRIES;
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
@@ -132,10 +131,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
do {
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
-
- if (!count)
- vblrc = 0;
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -580,8 +576,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
unsigned flags,
struct drm_crtc *refcrtc)
{
- ktime_t stime, etime, mono_time_offset;
- struct timeval tv_etime;
+ struct timeval stime, raw_time;
struct drm_display_mode *mode;
int vbl_status, vtotal, vdisplay;
int vpos, hpos, i;
@@ -630,15 +625,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
preempt_disable();
/* Get system timestamp before query. */
- stime = ktime_get();
+ do_gettimeofday(&stime);
/* Get vertical and horizontal scanout pos. vpos, hpos. */
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
/* Get system timestamp after query. */
- etime = ktime_get();
- if (!drm_timestamp_monotonic)
- mono_time_offset = ktime_get_monotonic_offset();
+ do_gettimeofday(&raw_time);
preempt_enable();
@@ -649,7 +642,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
- duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
+ duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= (s64) *max_error)
@@ -696,20 +689,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status |= 0x8;
}
- if (!drm_timestamp_monotonic)
- etime = ktime_sub(etime, mono_time_offset);
-
- /* save this only for debugging purposes */
- tv_etime = ktime_to_timeval(etime);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- etime = ktime_sub_ns(etime, delta_ns);
- *vblank_time = ktime_to_timeval(etime);
+ *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
crtc, (int)vbl_status, hpos, vpos,
- (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
+ (long)raw_time.tv_sec, (long)raw_time.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
(int)duration_ns/1000, i);
@@ -721,17 +708,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
-static struct timeval get_drm_timestamp(void)
-{
- ktime_t now;
-
- now = ktime_get();
- if (!drm_timestamp_monotonic)
- now = ktime_sub(now, ktime_get_monotonic_offset());
-
- return ktime_to_timeval(now);
-}
-
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval.
@@ -769,9 +745,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
}
/* GPU high precision timestamp query unsupported or failed.
- * Return current monotonic/gettimeofday timestamp as best estimate.
+ * Return gettimeofday timestamp as best estimate.
*/
- *tvblank = get_drm_timestamp();
+ do_gettimeofday(tvblank);
return 0;
}
@@ -826,47 +802,6 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
-static void send_vblank_event(struct drm_device *dev,
- struct drm_pending_vblank_event *e,
- unsigned long seq, struct timeval *now)
-{
- WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
- e->event.sequence = seq;
- e->event.tv_sec = now->tv_sec;
- e->event.tv_usec = now->tv_usec;
-
- list_add_tail(&e->base.link,
- &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
-}
-
-/**
- * drm_send_vblank_event - helper to send vblank event after pageflip
- * @dev: DRM device
- * @crtc: CRTC in question
- * @e: the event to send
- *
- * Updates sequence # and timestamp on event, and sends it to userspace.
- * Caller must hold event lock.
- */
-void drm_send_vblank_event(struct drm_device *dev, int crtc,
- struct drm_pending_vblank_event *e)
-{
- struct timeval now;
- unsigned int seq;
- if (crtc >= 0) {
- seq = drm_vblank_count_and_time(dev, crtc, &now);
- } else {
- seq = 0;
-
- now = get_drm_timestamp();
- }
- send_vblank_event(dev, e, seq, &now);
-}
-EXPORT_SYMBOL(drm_send_vblank_event);
-
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
@@ -1001,13 +936,6 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
-/**
- * drm_vblank_off - disable vblank events on a CRTC
- * @dev: DRM device
- * @crtc: CRTC in question
- *
- * Caller must hold event lock.
- */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
@@ -1021,19 +949,22 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
-
- spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
- list_del(&e->base.link);
+
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, e->pipe);
- send_vblank_event(dev, e, seq, &now);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
}
- spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1176,9 +1107,15 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
vblwait->reply.sequence = seq;
+ trace_drm_vblank_event_delivered(current->pid, pipe,
+ vblwait->request.sequence);
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1319,9 +1256,14 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
DRM_DEBUG("vblank event on %d, current %d\n",
e->event.sequence, seq);
- list_del(&e->base.link);
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, e->pipe);
- send_vblank_event(dev, e, seq, &now);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
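
One detail worth calling out in the drm_queue_vblank_event() hunk is the wrap-safe sequence test that both versions keep: with a free-running 32-bit vblank counter, the unsigned difference (seq - requested) stays small only when the requested vblank is already in the past. A standalone sketch with made-up counter values:

#include <stdio.h>
#include <stdint.h>

/* "has the requested vblank already passed?" with wrapping 32-bit counters;
 * the 1 << 23 window matches the check in the hunk above */
static int already_passed(uint32_t current_seq, uint32_t requested)
{
    return (uint32_t)(current_seq - requested) <= (uint32_t)(1 << 23);
}

int main(void)
{
    printf("%d\n", already_passed(100, 90));          /* 1: request is in the past */
    printf("%d\n", already_passed(100, 200));         /* 0: still in the future */
    printf("%d\n", already_passed(5, 0xfffffff0u));   /* 1: past, across the wrap */
    return 0;
}
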
diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c
index d8da30e90db5..59450f39bf96 100644
--- a/trunk/drivers/gpu/drm/drm_modes.c
+++ b/trunk/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
*
* Describe @mode using DRM_DEBUG.
*/
-void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
"0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
* RETURNS:
* @mode->hdisplay
*/
-int drm_mode_width(const struct drm_display_mode *mode)
+int drm_mode_width(struct drm_display_mode *mode)
{
return mode->hdisplay;
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
* RETURNS:
* @mode->vdisplay
*/
-int drm_mode_height(const struct drm_display_mode *mode)
+int drm_mode_height(struct drm_display_mode *mode)
{
return mode->vdisplay;
}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* RETURNS:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
* the same */
diff --git a/trunk/drivers/gpu/drm/drm_pci.c b/trunk/drivers/gpu/drm/drm_pci.c
index 754bc96e10c7..ba33144257e5 100644
--- a/trunk/drivers/gpu/drm/drm_pci.c
+++ b/trunk/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
- u32 lnkcap = 0, lnkcap2 = 0;
+ u32 lnkcap, lnkcap2;
*mask = 0;
if (!dev->pdev)
diff --git a/trunk/drivers/gpu/drm/drm_stub.c b/trunk/drivers/gpu/drm/drm_stub.c
index 200e104f1fa0..c236fd27eba6 100644
--- a/trunk/drivers/gpu/drm/drm_stub.c
+++ b/trunk/drivers/gpu/drm/drm_stub.c
@@ -46,24 +46,16 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
-/*
- * Default to use monotonic timestamps for wait-for-vblank and page-flip
- * complete events.
- */
-unsigned int drm_timestamp_monotonic = 1;
-
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
-module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
struct idr drm_minors_idr;
@@ -229,20 +221,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
if (!file_priv->master)
return -EINVAL;
- if (file_priv->minor->master)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
+ if (!file_priv->minor->master &&
+ file_priv->minor->master != file_priv->master) {
+ mutex_lock(&dev->struct_mutex);
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
+ }
}
+ mutex_unlock(&dev->struct_mutex);
}
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -500,7 +492,10 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
- kfree(dev->devname);
+ if (dev->devname) {
+ kfree(dev->devname);
+ dev->devname = NULL;
+ }
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
diff --git a/trunk/drivers/gpu/drm/drm_sysfs.c b/trunk/drivers/gpu/drm/drm_sysfs.c
index 02296653a058..05cd8fe062af 100644
--- a/trunk/drivers/gpu/drm/drm_sysfs.c
+++ b/trunk/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;
- ret = drm_object_property_get_value(&connector->base,
+ ret = drm_connector_property_get_value(connector,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}
- ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
+ ret = drm_connector_property_get_value(connector, prop, &subconnector);
if (ret)
return 0;
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}
- ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
+ ret = drm_connector_property_get_value(connector, prop, &subconnector);
if (ret)
return 0;
diff --git a/trunk/drivers/gpu/drm/exynos/Kconfig b/trunk/drivers/gpu/drm/exynos/Kconfig
index 86fb75d3fcad..fc345d4ebb03 100644
--- a/trunk/drivers/gpu/drm/exynos/Kconfig
+++ b/trunk/drivers/gpu/drm/exynos/Kconfig
@@ -10,12 +10,6 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
-config DRM_EXYNOS_IOMMU
- bool "EXYNOS DRM IOMMU Support"
- depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
- help
- Choose this option if you want to use IOMMU feature for DRM.
-
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
diff --git a/trunk/drivers/gpu/drm/exynos/Makefile b/trunk/drivers/gpu/drm/exynos/Makefile
index 26813b8a5056..eb651ca8e2a8 100644
--- a/trunk/drivers/gpu/drm/exynos/Makefile
+++ b/trunk/drivers/gpu/drm/exynos/Makefile
@@ -8,7 +8,6 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 72bf97b96ba0..118c117b3226 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,42 +33,73 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
+ dma_addr_t start_addr;
+ unsigned int npages, i = 0;
+ struct scatterlist *sgl;
int ret = 0;
- enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_DEBUG_KMS("not support allocation type.\n");
+ return -EINVAL;
+ }
+
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
- init_dma_attrs(&buf->dma_attrs);
+ if (buf->size >= SZ_1M) {
+ npages = buf->size >> SECTION_SHIFT;
+ buf->page_size = SECTION_SIZE;
+ } else if (buf->size >= SZ_64K) {
+ npages = buf->size >> 16;
+ buf->page_size = SZ_64K;
+ } else {
+ npages = buf->size >> PAGE_SHIFT;
+ buf->page_size = PAGE_SIZE;
+ }
- if (flags & EXYNOS_BO_NONCONTIG)
- attr = DMA_ATTR_WRITE_COMBINE;
+ buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ return -ENOMEM;
+ }
- dma_set_attr(attr, &buf->dma_attrs);
+ ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize sg table.\n");
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+ return -ENOMEM;
+ }
- buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
+ buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL);
if (!buf->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err1;
}
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
+ buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+ if (!buf->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
ret = -ENOMEM;
- goto err_free_attrs;
+ goto err2;
}
- ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
- buf->size);
- if (ret < 0) {
- DRM_ERROR("failed to get sgtable.\n");
- goto err_free_sgt;
+ sgl = buf->sgt->sgl;
+ start_addr = buf->dma_addr;
+
+ while (i < npages) {
+ buf->pages[i] = phys_to_page(start_addr);
+ sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
+ sg_dma_address(sgl) = start_addr;
+ start_addr += buf->page_size;
+ sgl = sg_next(sgl);
+ i++;
}
DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
@@ -77,14 +108,14 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
buf->size);
return ret;
-
-err_free_sgt:
+err2:
+ dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr);
+ buf->dma_addr = (dma_addr_t)NULL;
+err1:
+ sg_free_table(buf->sgt);
kfree(buf->sgt);
buf->sgt = NULL;
-err_free_attrs:
- dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
- buf->dma_addr = (dma_addr_t)NULL;
return ret;
}
@@ -94,6 +125,16 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
+ /*
+ * release only physically continuous memory and
+ * non-continuous memory would be released by exynos
+ * gem framework.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_DEBUG_KMS("not support allocation type.\n");
+ return;
+ }
+
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
@@ -109,8 +150,11 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
- dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+ kfree(buf->pages);
+ buf->pages = NULL;
+
+ dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr);
buf->dma_addr = (dma_addr_t)NULL;
}
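
The restored lowlevel_buffer_allocate() carves a physically contiguous allocation into equal chunks whose size is picked from the total size: 1 MiB sections, 64 KiB pages, or 4 KiB pages. A standalone sketch of that tier selection, assuming SECTION_SIZE is 1 MiB as on non-LPAE ARM:

#include <stdio.h>

#define SZ_4K   (1UL << 12)
#define SZ_64K  (1UL << 16)
#define SZ_1M   (1UL << 20)

/* pick the chunk size the way the restored allocator does, then derive the
 * number of scatterlist entries from it */
static void pick_chunks(unsigned long size, unsigned long *nchunks,
                        unsigned long *chunk_size)
{
    if (size >= SZ_1M)
        *chunk_size = SZ_1M;
    else if (size >= SZ_64K)
        *chunk_size = SZ_64K;
    else
        *chunk_size = SZ_4K;
    *nchunks = size / *chunk_size;
}

int main(void)
{
    unsigned long sizes[] = { 16 * SZ_4K, 4 * SZ_64K, 8 * SZ_1M };
    unsigned long n, cs;
    int i;

    for (i = 0; i < 3; i++) {
        pick_chunks(sizes[i], &n, &cs);
        printf("size %8lu -> %lu chunks of %lu bytes\n", sizes[i], n, cs);
    }
    return 0;
}
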
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 25cf16285033..3388e4eb4ba2 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
-/* allocate physical memory region and setup sgt. */
+/* allocate physical memory region and setup sgt and pages. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
-/* release physical memory region, and sgt. */
+/* release physical memory region, sgt and pages. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 2efa4b031d73..fce245f64c4f 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,21 +236,16 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
- spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
- spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
crtc->fb = old_fb;
-
- spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
- spin_unlock_irq(&dev->event_lock);
goto out;
}
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 539da9f4eb97..fae1f2ec886c 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,22 +30,26 @@
#include
-static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
- struct exynos_drm_gem_buf *buf)
+static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
+ unsigned int page_size)
{
struct sg_table *sgt = NULL;
- int ret;
+ struct scatterlist *sgl;
+ int i, ret;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
goto out;
- ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
- buf->dma_addr, buf->size);
- if (ret < 0) {
- DRM_ERROR("failed to get sgtable.\n");
+ ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
+ if (ret)
goto err_free_sgt;
- }
+
+ if (page_size < PAGE_SIZE)
+ page_size = PAGE_SIZE;
+
+ for_each_sg(sgt->sgl, sgl, nr_pages, i)
+ sg_set_page(sgl, pages[i], page_size, 0);
return sgt;
@@ -64,30 +68,32 @@ static struct sg_table *
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct sg_table *sgt = NULL;
+ unsigned int npages;
int nents;
DRM_DEBUG_PRIME("%s\n", __FILE__);
- buf = gem_obj->buffer;
- if (!buf) {
- DRM_ERROR("buffer is null.\n");
- return sgt;
- }
-
mutex_lock(&dev->struct_mutex);
- sgt = exynos_get_sgt(dev, buf);
- if (!sgt)
+ buf = gem_obj->buffer;
+
+ /* there should always be pages allocated. */
+ if (!buf->pages) {
+ DRM_ERROR("pages is null.\n");
goto err_unlock;
+ }
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- if (!nents) {
- DRM_ERROR("failed to map sgl with iommu.\n");
- sgt = NULL;
+ npages = buf->size / buf->page_size;
+
+ sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
+ if (!sgt) {
+ DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
goto err_unlock;
}
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
+ DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
+ npages, buf->size, buf->page_size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -99,7 +105,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-
sg_free_table(sgt);
kfree(sgt);
sgt = NULL;
@@ -191,6 +196,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
+ struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -227,27 +233,38 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
+ buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
+ if (!buffer->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ ret = -ENOMEM;
+ goto err_free_buffer;
+ }
+
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err_free_buffer;
+ goto err_free_pages;
}
sgl = sgt->sgl;
- buffer->size = dma_buf->size;
- buffer->dma_addr = sg_dma_address(sgl);
-
if (sgt->nents == 1) {
+ buffer->dma_addr = sg_dma_address(sgt->sgl);
+ buffer->size = sg_dma_len(sgt->sgl);
+
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
- /*
- * this case could be CONTIG or NONCONTIG type but for now
- * sets NONCONTIG.
- * TODO. we have to find a way that exporter can notify
- * the type of its own buffer to importer.
- */
+ unsigned int i = 0;
+
+ buffer->dma_addr = sg_dma_address(sgl);
+ while (i < sgt->nents) {
+ buffer->pages[i] = sg_page(sgl);
+ buffer->size += sg_dma_len(sgl);
+ sgl = sg_next(sgl);
+ i++;
+ }
+
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
@@ -260,6 +277,9 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
+err_free_pages:
+ kfree(buffer->pages);
+ buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
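The exynos_pages_to_sg() helper introduced above follows the standard pattern for turning a page array into a scatter-gather table. A trimmed sketch of that pattern, assuming fixed PAGE_SIZE entries and leaving out the variable page_size handling (the function name is hypothetical):

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *sketch_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sgt;
	struct scatterlist *sgl;
	int i;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (sg_alloc_table(sgt, nr_pages, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	/* one scatterlist entry per page, each covering a whole page */
	for_each_sg(sgt->sgl, sgl, nr_pages, i)
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);

	return sgt;
}

The map_dma_buf callback then hands the resulting table to dma_map_sg() against the attaching device, exactly as the hunk above does after building it.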
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2b287d2fc92e..1de7baafddd0 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,7 +40,6 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
-#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -67,18 +66,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
- /*
- * create mapping to manage iommu table and set a pointer to iommu
- * mapping structure to iommu_mapping of private data.
- * also this iommu_mapping can be used to check if iommu is supported
- * or not.
- */
- ret = drm_create_iommu_mapping(dev);
- if (ret < 0) {
- DRM_ERROR("failed to create iommu mapping.\n");
- goto err_crtc;
- }
-
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
@@ -93,7 +80,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
- goto err_release_iommu_mapping;
+ goto err_crtc;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -102,12 +89,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_release_iommu_mapping;
+ goto err_crtc;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_release_iommu_mapping;
+ goto err_crtc;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -139,8 +126,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
-err_release_iommu_mapping:
- drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
@@ -157,8 +142,6 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
-
- drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 9c9c2dc75828..a34231036496 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -231,7 +231,8 @@ struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
- struct list_head userptr_list;
+ struct list_head gem_list;
+ unsigned int gem_nr;
};
struct drm_exynos_file_private {
@@ -240,13 +241,6 @@ struct drm_exynos_file_private {
/*
* Exynos drm private structure.
- *
- * @da_start: start address to device address space.
- * with iommu, device address space starts from this address
- * otherwise default one.
- * @da_space_size: size of device address space.
- * if 0 then default value is used for it.
- * @da_space_order: order to device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -261,10 +255,6 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
-
- unsigned long da_start;
- unsigned long da_space_size;
- unsigned long da_space_order;
};
/*
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index d9afb11aac76..f2df06c603f7 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -234,39 +234,6 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
-void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
-{
- struct exynos_drm_encoder *exynos_encoder;
- struct exynos_drm_overlay_ops *overlay_ops;
- struct exynos_drm_manager *manager;
- struct drm_device *dev = fb->dev;
- struct drm_encoder *encoder;
-
- /*
- * make sure that overlay data are updated to real hardware
- * for all encoders.
- */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- exynos_encoder = to_exynos_encoder(encoder);
-
- /* if exynos was disabled, just ignor it. */
- if (exynos_encoder->dpms > DRM_MODE_DPMS_ON)
- continue;
-
- manager = exynos_encoder->manager;
- overlay_ops = manager->overlay_ops;
-
- /*
- * wait for vblank interrupt
- * - this makes sure that overlay data are updated to
- * real hardware.
- */
- if (overlay_ops->wait_for_vblank)
- overlay_ops->wait_for_vblank(manager->dev);
- }
-}
-
-
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
@@ -538,4 +505,14 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
+
+ /*
+ * wait for vblank interrupt
+ * - this makes sure that hardware overlay is disabled to avoid
+ * for the dma accesses to memory after gem buffer was released
+ * because the setting for disabling the overlay will be updated
+ * at vsync.
+ */
+ if (overlay_ops && overlay_ops->wait_for_vblank)
+ overlay_ops->wait_for_vblank(manager->dev);
}
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 88bb25a2a917..6470d9ddf5a1 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,6 +46,5 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
#endif
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 7413f4b729b0..4ef4cd3f9936 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,13 +30,10 @@
#include
#include
#include
-#include
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
-#include "exynos_drm_encoder.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -53,32 +50,6 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
-static int check_fb_gem_memory_type(struct drm_device *drm_dev,
- struct exynos_drm_gem_obj *exynos_gem_obj)
-{
- unsigned int flags;
-
- /*
- * if exynos drm driver supports iommu then framebuffer can use
- * all the buffer types.
- */
- if (is_drm_iommu_supported(drm_dev))
- return 0;
-
- flags = exynos_gem_obj->flags;
-
- /*
- * without iommu support, not support physically non-continuous memory
- * for framebuffer.
- */
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -86,9 +57,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* make sure that overlay data are updated before relesing fb. */
- exynos_drm_encoder_complete_scanout(fb);
-
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -160,25 +128,14 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
- struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
- return ERR_PTR(-EINVAL);
- }
-
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
- exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
-
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
@@ -186,6 +143,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
}
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
return &exynos_fb->fb;
}
@@ -256,9 +214,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
- struct exynos_drm_gem_obj *exynos_gem_obj;
- int ret;
-
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
@@ -267,15 +222,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
- if (ret < 0) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
- exynos_drm_fb_destroy(fb);
- return ERR_PTR(ret);
- }
-
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index a2232792e0c0..e7466c4414cb 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,38 +46,8 @@ struct exynos_drm_fbdev {
struct exynos_drm_gem_obj *exynos_gem_obj;
};
-static int exynos_drm_fb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- struct drm_fb_helper *helper = info->par;
- struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
- struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
- struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
- unsigned long vm_size;
- int ret;
-
- DRM_DEBUG_KMS("%s\n", __func__);
-
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-
- vm_size = vma->vm_end - vma->vm_start;
-
- if (vm_size > buffer->size)
- return -EINVAL;
-
- ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->kvaddr,
- buffer->dma_addr, buffer->size, &buffer->dma_attrs);
- if (ret < 0) {
- DRM_ERROR("failed to mmap.\n");
- return ret;
- }
-
- return 0;
-}
-
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
- .fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -117,8 +87,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)
- (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
+ fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
+ offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 00bd266a31bb..e08478f19f1a 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -25,7 +25,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_iommu.h"
/*
* FIMD is stand for Fully Interactive Mobile Display and
@@ -624,6 +623,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
+ bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -633,6 +633,8 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
+ is_checked = true;
+
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -640,7 +642,22 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- drm_vblank_put(drm_dev, crtc);
+ }
+
+ if (is_checked) {
+ /*
+ * call drm_vblank_put only in case that drm_vblank_get was
+ * called.
+ */
+ if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
+ drm_vblank_put(drm_dev, crtc);
+
+ /*
+ * don't off vblank if vblank_disable_allowed is 1,
+ * because vblank would be off by timer handler.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -692,10 +709,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->vblank_disable_allowed = 1;
- /* attach this sub driver to iommu mapping if supported. */
- if (is_drm_iommu_supported(drm_dev))
- drm_iommu_attach_device(drm_dev, dev);
-
return 0;
}
@@ -703,9 +716,7 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* detach this sub driver from iommu mapping if supported. */
- if (is_drm_iommu_supported(drm_dev))
- drm_iommu_detach_device(drm_dev, dev);
+ /* TODO. */
}
static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -846,16 +857,18 @@ static int __devinit fimd_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->bus_clk = devm_clk_get(dev, "fimd");
+ ctx->bus_clk = clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(ctx->bus_clk);
+ ret = PTR_ERR(ctx->bus_clk);
+ goto err_clk_get;
}
- ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
+ ctx->lcd_clk = clk_get(dev, "sclk_fimd");
if (IS_ERR(ctx->lcd_clk)) {
dev_err(dev, "failed to get lcd clock\n");
- return PTR_ERR(ctx->lcd_clk);
+ ret = PTR_ERR(ctx->lcd_clk);
+ goto err_bus_clk;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -863,13 +876,14 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!ctx->regs) {
dev_err(dev, "failed to map registers\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto err_clk;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "irq request failed.\n");
- return -ENXIO;
+ goto err_clk;
}
ctx->irq = res->start;
@@ -878,7 +892,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
- return ret;
+ goto err_clk;
}
ctx->vidcon0 = pdata->vidcon0;
@@ -912,6 +926,17 @@ static int __devinit fimd_probe(struct platform_device *pdev)
exynos_drm_subdrv_register(subdrv);
return 0;
+
+err_clk:
+ clk_disable(ctx->lcd_clk);
+ clk_put(ctx->lcd_clk);
+
+err_bus_clk:
+ clk_disable(ctx->bus_clk);
+ clk_put(ctx->bus_clk);
+
+err_clk_get:
+ return ret;
}
static int __devexit fimd_remove(struct platform_device *pdev)
@@ -935,6 +960,9 @@ static int __devexit fimd_remove(struct platform_device *pdev)
out:
pm_runtime_disable(dev);
+ clk_put(ctx->lcd_clk);
+ clk_put(ctx->bus_clk);
+
return 0;
}
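The probe changes above switch back from devm_clk_get() to plain clk_get(), which needs an explicit unwind ladder on failure. A condensed sketch of the pattern, with the clock names taken from the hunk and the rest of the setup reduced to a single resource lookup:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
	struct clk *bus_clk, *lcd_clk;
	struct resource *res;
	int ret;

	bus_clk = clk_get(&pdev->dev, "fimd");
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);

	lcd_clk = clk_get(&pdev->dev, "sclk_fimd");
	if (IS_ERR(lcd_clk)) {
		ret = PTR_ERR(lcd_clk);
		goto err_bus_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENXIO;
		goto err_lcd_clk;
	}

	/* ... remaining setup would go here ... */
	return 0;

err_lcd_clk:
	clk_put(lcd_clk);
err_bus_clk:
	clk_put(bus_clk);
	return ret;
}

Each label releases only what was acquired before the failing step, mirroring the err_clk/err_bus_clk/err_clk_get labels added to fimd_probe().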
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6ffa0763c078..f7aab24ea46c 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,14 +17,11 @@
#include
#include
#include
-#include
-#include
#include
#include
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -95,21 +92,11 @@
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
-#define MAX_BUF_ADDR_NR 6
-
-/* maximum buffer pool size of userptr is 64MB as default */
-#define MAX_POOL (64 * 1024 * 1024)
-
-enum {
- BUF_TYPE_GEM = 1,
- BUF_TYPE_USERPTR,
-};
-
/* cmdlist data structure */
struct g2d_cmdlist {
- u32 head;
- unsigned long data[G2D_CMDLIST_DATA_NUM];
- u32 last; /* last data offset */
+ u32 head;
+ u32 data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
};
struct drm_exynos_pending_g2d_event {
@@ -117,26 +104,15 @@ struct drm_exynos_pending_g2d_event {
struct drm_exynos_g2d_event event;
};
-struct g2d_cmdlist_userptr {
+struct g2d_gem_node {
struct list_head list;
- dma_addr_t dma_addr;
- unsigned long userptr;
- unsigned long size;
- struct page **pages;
- unsigned int npages;
- struct sg_table *sgt;
- struct vm_area_struct *vma;
- atomic_t refcount;
- bool in_pool;
- bool out_of_list;
+ unsigned int handle;
};
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
- unsigned int map_nr;
- unsigned long handles[MAX_BUF_ADDR_NR];
- unsigned int obj_type[MAX_BUF_ADDR_NR];
+ unsigned int gem_nr;
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
@@ -146,7 +122,6 @@ struct g2d_runqueue_node {
struct list_head list;
struct list_head run_cmdlist;
struct list_head event_list;
- struct drm_file *filp;
pid_t pid;
struct completion complete;
int async;
@@ -168,33 +143,23 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
- struct dma_attrs cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
struct list_head runqueue;
struct mutex runqueue_mutex;
struct kmem_cache *runqueue_slab;
-
- unsigned long current_pool;
- unsigned long max_pool;
};
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
- init_dma_attrs(&g2d->cmdlist_dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
-
- g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
- G2D_CMDLIST_POOL_SIZE,
- &g2d->cmdlist_pool, GFP_KERNEL,
- &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
@@ -219,20 +184,18 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
- g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
return ret;
}
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ struct device *dev = g2d->dev;
kfree(g2d->cmdlist_node);
- dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
- g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -282,300 +245,62 @@ static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
-static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
- unsigned long obj,
- bool force)
-{
- struct g2d_cmdlist_userptr *g2d_userptr =
- (struct g2d_cmdlist_userptr *)obj;
-
- if (!obj)
- return;
-
- if (force)
- goto out;
-
- atomic_dec(&g2d_userptr->refcount);
-
- if (atomic_read(&g2d_userptr->refcount) > 0)
- return;
-
- if (g2d_userptr->in_pool)
- return;
-
-out:
- exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
- DMA_BIDIRECTIONAL);
-
- exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
- g2d_userptr->npages,
- g2d_userptr->vma);
-
- if (!g2d_userptr->out_of_list)
- list_del_init(&g2d_userptr->list);
-
- sg_free_table(g2d_userptr->sgt);
- kfree(g2d_userptr->sgt);
- g2d_userptr->sgt = NULL;
-
- kfree(g2d_userptr->pages);
- g2d_userptr->pages = NULL;
- kfree(g2d_userptr);
- g2d_userptr = NULL;
-}
-
-dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
- unsigned long userptr,
- unsigned long size,
- struct drm_file *filp,
- unsigned long *obj)
-{
- struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct g2d_cmdlist_userptr *g2d_userptr;
- struct g2d_data *g2d;
- struct page **pages;
- struct sg_table *sgt;
- struct vm_area_struct *vma;
- unsigned long start, end;
- unsigned int npages, offset;
- int ret;
-
- if (!size) {
- DRM_ERROR("invalid userptr size.\n");
- return ERR_PTR(-EINVAL);
- }
-
- g2d = dev_get_drvdata(g2d_priv->dev);
-
- /* check if userptr already exists in userptr_list. */
- list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
- if (g2d_userptr->userptr == userptr) {
- /*
- * also check size because there could be same address
- * and different size.
- */
- if (g2d_userptr->size == size) {
- atomic_inc(&g2d_userptr->refcount);
- *obj = (unsigned long)g2d_userptr;
-
- return &g2d_userptr->dma_addr;
- }
-
- /*
- * at this moment, maybe g2d dma is accessing this
- * g2d_userptr memory region so just remove this
- * g2d_userptr object from userptr_list not to be
- * referred again and also except it the userptr
- * pool to be released after the dma access completion.
- */
- g2d_userptr->out_of_list = true;
- g2d_userptr->in_pool = false;
- list_del_init(&g2d_userptr->list);
-
- break;
- }
- }
-
- g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
- if (!g2d_userptr) {
- DRM_ERROR("failed to allocate g2d_userptr.\n");
- return ERR_PTR(-ENOMEM);
- }
-
- atomic_set(&g2d_userptr->refcount, 1);
-
- start = userptr & PAGE_MASK;
- offset = userptr & ~PAGE_MASK;
- end = PAGE_ALIGN(userptr + size);
- npages = (end - start) >> PAGE_SHIFT;
- g2d_userptr->npages = npages;
-
- pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
- if (!pages) {
- DRM_ERROR("failed to allocate pages.\n");
- kfree(g2d_userptr);
- return ERR_PTR(-ENOMEM);
- }
-
- vma = find_vma(current->mm, userptr);
- if (!vma) {
- DRM_ERROR("failed to get vm region.\n");
- ret = -EFAULT;
- goto err_free_pages;
- }
-
- if (vma->vm_end < userptr + size) {
- DRM_ERROR("vma is too small.\n");
- ret = -EFAULT;
- goto err_free_pages;
- }
-
- g2d_userptr->vma = exynos_gem_get_vma(vma);
- if (!g2d_userptr->vma) {
- DRM_ERROR("failed to copy vma.\n");
- ret = -ENOMEM;
- goto err_free_pages;
- }
-
- g2d_userptr->size = size;
-
- ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
- npages, pages, vma);
- if (ret < 0) {
- DRM_ERROR("failed to get user pages from userptr.\n");
- goto err_put_vma;
- }
-
- g2d_userptr->pages = pages;
-
- sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
- if (!sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- ret = -ENOMEM;
- goto err_free_userptr;
- }
-
- ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
- size, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to get sgt from pages.\n");
- goto err_free_sgt;
- }
-
- g2d_userptr->sgt = sgt;
-
- ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
- DMA_BIDIRECTIONAL);
- if (ret < 0) {
- DRM_ERROR("failed to map sgt with dma region.\n");
- goto err_free_sgt;
- }
-
- g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
- g2d_userptr->userptr = userptr;
-
- list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
-
- if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
- g2d->current_pool += npages << PAGE_SHIFT;
- g2d_userptr->in_pool = true;
- }
-
- *obj = (unsigned long)g2d_userptr;
-
- return &g2d_userptr->dma_addr;
-
-err_free_sgt:
- sg_free_table(sgt);
- kfree(sgt);
- sgt = NULL;
-
-err_free_userptr:
- exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
- g2d_userptr->npages,
- g2d_userptr->vma);
-
-err_put_vma:
- exynos_gem_put_vma(g2d_userptr->vma);
-
-err_free_pages:
- kfree(pages);
- kfree(g2d_userptr);
- pages = NULL;
- g2d_userptr = NULL;
-
- return ERR_PTR(ret);
-}
-
-static void g2d_userptr_free_all(struct drm_device *drm_dev,
- struct g2d_data *g2d,
- struct drm_file *filp)
+static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct g2d_cmdlist_node *node)
{
- struct drm_exynos_file_private *file_priv = filp->driver_priv;
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct g2d_cmdlist_userptr *g2d_userptr, *n;
-
- list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
- if (g2d_userptr->in_pool)
- g2d_userptr_put_dma_addr(drm_dev,
- (unsigned long)g2d_userptr,
- true);
-
- g2d->current_pool = 0;
-}
-
-static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
- struct g2d_cmdlist_node *node,
- struct drm_device *drm_dev,
- struct drm_file *file)
-{
struct g2d_cmdlist *cmdlist = node->cmdlist;
+ dma_addr_t *addr;
int offset;
int i;
- for (i = 0; i < node->map_nr; i++) {
- unsigned long handle;
- dma_addr_t *addr;
+ for (i = 0; i < node->gem_nr; i++) {
+ struct g2d_gem_node *gem_node;
+
+ gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+ if (!gem_node) {
+ dev_err(g2d_priv->dev, "failed to allocate gem node\n");
+ return -ENOMEM;
+ }
offset = cmdlist->last - (i * 2 + 1);
- handle = cmdlist->data[offset];
-
- if (node->obj_type[i] == BUF_TYPE_GEM) {
- addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
- file);
- if (IS_ERR(addr)) {
- node->map_nr = i;
- return -EFAULT;
- }
- } else {
- struct drm_exynos_g2d_userptr g2d_userptr;
-
- if (copy_from_user(&g2d_userptr, (void __user *)handle,
- sizeof(struct drm_exynos_g2d_userptr))) {
- node->map_nr = i;
- return -EFAULT;
- }
-
- addr = g2d_userptr_get_dma_addr(drm_dev,
- g2d_userptr.userptr,
- g2d_userptr.size,
- file,
- &handle);
- if (IS_ERR(addr)) {
- node->map_nr = i;
- return -EFAULT;
- }
+ gem_node->handle = cmdlist->data[offset];
+
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->gem_nr = i;
+ kfree(gem_node);
+ return PTR_ERR(addr);
}
cmdlist->data[offset] = *addr;
- node->handles[i] = handle;
+ list_add_tail(&gem_node->list, &g2d_priv->gem_list);
+ g2d_priv->gem_nr++;
}
return 0;
}
-static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
- struct g2d_cmdlist_node *node,
- struct drm_file *filp)
+static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
+ struct drm_file *file,
+ unsigned int nr)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
- int i;
-
- for (i = 0; i < node->map_nr; i++) {
- unsigned long handle = node->handles[i];
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_gem_node *node, *n;
- if (node->obj_type[i] == BUF_TYPE_GEM)
- exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
- filp);
- else
- g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
- false);
+ list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
+ if (!nr)
+ break;
- node->handles[i] = 0;
+ exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
+ list_del_init(&node->list);
+ kfree(node);
+ nr--;
}
-
- node->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@@ -612,18 +337,10 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
- struct g2d_cmdlist_node *node;
-
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
- /*
- * commands in run_cmdlist have been completed so unmap all gem
- * objects in each command node so that they are unreferenced.
- */
- list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
- g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
@@ -713,28 +430,15 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int g2d_check_reg_offset(struct device *dev,
- struct g2d_cmdlist_node *node,
+static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
int nr, bool for_addr)
{
- struct g2d_cmdlist *cmdlist = node->cmdlist;
int reg_offset;
int index;
int i;
for (i = 0; i < nr; i++) {
index = cmdlist->last - 2 * (i + 1);
-
- if (for_addr) {
- /* check userptr buffer type. */
- reg_offset = (cmdlist->data[index] &
- ~0x7fffffff) >> 31;
- if (reg_offset) {
- node->obj_type[i] = BUF_TYPE_USERPTR;
- cmdlist->data[index] &= ~G2D_BUF_USERPTR;
- }
- }
-
reg_offset = cmdlist->data[index] & ~0xfffff000;
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -751,9 +455,6 @@ static int g2d_check_reg_offset(struct device *dev,
case G2D_MSK_BASE_ADDR:
if (!for_addr)
goto err;
-
- if (node->obj_type[i] != BUF_TYPE_USERPTR)
- node->obj_type[i] = BUF_TYPE_GEM;
break;
default:
if (for_addr)
@@ -765,7 +466,7 @@ static int g2d_check_reg_offset(struct device *dev,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
+ dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -865,7 +566,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
- size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
ret = -EINVAL;
@@ -882,29 +583,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
- node->map_nr = req->cmd_buf_nr;
- if (req->cmd_buf_nr) {
- struct drm_exynos_g2d_cmd *cmd_buf;
+ node->gem_nr = req->cmd_gem_nr;
+ if (req->cmd_gem_nr) {
+ struct drm_exynos_g2d_cmd *cmd_gem;
- cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
+ cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
if (copy_from_user(cmdlist->data + cmdlist->last,
- (void __user *)cmd_buf,
- sizeof(*cmd_buf) * req->cmd_buf_nr)) {
+ (void __user *)cmd_gem,
+ sizeof(*cmd_gem) * req->cmd_gem_nr)) {
ret = -EFAULT;
goto err_free_event;
}
- cmdlist->last += req->cmd_buf_nr * 2;
+ cmdlist->last += req->cmd_gem_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
+ ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
if (ret < 0)
goto err_free_event;
- ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
+ ret = g2d_get_cmdlist_gem(drm_dev, file, node);
if (ret < 0)
goto err_unmap;
}
@@ -923,7 +624,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
return 0;
err_unmap:
- g2d_unmap_cmdlist_gem(g2d, node, file);
+ g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -979,7 +680,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
mutex_lock(&g2d->runqueue_mutex);
runqueue_node->pid = current->pid;
- runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
if (!g2d->runqueue_node)
g2d_exec_runqueue(g2d);
@@ -996,43 +696,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
-static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- struct g2d_data *g2d;
- int ret;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
- /* allocate dma-aware cmdlist buffer. */
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0) {
- dev_err(dev, "cmdlist init failed\n");
- return ret;
- }
-
- if (!is_drm_iommu_supported(drm_dev))
- return 0;
-
- ret = drm_iommu_attach_device(drm_dev, dev);
- if (ret < 0) {
- dev_err(dev, "failed to enable iommu.\n");
- g2d_fini_cmdlist(g2d);
- }
-
- return ret;
-
-}
-
-static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- if (!is_drm_iommu_supported(drm_dev))
- return;
-
- drm_iommu_detach_device(drm_dev, dev);
-}
-
static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
@@ -1050,7 +713,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->userptr_list);
+ INIT_LIST_HEAD(&g2d_priv->gem_list);
return 0;
}
@@ -1071,21 +734,11 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
return;
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
- /*
- * unmap all gem objects not completed.
- *
- * P.S. if current process was terminated forcely then
- * there may be some commands in inuse_cmdlist so unmap
- * them.
- */
- g2d_unmap_cmdlist_gem(g2d, node, file);
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
list_move_tail(&node->list, &g2d->free_cmdlist);
- }
mutex_unlock(&g2d->cmdlist_mutex);
- /* release all g2d_userptr in pool. */
- g2d_userptr_free_all(drm_dev, g2d, file);
+ g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
kfree(file_priv->g2d_priv);
}
@@ -1125,11 +778,15 @@ static int __devinit g2d_probe(struct platform_device *pdev)
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
- g2d->gate_clk = devm_clk_get(dev, "fimg2d");
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0)
+ goto err_destroy_workqueue;
+
+ g2d->gate_clk = clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
- goto err_destroy_workqueue;
+ goto err_fini_cmdlist;
}
pm_runtime_enable(dev);
@@ -1157,14 +814,10 @@ static int __devinit g2d_probe(struct platform_device *pdev)
goto err_put_clk;
}
- g2d->max_pool = MAX_POOL;
-
platform_set_drvdata(pdev, g2d);
subdrv = &g2d->subdrv;
subdrv->dev = dev;
- subdrv->probe = g2d_subdrv_probe;
- subdrv->remove = g2d_subdrv_remove;
subdrv->open = g2d_open;
subdrv->close = g2d_close;
@@ -1181,6 +834,9 @@ static int __devinit g2d_probe(struct platform_device *pdev)
err_put_clk:
pm_runtime_disable(dev);
+ clk_put(g2d->gate_clk);
+err_fini_cmdlist:
+ g2d_fini_cmdlist(g2d);
err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
@@ -1201,6 +857,7 @@ static int __devexit g2d_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
+ clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
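The g2d changes above replace per-node handle arrays with a per-client list of small nodes, so mapped GEM handles can be released later, newest first. A generic sketch of that bookkeeping, with the actual put operation abstracted into a callback standing in for exynos_drm_gem_put_dma_addr():

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct sketch_gem_node {
	struct list_head list;
	unsigned int handle;
};

static int sketch_track_handle(struct list_head *gem_list, unsigned int handle)
{
	struct sketch_gem_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->handle = handle;
	list_add_tail(&node->list, gem_list);
	return 0;
}

static void sketch_put_handles(struct list_head *gem_list, unsigned int nr,
			       void (*put)(unsigned int handle))
{
	struct sketch_gem_node *node, *n;

	/* walk backwards so the most recently tracked handles go first */
	list_for_each_entry_safe_reverse(node, n, gem_list, list) {
		if (!nr)
			break;

		put(node->handle);
		list_del_init(&node->list);
		kfree(node);
		nr--;
	}
}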
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 99227246ce82..d2545560664f 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,40 +83,157 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
- /* TODO */
-
+ if (!IS_NONCONTIG_BUFFER(flags)) {
+ if (size >= SZ_1M)
+ return roundup(size, SECTION_SIZE);
+ else if (size >= SZ_64K)
+ return roundup(size, SZ_64K);
+ else
+ goto out;
+ }
+out:
return roundup(size, PAGE_SIZE);
}
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask)
+{
+ struct page *p, **pages;
+ int i, npages;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < npages; i++) {
+ p = alloc_page(gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+ }
+
+ return pages;
+
+fail:
+ while (--i)
+ __free_page(pages[i]);
+
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+
+static void exynos_gem_put_pages(struct drm_gem_object *obj,
+ struct page **pages)
+{
+ int npages;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ while (--npages >= 0)
+ __free_page(pages[npages]);
+
+ drm_free_large(pages);
+}
+
+static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
unsigned long pfn;
- int i;
- if (!buf->sgt)
- return -EINTR;
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ if (!buf->pages)
+ return -EINTR;
+
+ pfn = page_to_pfn(buf->pages[page_offset++]);
+ } else
+ pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
+
+ return vm_insert_mixed(vma, f_vaddr, pfn);
+}
- if (page_offset >= (buf->size >> PAGE_SHIFT)) {
- DRM_ERROR("invalid page offset\n");
+static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+ struct scatterlist *sgl;
+ struct page **pages;
+ unsigned int npages, i = 0;
+ int ret;
+
+ if (buf->pages) {
+ DRM_DEBUG_KMS("already allocated.\n");
return -EINVAL;
}
+ pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
+ if (IS_ERR(pages)) {
+ DRM_ERROR("failed to get pages.\n");
+ return PTR_ERR(pages);
+ }
+
+ npages = obj->size >> PAGE_SHIFT;
+ buf->page_size = PAGE_SIZE;
+
+ buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize sg table.\n");
+ ret = -EFAULT;
+ goto err1;
+ }
+
sgl = buf->sgt->sgl;
- for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
- if (page_offset < (sgl->length >> PAGE_SHIFT))
- break;
- page_offset -= (sgl->length >> PAGE_SHIFT);
+
+ /* set all pages to sg list. */
+ while (i < npages) {
+ sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
+ sg_dma_address(sgl) = page_to_phys(pages[i]);
+ i++;
+ sgl = sg_next(sgl);
}
- pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+ /* add some codes for UNCACHED type here. TODO */
- return vm_insert_mixed(vma, f_vaddr, pfn);
+ buf->pages = pages;
+ return ret;
+err1:
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+err:
+ exynos_gem_put_pages(obj, pages);
+ return ret;
+
+}
+
+static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+
+ /*
+ * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
+ * allocated at gem fault handler.
+ */
+ sg_free_table(buf->sgt);
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+
+ exynos_gem_put_pages(obj, buf->pages);
+ buf->pages = NULL;
+
+ /* add some codes for UNCACHED type here. TODO */
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -153,6 +270,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+ if (!buf->pages)
+ return;
+
/*
* do not release memory region from exporter.
*
@@ -162,7 +282,10 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
+ exynos_drm_gem_put_pages(obj);
+ else
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
@@ -241,10 +364,22 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
+ /*
+ * allocate all pages as desired size if user wants to allocate
+ * physically non-continuous memory.
+ */
+ if (flags & EXYNOS_BO_NONCONTIG) {
+ ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
+ }
+ } else {
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
+ }
}
return exynos_gem_obj;
@@ -277,14 +412,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *filp)
+ struct drm_file *file_priv)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, filp, gem_handle);
+ obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
@@ -292,17 +427,25 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ DRM_DEBUG_KMS("not support NONCONTIG type.\n");
+ drm_gem_object_unreference_unlocked(obj);
+
+ /* TODO */
+ return ERR_PTR(-EINVAL);
+ }
+
return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *filp)
+ struct drm_file *file_priv)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, filp, gem_handle);
+ obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
@@ -310,6 +453,14 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ DRM_DEBUG_KMS("not support NONCONTIG type.\n");
+ drm_gem_object_unreference_unlocked(obj);
+
+ /* TODO */
+ return;
+ }
+
drm_gem_object_unreference_unlocked(obj);
/*
@@ -338,57 +489,22 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
-static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
- struct file *filp)
-{
- struct drm_file *file_priv;
-
- mutex_lock(&drm_dev->struct_mutex);
-
- /* find current process's drm_file from filelist. */
- list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
- if (file_priv->filp == filp) {
- mutex_unlock(&drm_dev->struct_mutex);
- return file_priv;
- }
- }
-
- mutex_unlock(&drm_dev->struct_mutex);
- WARN_ON(1);
-
- return ERR_PTR(-EFAULT);
-}
-
static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct drm_device *drm_dev = obj->dev;
struct exynos_drm_gem_buf *buffer;
- struct drm_file *file_priv;
- unsigned long vm_size;
+ unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = obj;
- vma->vm_ops = drm_dev->driver->gem_vm_ops;
-
- /* restore it to driver's fops. */
- filp->f_op = fops_get(drm_dev->driver->fops);
-
- file_priv = exynos_drm_find_drm_file(drm_dev, filp);
- if (IS_ERR(file_priv))
- return PTR_ERR(file_priv);
-
- /* restore it to drm_file. */
- filp->private_data = file_priv;
update_vm_cache_attr(exynos_gem_obj, vma);
- vm_size = vma->vm_end - vma->vm_start;
+ vm_size = usize = vma->vm_end - vma->vm_start;
/*
* a buffer contains information to physically continuous memory
@@ -400,23 +516,40 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->kvaddr,
- buffer->dma_addr, buffer->size,
- &buffer->dma_attrs);
- if (ret < 0) {
- DRM_ERROR("failed to mmap.\n");
- return ret;
- }
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+ int i = 0;
- /*
- * take a reference to this mapping of the object. And this reference
- * is unreferenced by the corresponding vm_close call.
- */
- drm_gem_object_reference(obj);
+ if (!buffer->pages)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_MIXEDMAP;
- mutex_lock(&drm_dev->struct_mutex);
- drm_vm_open_locked(drm_dev, vma);
- mutex_unlock(&drm_dev->struct_mutex);
+ do {
+ ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
+ if (ret) {
+ DRM_ERROR("failed to remap user space.\n");
+ return ret;
+ }
+
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+ } else {
+ /*
+ * get page frame number to physical memory to be mapped
+ * to user space.
+ */
+ pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ PAGE_SHIFT;
+
+ DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
+
+ if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
+ vma->vm_page_prot)) {
+ DRM_ERROR("failed to remap pfn range.\n");
+ return -EAGAIN;
+ }
+ }
return 0;
}
@@ -445,29 +578,16 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- /*
- * Set specific mmper's fops. And it will be restored by
- * exynos_drm_gem_mmap_buffer to dev->driver->fops.
- * This is used to call specific mapper temporarily.
- */
- file_priv->filp->f_op = &exynos_drm_gem_fops;
+ obj->filp->f_op = &exynos_drm_gem_fops;
+ obj->filp->private_data = obj;
- /*
- * Set gem object to private_data so that specific mmaper
- * can get the gem object. And it will be restored by
- * exynos_drm_gem_mmap_buffer to drm_file.
- */
- file_priv->filp->private_data = obj;
-
- addr = vm_mmap(file_priv->filp, 0, args->size,
+ addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
drm_gem_object_unreference_unlocked(obj);
- if (IS_ERR((void *)addr)) {
- file_priv->filp->private_data = file_priv;
+ if (IS_ERR((void *)addr))
return PTR_ERR((void *)addr);
- }
args->mapped = addr;
@@ -502,129 +622,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
- struct vm_area_struct *vma_copy;
-
- vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
- if (!vma_copy)
- return NULL;
-
- if (vma->vm_ops && vma->vm_ops->open)
- vma->vm_ops->open(vma);
-
- if (vma->vm_file)
- get_file(vma->vm_file);
-
- memcpy(vma_copy, vma, sizeof(*vma));
-
- vma_copy->vm_mm = NULL;
- vma_copy->vm_next = NULL;
- vma_copy->vm_prev = NULL;
-
- return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
- if (!vma)
- return;
-
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
-
- if (vma->vm_file)
- fput(vma->vm_file);
-
- kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
- unsigned int npages,
- struct page **pages,
- struct vm_area_struct *vma)
-{
- int get_npages;
-
- /* the memory region mmaped with VM_PFNMAP. */
- if (vma_is_io(vma)) {
- unsigned int i;
-
- for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
- unsigned long pfn;
- int ret = follow_pfn(vma, start, &pfn);
- if (ret)
- return ret;
-
- pages[i] = pfn_to_page(pfn);
- }
-
- if (i != npages) {
- DRM_ERROR("failed to get user_pages.\n");
- return -EINVAL;
- }
-
- return 0;
- }
-
- get_npages = get_user_pages(current, current->mm, start,
- npages, 1, 1, pages, NULL);
- get_npages = max(get_npages, 0);
- if (get_npages != npages) {
- DRM_ERROR("failed to get user_pages.\n");
- while (get_npages)
- put_page(pages[--get_npages]);
- return -EFAULT;
- }
-
- return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
- unsigned int npages,
- struct vm_area_struct *vma)
-{
- if (!vma_is_io(vma)) {
- unsigned int i;
-
- for (i = 0; i < npages; i++) {
- set_page_dirty_lock(pages[i]);
-
- /*
- * undo the reference we took when populating
- * the table.
- */
- put_page(pages[i]);
- }
- }
-}
-
-int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- int nents;
-
- mutex_lock(&drm_dev->struct_mutex);
-
- nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
- if (!nents) {
- DRM_ERROR("failed to map sgl with dma.\n");
- mutex_unlock(&drm_dev->struct_mutex);
- return nents;
- }
-
- mutex_unlock(&drm_dev->struct_mutex);
- return 0;
-}
-
-void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
-}
-
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -756,9 +753,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
+ ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
if (ret < 0)
- DRM_ERROR("failed to map a buffer with user.\n");
+ DRM_ERROR("failed to map pages.\n");
mutex_unlock(&dev->struct_mutex);
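The NONCONTIG mmap branch above maps one allocated page at a time into the user VMA. A stripped-down sketch of that loop, assuming the caller has already checked that the VMA is no larger than the page array:

#include <linux/mm.h>

static int sketch_mmap_pages(struct vm_area_struct *vma, struct page **pages)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0, ret;

	/* VM_MIXEDMAP lets vm_insert_page() back the range with struct pages */
	vma->vm_flags |= VM_MIXEDMAP;

	while (usize > 0) {
		ret = vm_insert_page(vma, uaddr, pages[i++]);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	}

	return 0;
}

The contiguous case avoids the loop entirely and uses remap_pfn_range() on the DMA address, as the else branch in the hunk shows.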
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h
index d3ea106a9a77..085b2a5d5f70 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,25 +35,21 @@
* exynos drm gem buffer structure.
*
* @kvaddr: kernel virtual address to allocated memory region.
- * *userptr: user space address.
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
- * @write: whether pages will be written to by the caller.
* @sgt: sg table to transfer page data.
+ * @pages: contain all pages to allocated memory region.
+ * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
- * @pfnmap: indicate whether memory region from userptr is mmaped with
- * VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
- unsigned long userptr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
- unsigned int write;
struct sg_table *sgt;
+ struct page **pages;
+ unsigned long page_size;
unsigned long size;
- bool pfnmap;
};
/*
@@ -69,7 +65,6 @@ struct exynos_drm_gem_buf {
* or at framebuffer creation.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
- * @vma: a pointer to vm_area.
* @flags: indicate memory type to allocated buffer and cache attruibute.
*
* P.S. this object would be transfered to user as kms_bo.handle so
@@ -79,7 +74,6 @@ struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
unsigned long size;
- struct vm_area_struct *vma;
unsigned int flags;
};
@@ -110,9 +104,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *filp);
+ struct drm_file *file_priv);
/*
* put dma address from gem handle and this function could be used for
@@ -121,7 +115,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
*/
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *filp);
+ struct drm_file *file_priv);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -134,10 +128,6 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* map user space allocated by malloc to pages. */
-int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -173,36 +163,4 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
- return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
-}
-
-/* get a copy of a virtual memory region. */
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
-
-/* release a userspace virtual memory area. */
-void exynos_gem_put_vma(struct vm_area_struct *vma);
-
-/* get pages from user space. */
-int exynos_gem_get_pages_from_userptr(unsigned long start,
- unsigned int npages,
- struct page **pages,
- struct vm_area_struct *vma);
-
-/* drop the reference to pages. */
-void exynos_gem_put_pages_to_userptr(struct page **pages,
- unsigned int npages,
- struct vm_area_struct *vma);
-
-/* map sgt with dma region. */
-int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
- struct sg_table *sgt,
- enum dma_data_direction dir);
-
-/* unmap sgt from dma region. */
-void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
- struct sg_table *sgt,
- enum dma_data_direction dir);
-
#endif
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 2d11e70b601a..c3b9e2b45185 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -346,23 +346,9 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx->drm_dev = drm_dev;
- if (mixer_ops->iommu_on)
- mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
-
return 0;
}
-static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- struct drm_hdmi_context *ctx;
- struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
-
- ctx = get_ctx_from_subdrv(subdrv);
-
- if (mixer_ops->iommu_on)
- mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
-}
-
static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -382,7 +368,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
subdrv->dev = dev;
subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
- subdrv->remove = hdmi_subdrv_remove;
platform_set_drvdata(pdev, subdrv);
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 54b522353e48..2da5ffd3a059 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -62,7 +62,6 @@ struct exynos_hdmi_ops {
struct exynos_mixer_ops {
/* manager */
- int (*iommu_on)(void *ctx, bool enable);
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
void (*dpms)(void *ctx, int mode);
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c
deleted file mode 100644
index 09db1983eb1a..000000000000
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/* exynos_drm_iommu.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drmP.h>
-#include <drm/exynos_drm.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-#include <linux/kref.h>
-
-#include <asm/dma-iommu.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
-
-/*
- * drm_create_iommu_mapping - create a mapping structure
- *
- * @drm_dev: DRM device
- */
-int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- struct dma_iommu_mapping *mapping = NULL;
- struct exynos_drm_private *priv = drm_dev->dev_private;
- struct device *dev = drm_dev->dev;
-
- if (!priv->da_start)
- priv->da_start = EXYNOS_DEV_ADDR_START;
- if (!priv->da_space_size)
- priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
- if (!priv->da_space_order)
- priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
-
- mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
- priv->da_space_size,
- priv->da_space_order);
- if (!mapping)
- return -ENOMEM;
-
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
- GFP_KERNEL);
- dma_set_max_seg_size(dev, 0xffffffffu);
- dev->archdata.mapping = mapping;
-
- return 0;
-}
-
-/*
- * drm_release_iommu_mapping - release iommu mapping structure
- *
- * @drm_dev: DRM device
- *
- * If mapping->kref drops to 0, everything related to the iommu mapping
- * will be released.
- */
-void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
- struct device *dev = drm_dev->dev;
-
- arm_iommu_release_mapping(dev->archdata.mapping);
-}
-
-/*
- * drm_iommu_attach_device - attach a device to the iommu mapping
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be attached
- *
- * This function should be called by sub drivers to attach themselves to
- * the iommu mapping.
- */
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct device *dev = drm_dev->dev;
- int ret;
-
- if (!dev->archdata.mapping) {
- DRM_ERROR("iommu_mapping is null.\n");
- return -EFAULT;
- }
-
- subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
- sizeof(*subdrv_dev->dma_parms),
- GFP_KERNEL);
- dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
-
- ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
- if (ret < 0) {
- DRM_DEBUG_KMS("failed iommu attach.\n");
- return ret;
- }
-
- /*
- * Set dma_ops to drm_device just one time.
- *
- * The dma mapping api needs a device object and is used to allocate
- * physical memory and map it through the iommu table. If the iommu
- * attach succeeded, the sub driver has dma_ops for the iommu, and
- * all sub drivers share the same dma_ops.
- */
- if (!dev->archdata.dma_ops)
- dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
-
- return 0;
-}
-
-/*
- * drm_iommu_detach_device - detach device address space mapping from device
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be detached
- *
- * This function should be called by sub drivers to detach themselves from
- * the iommu mapping.
- */
-void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct device *dev = drm_dev->dev;
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-
- if (!mapping || !mapping->domain)
- return;
-
- iommu_detach_device(mapping->domain, subdrv_dev);
- drm_release_iommu_mapping(drm_dev);
-}
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h
deleted file mode 100644
index 18a0ca190b98..000000000000
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* exynos_drm_iommu.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _EXYNOS_DRM_IOMMU_H_
-#define _EXYNOS_DRM_IOMMU_H_
-
-#define EXYNOS_DEV_ADDR_START 0x20000000
-#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-#define EXYNOS_DEV_ADDR_ORDER 0x4
-
-#ifdef CONFIG_DRM_EXYNOS_IOMMU
-
-int drm_create_iommu_mapping(struct drm_device *drm_dev);
-
-void drm_release_iommu_mapping(struct drm_device *drm_dev);
-
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
-void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
- struct device *dev = drm_dev->dev;
-
- return dev->archdata.mapping ? true : false;
-#else
- return false;
-#endif
-}
-
-#else
-
-struct dma_iommu_mapping;
-static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- return 0;
-}
-
-static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-}
-
-static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- return 0;
-}
-
-static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
-}
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- return false;
-}
-
-#endif
-#endif
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4b0c16bfd1da..e4b8a8f741f7 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -382,6 +382,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
+ bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -391,6 +392,8 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
+ is_checked = true;
+
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -398,7 +401,22 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- drm_vblank_put(drm_dev, crtc);
+ }
+
+ if (is_checked) {
+ /*
+ * Call drm_vblank_put() only if drm_vblank_get() was called.
+ */
+ if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
+ drm_vblank_put(drm_dev, crtc);
+
+ /*
+ * don't turn vblank off if vblank_disable_allowed is 1, because
+ * it will be turned off by the timer handler.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, crtc);
}
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c
index bafb65389562..2c115f8a62a3 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,29 +50,6 @@
#define MAX_HEIGHT 1080
#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
-/* AVI header and aspect ratio */
-#define HDMI_AVI_VERSION 0x02
-#define HDMI_AVI_LENGTH 0x0D
-#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
-#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
-
-/* AUI header info */
-#define HDMI_AUI_VERSION 0x01
-#define HDMI_AUI_LENGTH 0x0A
-
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
- /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
- /* InfoFrame packet type */
- HDMI_PACKET_TYPE_INFOFRAME = 0x80,
- /* Vendor-Specific InfoFrame */
- HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
- /* Auxiliary Video information InfoFrame */
- HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
- /* Audio information InfoFrame */
- HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -97,7 +74,6 @@ struct hdmi_context {
struct mutex hdmi_mutex;
void __iomem *regs;
- void *parent_ctx;
int external_irq;
int internal_irq;
@@ -108,6 +84,7 @@ struct hdmi_context {
int cur_conf;
struct hdmi_resources res;
+ void *parent_ctx;
int hpd_gpio;
@@ -205,7 +182,6 @@ struct hdmi_v13_conf {
int height;
int vrefresh;
bool interlace;
- int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_v13_preset_conf *conf;
};
@@ -377,20 +353,15 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
};
static const struct hdmi_v13_conf hdmi_v13_confs[] = {
- { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
- &hdmi_v13_conf_720p60 },
- { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
- &hdmi_v13_conf_720p60 },
- { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
- &hdmi_v13_conf_480p },
- { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
- &hdmi_v13_conf_1080i50 },
- { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p50 },
- { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
- &hdmi_v13_conf_1080i60 },
- { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
- &hdmi_v13_conf_1080p60 },
+ { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
+ { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
+ { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
+ { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
+ { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p50 },
+ { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
+ { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
+ &hdmi_v13_conf_1080p60 },
};
/* HDMI Version 1.4 */
@@ -508,7 +479,6 @@ struct hdmi_conf {
int height;
int vrefresh;
bool interlace;
- int cea_video_id;
const u8 *hdmiphy_data;
const struct hdmi_preset_conf *conf;
};
@@ -964,21 +934,16 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
};
static const struct hdmi_conf hdmi_confs[] = {
- { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
- { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
- { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
- { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
- { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
- { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+ { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+ { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+ { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+ { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
-struct hdmi_infoframe {
- enum HDMI_PACKET_TYPE type;
- u8 ver;
- u8 len;
-};
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
@@ -1302,88 +1267,6 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
return hdmi_v14_conf_index(mode);
}
-static u8 hdmi_chksum(struct hdmi_context *hdata,
- u32 start, u8 len, u32 hdr_sum)
-{
- int i;
-
- /* hdr_sum : header0 + header1 + header2
- * start : start address of packet byte1
- * len : packet bytes - 1 */
- for (i = 0; i < len; ++i)
- hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
-
- /* return 2's complement of 8 bit hdr_sum */
- return (u8)(~(hdr_sum & 0xff) + 1);
-}
-
-static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- struct hdmi_infoframe *infoframe)
-{
- u32 hdr_sum;
- u8 chksum;
- u32 aspect_ratio;
- u32 mod;
- u32 vic;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
- if (hdata->dvi_mode) {
- hdmi_reg_writeb(hdata, HDMI_VSI_CON,
- HDMI_VSI_CON_DO_NOT_TRANSMIT);
- hdmi_reg_writeb(hdata, HDMI_AVI_CON,
- HDMI_AVI_CON_DO_NOT_TRANSMIT);
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
- return;
- }
-
- switch (infoframe->type) {
- case HDMI_PACKET_TYPE_AVI:
- hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
-
- /* Output format zero hardcoded, RGB/YCbCr selection */
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
- AVI_ACTIVE_FORMAT_VALID |
- AVI_UNDERSCANNED_DISPLAY_VALID);
-
- aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
-
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
- AVI_SAME_AS_PIC_ASPECT_RATIO);
-
- if (hdata->type == HDMI_TYPE13)
- vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
- else
- vic = hdmi_confs[hdata->cur_conf].cea_video_id;
-
- hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
-
- chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->len, hdr_sum);
- DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
- hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
- break;
- case HDMI_PACKET_TYPE_AUI:
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
- chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->len, hdr_sum);
- DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
- hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
- break;
- default:
- break;
- }
-}
-
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
@@ -1410,7 +1293,6 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
raw_edid->width_cm, raw_edid->height_cm);
- kfree(raw_edid);
} else {
return -ENODEV;
}
@@ -1659,8 +1541,6 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- struct hdmi_infoframe infoframe;
-
/* disable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1695,17 +1575,9 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.type = HDMI_PACKET_TYPE_AVI;
- infoframe.ver = HDMI_AVI_VERSION;
- infoframe.len = HDMI_AVI_LENGTH;
- hdmi_reg_infoframe(hdata, &infoframe);
-
- infoframe.type = HDMI_PACKET_TYPE_AUI;
- infoframe.ver = HDMI_AUI_VERSION;
- infoframe.len = HDMI_AUI_LENGTH;
- hdmi_reg_infoframe(hdata, &infoframe);
-
/* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
}
@@ -2106,18 +1978,9 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
index = hdmi_v14_conf_index(m);
if (index >= 0) {
- struct drm_mode_object base;
- struct list_head head;
-
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
-
- /* preserve display mode header while copying. */
- head = adjusted_mode->head;
- base = adjusted_mode->base;
memcpy(adjusted_mode, m, sizeof(*m));
- adjusted_mode->head = head;
- adjusted_mode->base = base;
break;
}
}
@@ -2303,27 +2166,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
memset(res, 0, sizeof(*res));
/* get clocks, power */
- res->hdmi = devm_clk_get(dev, "hdmi");
+ res->hdmi = clk_get(dev, "hdmi");
if (IS_ERR_OR_NULL(res->hdmi)) {
DRM_ERROR("failed to get clock 'hdmi'\n");
goto fail;
}
- res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+ res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
goto fail;
}
- res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
+ res->sclk_pixel = clk_get(dev, "sclk_pixel");
if (IS_ERR_OR_NULL(res->sclk_pixel)) {
DRM_ERROR("failed to get clock 'sclk_pixel'\n");
goto fail;
}
- res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
+ res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
goto fail;
}
- res->hdmiphy = devm_clk_get(dev, "hdmiphy");
+ res->hdmiphy = clk_get(dev, "hdmiphy");
if (IS_ERR_OR_NULL(res->hdmiphy)) {
DRM_ERROR("failed to get clock 'hdmiphy'\n");
goto fail;
@@ -2331,7 +2194,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
- res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
+ res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
DRM_ERROR("failed to get memory for regulators\n");
@@ -2341,7 +2204,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
}
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
if (ret) {
DRM_ERROR("failed to get regulators\n");
goto fail;
@@ -2354,6 +2217,28 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
return -ENODEV;
}
+static int hdmi_resources_cleanup(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ regulator_bulk_free(res->regul_count, res->regul_bulk);
+ /* kfree is NULL-safe */
+ kfree(res->regul_bulk);
+ if (!IS_ERR_OR_NULL(res->hdmiphy))
+ clk_put(res->hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+ clk_put(res->sclk_hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_pixel))
+ clk_put(res->sclk_pixel);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+ clk_put(res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(res->hdmi))
+ clk_put(res->hdmi);
+ memset(res, 0, sizeof(*res));
+
+ return 0;
+}
+
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2493,32 +2378,36 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
+ ret = -EINVAL;
DRM_ERROR("hdmi_resources_init failed\n");
- return -EINVAL;
+ goto err_data;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
DRM_ERROR("failed to find registers\n");
- return -ENOENT;
+ ret = -ENOENT;
+ goto err_resource;
}
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
DRM_ERROR("failed to map registers\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto err_resource;
}
- ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
+ ret = gpio_request(hdata->hpd_gpio, "HPD");
if (ret) {
DRM_ERROR("failed to request HPD gpio\n");
- return ret;
+ goto err_resource;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
- return -ENOENT;
+ ret = -ENOENT;
+ goto err_gpio;
}
hdata->ddc_port = hdmi_ddc;
@@ -2581,6 +2470,11 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
+err_gpio:
+ gpio_free(hdata->hpd_gpio);
+err_resource:
+ hdmi_resources_cleanup(hdata);
+err_data:
return ret;
}
@@ -2597,6 +2491,9 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
free_irq(hdata->internal_irq, hdata);
free_irq(hdata->external_irq, hdata);
+ gpio_free(hdata->hpd_gpio);
+
+ hdmi_resources_cleanup(hdata);
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c
index 40a6e1906fbb..e7fbb823fd8e 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,7 +36,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
-#include "exynos_drm_iommu.h"
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
@@ -81,7 +80,6 @@ enum mixer_version_id {
struct mixer_context {
struct device *dev;
- struct drm_device *drm_dev;
int pipe;
bool interlace;
bool powered;
@@ -92,7 +90,6 @@ struct mixer_context {
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
enum mixer_version_id mxr_ver;
- void *parent_ctx;
};
struct mixer_drv_data {
@@ -668,24 +665,6 @@ static void mixer_win_reset(struct mixer_context *ctx)
spin_unlock_irqrestore(&res->reg_slock, flags);
}
-static int mixer_iommu_on(void *ctx, bool enable)
-{
- struct exynos_drm_hdmi_context *drm_hdmi_ctx;
- struct mixer_context *mdata = ctx;
- struct drm_device *drm_dev;
-
- drm_hdmi_ctx = mdata->parent_ctx;
- drm_dev = drm_hdmi_ctx->drm_dev;
-
- if (is_drm_iommu_supported(drm_dev)) {
- if (enable)
- return drm_iommu_attach_device(drm_dev, mdata->dev);
-
- drm_iommu_detach_device(drm_dev, mdata->dev);
- }
- return 0;
-}
-
static void mixer_poweron(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
@@ -887,7 +866,6 @@ static void mixer_win_disable(void *ctx, int win)
static struct exynos_mixer_ops mixer_ops = {
/* manager */
- .iommu_on = mixer_iommu_on,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
.dpms = mixer_dpms,
@@ -906,6 +884,7 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long flags;
+ bool is_checked = false;
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -915,6 +894,7 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
if (crtc != e->pipe)
continue;
+ is_checked = true;
do_gettimeofday(&now);
e->event.sequence = 0;
e->event.tv_sec = now.tv_sec;
@@ -922,9 +902,16 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
- drm_vblank_put(drm_dev, crtc);
}
+ if (is_checked)
+ /*
+ * Call drm_vblank_put() only if drm_vblank_get() was called.
+ */
+ if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
+ drm_vblank_put(drm_dev, crtc);
+
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
@@ -984,45 +971,57 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
spin_lock_init(&mixer_res->reg_slock);
- mixer_res->mixer = devm_clk_get(dev, "mixer");
+ mixer_res->mixer = clk_get(dev, "mixer");
if (IS_ERR_OR_NULL(mixer_res->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
- mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+ mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
- return ret;
+ goto fail;
}
mixer_res->irq = res->start;
return 0;
+
+fail:
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
+ clk_put(mixer_res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(mixer_res->mixer))
+ clk_put(mixer_res->mixer);
+ return ret;
}
static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1032,21 +1031,25 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
struct device *dev = &pdev->dev;
struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
+ int ret;
- mixer_res->vp = devm_clk_get(dev, "vp");
+ mixer_res->vp = clk_get(dev, "vp");
if (IS_ERR_OR_NULL(mixer_res->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
- mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+ mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
- mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
+ mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
dev_err(dev, "failed to get clock 'sclk_dac'\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
if (mixer_res->sclk_hdmi)
@@ -1055,17 +1058,28 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
return 0;
+
+fail:
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
+ clk_put(mixer_res->sclk_dac);
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
+ clk_put(mixer_res->sclk_mixer);
+ if (!IS_ERR_OR_NULL(mixer_res->vp))
+ clk_put(mixer_res->vp);
+ return ret;
}
static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1135,7 +1149,6 @@ static int __devinit mixer_probe(struct platform_device *pdev)
}
ctx->dev = &pdev->dev;
- ctx->parent_ctx = (void *)drm_hdmi_ctx;
drm_hdmi_ctx->ctx = (void *)ctx;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->mxr_ver = drv->version;
diff --git a/trunk/drivers/gpu/drm/exynos/regs-hdmi.h b/trunk/drivers/gpu/drm/exynos/regs-hdmi.h
index 970cdb518eb1..9cc7c5e9718c 100644
--- a/trunk/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/trunk/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -298,14 +298,14 @@
#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1))
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n))
#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1))
+#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n))
#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,19 +338,6 @@
#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
-/* AVI bit definition */
-#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
-#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
-
-#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
-#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
-
-/* AUI bit definition */
-#define HDMI_AUI_CON_NO_TRAN (0 << 0)
-
-/* VSI bit definition */
-#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
-
/* HDCP related registers */
#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/trunk/drivers/gpu/drm/gma500/cdv_device.c b/trunk/drivers/gpu/drm/gma500/cdv_device.c
index 23e14e93991f..1ceca3d13b65 100644
--- a/trunk/drivers/gpu/drm/gma500/cdv_device.c
+++ b/trunk/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_object_attach_property(&connector->base, prop, 0);
+ drm_connector_attach_property(connector, prop, 0);
}
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_object_attach_property(&connector->base, prop, 0);
+ drm_connector_attach_property(connector, prop, 0);
}
/* Cedarview */
diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 51044cc55cf2..e3a3978cf320 100644
--- a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
- ret = drm_object_property_set_value(&connector->base, property, val);
+ ret = drm_connector_property_set_value(connector, property, val);
if (ret)
return ret;
diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index e223b500022e..7272a461edfe 100644
--- a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_object_property_get_value(&connector->base,
+ if (drm_connector_property_get_value(connector,
property, &curValue))
return -1;
if (curValue == value)
return 0;
- if (drm_object_property_set_value(&connector->base,
+ if (drm_connector_property_set_value(connector,
property, value))
return -1;
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index d81dbc3368f0..b362dd39bf5a 100644
--- a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
- if (drm_object_property_get_value(&connector->base,
+ if (drm_connector_property_get_value(connector,
property,
&curValue))
return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
if (curValue == value)
return 0;
- if (drm_object_property_set_value(&connector->base,
+ if (drm_connector_property_set_value(connector,
property,
value))
return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
return -1;
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_object_property_set_value(&connector->base,
+ if (drm_connector_property_set_value(connector,
property,
value))
return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 2d4ab48f07a2..32dba2ab53e1 100644
--- a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_object_property_get_value(&connector->base, property, &val))
+ if (drm_connector_property_get_value(connector, property, &val))
goto set_prop_error;
if (val == value)
goto set_prop_done;
- if (drm_object_property_set_value(&connector->base,
+ if (drm_connector_property_set_value(connector,
property, value))
goto set_prop_error;
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
}
}
} else if (!strcmp(property->name, "backlight") && encoder) {
- if (drm_object_property_set_value(&connector->base, property,
+ if (drm_connector_property_set_value(connector, property,
value))
goto set_prop_error;
else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
- if (pipe != 0 && pipe != 2) {
+ if (!dev || ((pipe != 0) && (pipe != 2))) {
DRM_ERROR("Invalid parameter\n");
return;
}
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*attach properties*/
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev_priv->backlight_property,
MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
diff --git a/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c b/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 74485dc43945..dec6a9aea3c6 100644
--- a/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/trunk/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
- drm_object_property_get_value(&connector->base,
+ drm_connector_property_get_value(connector,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail.h b/trunk/drivers/gpu/drm/gma500/oaktrail.h
index 30adbbe23024..f2f9f38a5362 100644
--- a/trunk/drivers/gpu/drm/gma500/oaktrail.h
+++ b/trunk/drivers/gpu/drm/gma500/oaktrail.h
@@ -249,9 +249,3 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
extern void oaktrail_hdmi_save(struct drm_device *dev);
extern void oaktrail_hdmi_restore(struct drm_device *dev);
extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
-extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb);
-extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
-
-
diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c b/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 3071526bc3c1..cdafd2acc72f 100644
--- a/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/trunk/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -168,11 +168,6 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
- if (pipe == 1) {
- oaktrail_crtc_hdmi_dpms(crtc, mode);
- return;
- }
-
if (!gma_power_begin(dev, true))
return;
@@ -307,9 +302,6 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
- if (pipe == 1)
- return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
-
if (!gma_power_begin(dev, true))
return 0;
@@ -351,7 +343,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
(mode->crtc_vdisplay - 1));
if (psb_intel_encoder)
- drm_object_property_get_value(&connector->base,
+ drm_connector_property_get_value(connector,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_device.c b/trunk/drivers/gpu/drm/gma500/oaktrail_device.c
index 08747fd7105c..010b820744a5 100644
--- a/trunk/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/trunk/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
- .hdmi_mask = (1 << 1),
+ .hdmi_mask = (1 << 0),
.lvds_mask = (1 << 0),
.cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f036f1fc161e..69e51e903f35 100644
--- a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,345 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
HDMI_READ(HDMI_HCR);
}
-static void wait_for_vblank(struct drm_device *dev)
-{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
-}
-
-static unsigned int htotal_calculate(struct drm_display_mode *mode)
-{
- u32 htotal, new_crtc_htotal;
-
- htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
-
- /*
- * 1024 x 768 new_crtc_htotal = 0x1024;
- * 1280 x 1024 new_crtc_htotal = 0x0c34;
- */
- new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
-
- DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
- return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
-}
-
-static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
- int refclk, struct oaktrail_hdmi_clock *best_clock)
-{
- int np_min, np_max, nr_min, nr_max;
- int np, nr, nf;
-
- np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
- np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
- if (np_min < oaktrail_hdmi_limit.np.min)
- np_min = oaktrail_hdmi_limit.np.min;
- if (np_max > oaktrail_hdmi_limit.np.max)
- np_max = oaktrail_hdmi_limit.np.max;
-
- nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
- nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
- if (nr_min < oaktrail_hdmi_limit.nr.min)
- nr_min = oaktrail_hdmi_limit.nr.min;
- if (nr_max > oaktrail_hdmi_limit.nr.max)
- nr_max = oaktrail_hdmi_limit.nr.max;
-
- np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
- nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
- nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
- DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
-
- /*
- * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
- * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
- */
- best_clock->np = np;
- best_clock->nr = nr - 1;
- best_clock->nf = (nf << 14);
-}
-
-static void scu_busy_loop(void __iomem *scu_base)
-{
- u32 status = 0;
- u32 loop_count = 0;
-
- status = readl(scu_base + 0x04);
- while (status & 1) {
- udelay(1); /* scu processing time is a few microseconds */
- status = readl(scu_base + 0x04);
- loop_count++;
- /* break if the scu doesn't reset the busy bit after many retries */
- if (loop_count > 1000) {
- DRM_DEBUG_KMS("SCU IPC timed out");
- return;
- }
- }
-}
-
-/*
- * You don't want to know, you really really don't want to know....
- *
- * This is magic. However it's safe magic because of the way the platform
- * works and it is necessary magic.
- */
-static void oaktrail_hdmi_reset(struct drm_device *dev)
-{
- void __iomem *base;
- unsigned long scu_ipc_mmio = 0xff11c000UL;
- int scu_len = 1024;
-
- base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
- if (base == NULL) {
- DRM_ERROR("failed to map scu mmio\n");
- return;
- }
-
- /* scu ipc: assert hdmi controller reset */
- writel(0xff11d118, base + 0x0c);
- writel(0x7fffffdf, base + 0x80);
- writel(0x42005, base + 0x0);
- scu_busy_loop(base);
-
- /* scu ipc: de-assert hdmi controller reset */
- writel(0xff11d118, base + 0x0c);
- writel(0x7fffffff, base + 0x80);
- writel(0x42005, base + 0x0);
- scu_busy_loop(base);
-
- iounmap(base);
-}
-
-int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
- int pipe = 1;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int refclk;
- struct oaktrail_hdmi_clock clock;
- u32 dspcntr, pipeconf, dpll, temp;
- int dspcntr_reg = DSPBCNTR;
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable dpll if necessary */
- dpll = REG_READ(DPLL_CTRL);
- if ((dpll & DPLL_PWRDN) == 0) {
- REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
- REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
- REG_WRITE(DPLL_STATUS, 0x1);
- }
- udelay(150);
-
- /* Reset controller */
- oaktrail_hdmi_reset(dev);
-
- /* program and enable dpll */
- refclk = 25000;
- oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
-
- /* Set the DPLL */
- dpll = REG_READ(DPLL_CTRL);
- dpll &= ~DPLL_PDIV_MASK;
- dpll &= ~(DPLL_PWRDN | DPLL_RESET);
- REG_WRITE(DPLL_CTRL, 0x00000008);
- REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
- REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
- REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
- REG_WRITE(DPLL_UPDATE, 0x80000000);
- REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
- udelay(150);
-
- /* configure HDMI */
- HDMI_WRITE(0x1004, 0x1fd);
- HDMI_WRITE(0x2000, 0x1);
- HDMI_WRITE(0x2008, 0x0);
- HDMI_WRITE(0x3130, 0x8);
- HDMI_WRITE(0x101c, 0x1800810);
-
- temp = htotal_calculate(adjusted_mode);
- REG_WRITE(htot_reg, temp);
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
- REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
-
- REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
- REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
-
- temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
- HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
-
- REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
-
- /* Flush the plane changes */
- {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->mode_set_base(crtc, x, y, old_fb);
- }
-
- /* Set up the display plane register */
- dspcntr = REG_READ(dspcntr_reg);
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
- dspcntr |= DISPPLANE_SEL_PIPE_B;
- dspcntr |= DISPLAY_PLANE_ENABLE;
-
- /* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
- pipeconf |= PIPEACONF_ENABLE;
-
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
-
- REG_WRITE(PCH_PIPEBCONF, pipeconf);
- REG_READ(PCH_PIPEBCONF);
- wait_for_vblank(dev);
-
- REG_WRITE(dspcntr_reg, dspcntr);
- wait_for_vblank(dev);
-
- gma_power_end(dev);
-
- return 0;
-}
-
-void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- u32 temp;
-
- DRM_DEBUG_KMS("%s %d\n", __func__, mode);
-
- switch (mode) {
- case DRM_MODE_DPMS_OFF:
- REG_WRITE(VGACNTRL, 0x80000000);
-
- /* Disable plane */
- temp = REG_READ(DSPBCNTR);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
- REG_READ(DSPBCNTR);
- /* Flush the plane changes */
- REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
- REG_READ(DSPBSURF);
- }
-
- /* Disable pipe B */
- temp = REG_READ(PIPEBCONF);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
- REG_READ(PIPEBCONF);
- }
-
- /* Disable LNW Pipes, etc */
- temp = REG_READ(PCH_PIPEBCONF);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
- REG_READ(PCH_PIPEBCONF);
- }
-
- /* wait for pipe off */
- udelay(150);
-
- /* Disable dpll */
- temp = REG_READ(DPLL_CTRL);
- if ((temp & DPLL_PWRDN) == 0) {
- REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
- REG_WRITE(DPLL_STATUS, 0x1);
- }
-
- /* wait for dpll off */
- udelay(150);
-
- break;
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable dpll */
- temp = REG_READ(DPLL_CTRL);
- if ((temp & DPLL_PWRDN) != 0) {
- REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
- temp = REG_READ(DPLL_CLK_ENABLE);
- REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
- REG_READ(DPLL_CLK_ENABLE);
- }
- /* wait for dpll warm up */
- udelay(150);
-
- /* Enable pipe B */
- temp = REG_READ(PIPEBCONF);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
- REG_READ(PIPEBCONF);
- }
-
- /* Enable LNW Pipe B */
- temp = REG_READ(PCH_PIPEBCONF);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
- REG_READ(PCH_PIPEBCONF);
- }
-
- wait_for_vblank(dev);
-
- /* Enable plane */
- temp = REG_READ(DSPBCNTR);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
- REG_READ(DSPBSURF);
- }
-
- psb_intel_crtc_load_lut(crtc);
- }
-
- /* DSPARB */
- REG_WRITE(DSPARB, 0x00003fbf);
-
- /* FW1 */
- REG_WRITE(0x70034, 0x3f880a0a);
-
- /* FW2 */
- REG_WRITE(0x70038, 0x0b060808);
-
- /* FW4 */
- REG_WRITE(0x70050, 0x08030404);
-
- /* FW5 */
- REG_WRITE(0x70054, 0x04040404);
-
- /* LNC Chicken Bits - Squawk! */
- REG_WRITE(0x70400, 0x4000);
-
- return;
-}
-
static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
static int dpms_mode = -1;
@@ -572,15 +233,13 @@ static const unsigned char raw_edid[] = {
static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct i2c_adapter *i2c_adap;
struct edid *edid;
- int ret = 0;
+ struct drm_display_mode *mode, *t;
+ int i = 0, ret = 0;
- /*
- * FIXME: We need to figure this lot out. In theory we can
- * read the EDID somehow but I've yet to find working reference
- * code.
- */
i2c_adap = i2c_get_adapter(3);
if (i2c_adap == NULL) {
DRM_ERROR("No ddc adapter available!\n");
@@ -594,7 +253,17 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
}
- return ret;
+
+ /*
+ * prune modes that require a framebuffer larger than the stolen memory
+ */
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+ if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
+ i++;
+ drm_mode_remove(connector, mode);
+ }
+ }
+ return ret - i;
}
static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -680,7 +349,6 @@ void oaktrail_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_sysfs_connector_add(connector);
- dev_info(dev->dev, "HDMI initialised.\n");
return;
@@ -735,9 +403,6 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
dev_priv->hdmi_priv = hdmi_dev;
oaktrail_hdmi_audio_disable(dev);
-
- dev_info(dev->dev, "HDMI hardware present.\n");
-
return;
free:
diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c b/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 325013a9c48c..558c77fb55ec 100644
--- a/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/trunk/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
return;
}
- drm_object_property_get_value(
- &connector->base,
+ drm_connector_property_get_value(
+ connector,
dev->mode_config.scaling_mode_property,
&v);
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 9fa5fa2e6192..2a4c3a9e33e3 100644
--- a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
- if (drm_object_property_get_value(&connector->base,
+ if (drm_connector_property_get_value(connector,
property,
&curval))
goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
if (curval == value)
goto set_prop_done;
- if (drm_object_property_set_value(&connector->base,
+ if (drm_connector_property_set_value(connector,
property,
value))
goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
} else if (!strcmp(property->name, "backlight")) {
- if (drm_object_property_set_value(&connector->base,
+ if (drm_connector_property_set_value(connector,
property,
value))
goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index a4cc777ab7a6..fc9292705dbf 100644
--- a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_object_property_set_value(&connector->base, property, val);
+ ret = drm_connector_property_set_value(connector, property, val);
if (ret)
return ret;
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
temp_value = val;
if (psb_intel_sdvo_connector->left == property) {
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
psb_intel_sdvo_connector->right, val);
if (psb_intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->right == property) {
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
psb_intel_sdvo_connector->left, val);
if (psb_intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->top == property) {
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
psb_intel_sdvo_connector->bottom, val);
if (psb_intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (psb_intel_sdvo_connector->bottom == property) {
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
psb_intel_sdvo_connector->top, val);
if (psb_intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
- drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
+ drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
psb_intel_sdvo_connector->tv_format, 0);
return true;
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
psb_intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!psb_intel_sdvo_connector->name) return false; \
- drm_object_attach_property(&connector->base, \
+ drm_connector_attach_property(connector, \
psb_intel_sdvo_connector->name, \
psb_intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->left)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
psb_intel_sdvo_connector->left,
psb_intel_sdvo_connector->left_margin);
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->right)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
psb_intel_sdvo_connector->right,
psb_intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->top)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
psb_intel_sdvo_connector->top,
psb_intel_sdvo_connector->top_margin);
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->bottom)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
psb_intel_sdvo_connector->bottom,
psb_intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->dot_crawl)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
psb_intel_sdvo_connector->dot_crawl,
psb_intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
diff --git a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c
index b865d0728e28..599099fe76e3 100644
--- a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
else
priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
encoder->dev->mode_config.tv_subconnector_property,
priv->subconnector);
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
- drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
+ drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
priv->select_subconnector);
- drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
+ drm_connector_attach_property(connector, conf->tv_subconnector_property,
priv->subconnector);
- drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
+ drm_connector_attach_property(connector, conf->tv_left_margin_property,
priv->hmargin);
- drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
+ drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_object_attach_property(&connector->base, conf->tv_mode_property,
+ drm_connector_attach_property(connector, conf->tv_mode_property,
priv->norm);
- drm_object_attach_property(&connector->base, conf->tv_brightness_property,
+ drm_connector_attach_property(connector, conf->tv_brightness_property,
priv->brightness);
- drm_object_attach_property(&connector->base, conf->tv_contrast_property,
+ drm_connector_attach_property(connector, conf->tv_contrast_property,
priv->contrast);
- drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
+ drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
priv->flicker);
- drm_object_attach_property(&connector->base, priv->scale_property,
+ drm_connector_attach_property(connector, priv->scale_property,
priv->scale);
return 0;
diff --git a/trunk/drivers/gpu/drm/i915/i915_debugfs.c b/trunk/drivers/gpu/drm/i915/i915_debugfs.c
index 4568e7d8a060..dde8b505bf7f 100644
--- a/trunk/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/trunk/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1068,7 +1068,7 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+ u32 rpmodectl1, gt_core_status, rcctl1;
unsigned forcewake_count;
int count=0, ret;
@@ -1097,9 +1097,6 @@ static int gen6_drpc_info(struct seq_file *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
mutex_unlock(&dev->struct_mutex);
- mutex_lock(&dev_priv->rps.hw_lock);
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- mutex_unlock(&dev_priv->rps.hw_lock);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1151,12 +1148,6 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "RC6++ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6pp));
- seq_printf(m, "RC6 voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
- seq_printf(m, "RC6+ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
- seq_printf(m, "RC6++ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
}
@@ -1282,7 +1273,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1291,14 +1282,19 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
- ia_freq = gpu_freq;
- sandybridge_pcode_read(dev_priv,
- GEN6_PCODE_READ_MIN_FREQ_TABLE,
- &ia_freq);
+ I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_READ_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode read of freq table timed out\n");
+ continue;
+ }
+ ia_freq = I915_READ(GEN6_PCODE_DATA);
seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
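
The ring-frequency hunk above replaces the sandybridge_pcode_read() helper with an open-coded GEN6 pcode mailbox transaction. A sketch of that handshake as a helper, assuming the registers behave as used in the hunk (the helper name and error code are illustrative):

    static int gen6_pcode_read_sketch(struct drm_i915_private *dev_priv,
                                      u32 mbox, u32 *val)
    {
            /* hand the payload to the firmware and latch the command */
            I915_WRITE(GEN6_PCODE_DATA, *val);
            I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

            /* firmware clears READY once it has consumed the request */
            if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 10))
                    return -ETIMEDOUT;

            *val = I915_READ(GEN6_PCODE_DATA);
            return 0;
    }
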
@@ -1402,15 +1398,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- if (dev_priv->ips.pwrctx) {
+ if (dev_priv->pwrctx) {
seq_printf(m, "power context ");
- describe_obj(m, dev_priv->ips.pwrctx);
+ describe_obj(m, dev_priv->pwrctx);
seq_printf(m, "\n");
}
- if (dev_priv->ips.renderctx) {
+ if (dev_priv->renderctx) {
seq_printf(m, "render context ");
- describe_obj(m, dev_priv->ips.renderctx);
+ describe_obj(m, dev_priv->renderctx);
seq_printf(m, "\n");
}
@@ -1715,13 +1711,13 @@ i915_max_freq_read(struct file *filp,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1756,7 +1752,7 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1766,7 +1762,7 @@ i915_max_freq_write(struct file *filp,
dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
@@ -1791,13 +1787,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1830,7 +1826,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1840,7 +1836,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c
index a48e4910ea2c..61ae104dca8c 100644
--- a/trunk/drivers/gpu/drm/i915/i915_dma.c
+++ b/trunk/drivers/gpu/drm/i915/i915_dma.c
@@ -103,6 +103,32 @@ static void i915_write_hws_pga(struct drm_device *dev)
I915_WRITE(HWS_PGA, addr);
}
+/**
+ * Sets up the hardware status page for devices that need a physical address
+ * in the register.
+ */
+static int i915_init_phys_hws(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Program Hardware Status Page */
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
+
+ if (!dev_priv->status_page_dmah) {
+ DRM_ERROR("Can not allocate hardware status page\n");
+ return -ENOMEM;
+ }
+
+ memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
+ 0, PAGE_SIZE);
+
+ i915_write_hws_pga(dev);
+
+ DRM_DEBUG_DRIVER("Enabled hardware status page\n");
+ return 0;
+}
+
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
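
The hunk above reinstates i915_init_phys_hws() for chipsets that take a physical status-page address. A usage sketch matching the driver-load hunk further down (the wrapper name is an assumption for illustration):

    static int example_setup_hws(struct drm_device *dev)
    {
            /* only parts without a GTT-backed status page need the physical variant */
            if (!I915_NEED_GFX_HWS(dev))
                    return i915_init_phys_hws(dev);

            return 0; /* GTT-backed status pages are set up by the ring code */
    }
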
@@ -425,16 +451,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- dev_priv->dri1.counter++;
- if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->dri1.counter = 0;
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 0;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -576,12 +602,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -749,21 +775,21 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG_DRIVER("\n");
- dev_priv->dri1.counter++;
- if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->dri1.counter = 1;
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 1;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(dev_priv->counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
- return dev_priv->dri1.counter;
+ return dev_priv->counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -794,7 +820,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
return ret;
@@ -988,9 +1014,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
- case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
- break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1303,8 +1326,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
-
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -1470,9 +1491,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
- ret = i915_gem_gtt_init(dev);
- if (ret)
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ ret = -EIO;
goto put_bridge;
+ }
+
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto put_gmch;
+ }
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@@ -1559,10 +1590,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
+ /* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
i915_gem_load(dev);
+ /* Init HWS */
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = i915_init_phys_hws(dev);
+ if (ret)
+ goto out_gem_unload;
+ }
+
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1582,8 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
- mutex_init(&dev_priv->rps.hw_lock);
-
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1641,7 +1678,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
- i915_gem_gtt_fini(dev);
+ intel_gmch_remove();
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1684,7 +1721,6 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
- cancel_work_sync(&dev_priv->console_resume_work);
/*
* free the memory space allocated for the child device
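
The driver-load hunks above swap the i915_gem_gtt_init() call for the older intel_gmch_probe()/intel_gtt_get() pair. A sketch of that restored ordering and its error handling (the helper name is assumed for illustration):

    static int example_setup_gtt(struct drm_device *dev)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;

            /* bring up the GMCH via the intel-gtt module first */
            if (!intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL))
                    return -EIO;

            /* then borrow its description of the GTT */
            dev_priv->mm.gtt = intel_gtt_get();
            if (!dev_priv->mm.gtt)
                    return -ENODEV;

            return 0;
    }
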
diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c
index 6745c7f976db..6770ee6084b4 100644
--- a/trunk/drivers/gpu/drm/i915/i915_drv.c
+++ b/trunk/drivers/gpu/drm/i915/i915_drv.c
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-int i915_panel_ignore_lid __read_mostly = 1;
+int i915_panel_ignore_lid __read_mostly = 0;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
- "Override lid status (0=autodetect, 1=autodetect disabled [default], "
- "-1=force lid closed, -2=force lid open)");
+ "Override lid status (0=autodetect [default], 1=lid open, "
+ "-1=lid closed)");
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,6 +396,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -410,36 +416,26 @@ void intel_detect_pch(struct drm_device *dev)
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- unsigned short id;
+ int id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
- dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_type = PCH_LPT;
- dev_priv->num_pch_pll = 0;
- DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
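
intel_detect_pch() above keys the PCH type off the ISA bridge's PCI device ID with the family bits masked out. A condensed sketch of that classification (the function name and the PCH_NONE fallback are assumptions for illustration):

    static enum intel_pch example_classify_pch(struct pci_dev *isa_bridge)
    {
            int id = isa_bridge->device & INTEL_PCH_DEVICE_ID_MASK;

            if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE)
                    return PCH_IBX;         /* Ibex Peak */
            if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE ||
                id == INTEL_PCH_PPT_DEVICE_ID_TYPE)
                    return PCH_CPT;         /* CougarPoint; PantherPoint is CPT compatible */
            if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
                    return PCH_LPT;         /* LynxPoint */

            return PCH_NONE;
    }
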
@@ -481,8 +477,6 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
- cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
-
intel_modeset_disable(dev);
drm_irq_uninstall(dev);
@@ -532,23 +526,17 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
}
-void intel_console_resume(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- console_resume_work);
- struct drm_device *dev = dev_priv->dev;
-
- console_lock();
- intel_fbdev_set_suspend(dev, 0);
- console_unlock();
-}
-
-static int __i915_drm_thaw(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -565,6 +553,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_modeset_setup_hw_state(dev);
+ drm_mode_config_reset(dev);
drm_irq_install(dev);
}
@@ -572,41 +561,14 @@ static int __i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
- /*
- * The console lock can be pretty contended on resume due
- * to all the printk activity. Try to keep it out of the hot
- * path of resume if possible.
- */
- if (console_trylock()) {
- intel_fbdev_set_suspend(dev, 0);
- console_unlock();
- } else {
- schedule_work(&dev_priv->console_resume_work);
- }
-
- return error;
-}
-
-static int i915_drm_thaw(struct drm_device *dev)
-{
- int error = 0;
-
- intel_gt_reset(dev);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
- __i915_drm_thaw(dev);
-
+ console_lock();
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
return error;
}
int i915_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -617,20 +579,7 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_gt_reset(dev);
-
- /*
- * Platforms with opregion should have sane BIOS, older ones (gen3 and
- * earlier) need this since the BIOS might clear all our scratch PTEs.
- */
- if (drm_core_check_feature(dev, DRIVER_MODESET) &&
- !dev_priv->opregion.header) {
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
- ret = __i915_drm_thaw(dev);
+ ret = i915_drm_thaw(dev);
if (ret)
return ret;
@@ -884,7 +833,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (intel_info->is_valleyview)
+ if (intel_info->is_haswell || intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
@@ -1191,40 +1140,12 @@ static bool IS_DISPLAYREG(u32 reg)
if (reg == GEN6_GDRST)
return false;
- switch (reg) {
- case _3D_CHICKEN3:
- case IVB_CHICKEN3:
- case GEN7_COMMON_SLICE_CHICKEN1:
- case GEN7_L3CNTLREG1:
- case GEN7_L3_CHICKEN_MODE_REGISTER:
- case GEN7_ROW_CHICKEN2:
- case GEN7_L3SQCREG4:
- case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
- case GEN7_HALF_SLICE_CHICKEN1:
- case GEN6_MBCTL:
- case GEN6_UCGCTL2:
- return false;
- default:
- break;
- }
-
return true;
}
-static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
-{
- /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
- * chip from rc6 before touching it for real. MI_MODE is masked, hence
- * harmless to write 0 into. */
- I915_WRITE_NOTRACE(MI_MODE, 0);
-}
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
- if (IS_GEN5(dev_priv->dev)) \
- ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1256,12 +1177,6 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- if (IS_GEN5(dev_priv->dev)) \
- ilk_dummy_write(dev_priv); \
- if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
- DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
- I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
- } \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
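
The macro hunk above drops the Ironlake dummy write and the Haswell unclaimed-register check from the MMIO write path. Roughly what the remaining __i915_write expansion does for a 32-bit register, sketched as a plain function (the FIFO debug check after the write is omitted here):

    static void example_i915_write32(struct drm_i915_private *dev_priv,
                                     u32 reg, u32 val)
    {
            /* wait for GT FIFO space before touching force-wake protected registers */
            if (NEEDS_FORCE_WAKE(dev_priv, reg))
                    __gen6_gt_wait_for_fifo(dev_priv);

            /* Valleyview keeps display registers at a 0x180000 offset */
            if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg))
                    writel(val, dev_priv->regs + reg + 0x180000);
            else
                    writel(val, dev_priv->regs + reg);
    }
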
diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h
index 87c06f97fa89..f511fa2f4168 100644
--- a/trunk/drivers/gpu/drm/i915/i915_drv.h
+++ b/trunk/drivers/gpu/drm/i915/i915_drv.h
@@ -58,14 +58,6 @@ enum pipe {
};
#define pipe_name(p) ((p) + 'A')
-enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
- TRANSCODER_EDP = 0xF,
-};
-#define transcoder_name(t) ((t) + 'A')
-
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -101,12 +93,6 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
-struct intel_ddi_plls {
- int spll_refcount;
- int wrpll1_refcount;
- int wrpll2_refcount;
-};
-
/* Interface history:
*
* 1.1: Original.
@@ -137,6 +123,14 @@ struct drm_i915_gem_phys_object {
struct drm_i915_gem_object *cur_obj;
};
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ int start;
+ int size;
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -257,7 +251,6 @@ struct drm_i915_display_funcs {
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
- void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -270,6 +263,7 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
+ void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
@@ -344,7 +338,6 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
- struct drm_device *dev;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
@@ -390,18 +383,154 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
- u32 force_bit;
+ bool force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
-struct i915_suspend_saved_registers {
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ const struct intel_device_info *info;
+
+ int relative_constants_mode;
+
+ void __iomem *regs;
+
+ struct drm_i915_gt_funcs gt;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct spinlock gt_lock;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct mutex gmbus_mutex;
+
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
+ struct pci_dev *bridge_dev;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
+ uint32_t next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ uint32_t counter;
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+
+ struct resource mch_res;
+
+ atomic_t irq_received;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ u32 pipestat[2];
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
+
+ u32 hotplug_supported_mask;
+ struct work_struct hotplug_work;
+
+ int num_pipe;
+ int num_pch_pll;
+
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd[I915_NUM_RINGS];
+ uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+ unsigned int stop_rings;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+
+ struct intel_opregion opregion;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
+
+ /* LVDS info */
+ int backlight_level; /* restore backlight to this value */
+ bool backlight_enabled;
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
+ bool no_aux_handshake;
+
+ struct notifier_block lid_notifier;
+
+ int crt_ddc_pin;
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ spinlock_t error_lock;
+ /* Protected by dev->error_lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct error_work;
+ struct completion error_completion;
+ struct workqueue_struct *wq;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+
+ unsigned long quirks;
+
+ /* Register state */
+ bool modeset_on_lid;
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
+ u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -547,206 +676,10 @@ struct i915_suspend_saved_registers {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
-};
-
-struct intel_gen6_power_mgmt {
- struct work_struct work;
- u32 pm_iir;
- /* lock - irqsave spinlock that protects the work_struct and
- * pm_iir. */
- spinlock_t lock;
-
- /* The below variables and all the rps hw state are protected by
- * dev->struct_mutex. */
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
-
- struct delayed_work delayed_resume_work;
-
- /*
- * Protects RPS/RC6 register access and PCU communication.
- * Must be taken after struct_mutex if nested.
- */
- struct mutex hw_lock;
-};
-
-struct intel_ilk_power_mgmt {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- u8 corr;
-
- int c_m;
- int r_t;
-
- struct drm_i915_gem_object *pwrctx;
- struct drm_i915_gem_object *renderctx;
-};
-
-struct i915_dri1_state {
- unsigned allow_batchbuffer : 1;
- u32 __iomem *gfx_hws_cpu_addr;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
- uint32_t counter;
-};
-
-struct intel_l3_parity {
- u32 *remap_info;
- struct work_struct error_work;
-};
-
-typedef struct drm_i915_private {
- struct drm_device *dev;
-
- const struct intel_device_info *info;
-
- int relative_constants_mode;
-
- void __iomem *regs;
-
- struct drm_i915_gt_funcs gt;
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- struct spinlock gt_lock;
-
- struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
-
- /** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct mutex gmbus_mutex;
-
- /**
- * Base address of the gmbus and gpio block.
- */
- uint32_t gpio_mmio_base;
-
- struct pci_dev *bridge_dev;
- struct intel_ring_buffer ring[I915_NUM_RINGS];
- uint32_t next_seqno;
-
- drm_dma_handle_t *status_page_dmah;
- struct resource mch_res;
-
- atomic_t irq_received;
-
- /* protects the irq masks */
- spinlock_t irq_lock;
-
- /* DPIO indirect register protection */
- spinlock_t dpio_lock;
-
- /** Cached value of IMR to avoid reads in updating the bitfield */
- u32 pipestat[2];
- u32 irq_mask;
- u32 gt_irq_mask;
- u32 pch_irq_mask;
-
- u32 hotplug_supported_mask;
- struct work_struct hotplug_work;
-
- int num_pipe;
- int num_pch_pll;
-
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
- struct timer_list hangcheck_timer;
- int hangcheck_count;
- uint32_t last_acthd[I915_NUM_RINGS];
- uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
- unsigned int stop_rings;
-
- unsigned long cfb_size;
- unsigned int cfb_fb;
- enum plane cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
- struct intel_opregion opregion;
-
- /* overlay */
- struct intel_overlay *overlay;
- bool sprite_scaling_enabled;
-
- /* LVDS info */
- int backlight_level; /* restore backlight to this value */
- bool backlight_enabled;
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
- /* Feature bits from the VBIOS */
- unsigned int int_tv_support:1;
- unsigned int lvds_dither:1;
- unsigned int lvds_vbt:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int display_clock_mode:1;
- int lvds_ssc_freq;
- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
- unsigned int lvds_val; /* used for checking LVDS channel mode */
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
-
- bool initialized;
- bool support;
- int bpp;
- struct edp_power_seq pps;
- } edp;
- bool no_aux_handshake;
-
- int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
- spinlock_t error_lock;
- /* Protected by dev->error_lock. */
- struct drm_i915_error_state *first_error;
- struct work_struct error_work;
- struct completion error_completion;
- struct workqueue_struct *wq;
-
- /* Display functions */
- struct drm_i915_display_funcs display;
-
- /* PCH chipset type */
- enum intel_pch pch_type;
- unsigned short pch_id;
-
- unsigned long quirks;
-
- /* Register state */
- bool modeset_on_lid;
struct {
/** Bridge to intel-gtt-ko */
- struct intel_gtt *gtt;
+ const struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
@@ -773,6 +706,8 @@ typedef struct drm_i915_private {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
+ u32 *l3_remap_info;
+
struct shrinker inactive_shrinker;
/**
@@ -850,6 +785,19 @@ typedef struct drm_i915_private {
u32 object_count;
} mm;
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+ } dri1;
+
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -863,7 +811,6 @@ typedef struct drm_i915_private {
wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
- struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
@@ -873,17 +820,46 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
+ struct drm_connector *int_lvds_connector;
+ struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
- struct intel_l3_parity l3_parity;
-
/* gen6+ rps state */
- struct intel_gen6_power_mgmt rps;
+ struct {
+ struct work_struct work;
+ u32 pm_iir;
+ /* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ spinlock_t lock;
+
+ /* The below variables and all the rps hw state are protected by
+ * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ } rps;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
- struct intel_ilk_power_mgmt ips;
+ struct {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+ } ips;
enum no_fbc_reason no_fbc_reason;
@@ -895,25 +871,14 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
- /*
- * The console may be contended at resume, but we don't
- * want it to block on it.
- */
- struct work_struct console_resume_work;
-
struct backlight_device *backlight;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
+ struct work_struct parity_error_work;
bool hw_contexts_disabled;
uint32_t hw_context_size;
-
- struct i915_suspend_saved_registers regfile;
-
- /* Old dri1 support infrastructure, beware the dragons ya fools entering
- * here! */
- struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1155,14 +1120,9 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
- (dev)->pci_device == 0x0152 || \
- (dev)->pci_device == 0x015a)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_ULT(dev) (IS_HASWELL(dev) && \
- ((dev)->pci_device & 0xFF00) == 0x0A00)
/*
* The genX designation typically refers to the render engine, so render
@@ -1208,13 +1168,6 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1297,7 +1250,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
@@ -1305,7 +1257,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1548,14 +1499,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
-int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_gtt_fini(struct drm_device *dev);
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
-{
- if (INTEL_INFO(dev)->gen < 6)
- intel_gtt_chipset_flush();
-}
-
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1685,9 +1628,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c
index b0016bb65631..107f09befe92 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -845,12 +845,12 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
- i915_gem_chipset_flush(dev);
+ intel_gtt_chipset_flush();
}
}
if (needs_clflush_after)
- i915_gem_chipset_flush(dev);
+ intel_gtt_chipset_flush();
return ret;
}
@@ -1345,17 +1345,30 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Now bind it into the GTT if needed */
- ret = i915_gem_object_pin(obj, 0, true, false);
- if (ret)
- goto unlock;
+ if (!obj->map_and_fenceable) {
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ goto unlock;
+ }
+ if (!obj->gtt_space) {
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
+ if (ret)
+ goto unlock;
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unpin;
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+ }
+
+ if (!obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
ret = i915_gem_object_get_fence(obj);
if (ret)
- goto unpin;
+ goto unlock;
+
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->fault_mappable = true;
@@ -1364,8 +1377,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-unpin:
- i915_gem_object_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -2011,12 +2022,12 @@ i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
mod_timer(&dev_priv->hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+ jiffies +
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
if (was_empty) {
queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
+ &dev_priv->mm.retire_work, HZ);
intel_mark_busy(dev_priv->dev);
}
}
@@ -2207,8 +2218,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
return;
}
@@ -2226,8 +2236,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
if (!dev_priv->mm.suspended && !idle)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
if (idle)
intel_mark_idle(dev);
@@ -2914,14 +2923,13 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- i915_gem_object_pin_pages(obj);
-
search_free:
if (map_and_fenceable)
- free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
- size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
+ free_space =
+ drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ 0, dev_priv->mm.gtt_mappable_end,
+ false);
else
free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
@@ -2929,60 +2937,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (free_space != NULL) {
if (map_and_fenceable)
- free_space =
+ obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
- free_space =
+ obj->gtt_space =
drm_mm_get_block_generic(free_space,
size, alignment, obj->cache_level,
false);
}
- if (free_space == NULL) {
+ if (obj->gtt_space == NULL) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
+ if (ret)
return ret;
- }
goto search_free;
}
if (WARN_ON(!i915_gem_valid_gtt_space(dev,
- free_space,
+ obj->gtt_space,
obj->cache_level))) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(free_space);
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
return -EINVAL;
}
+
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(free_space);
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
return ret;
}
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- obj->gtt_space = free_space;
- obj->gtt_offset = free_space->start;
+ obj->gtt_offset = obj->gtt_space->start;
fenceable =
- free_space->size == fence_size &&
- (free_space->start & (fence_alignment - 1)) == 0;
+ obj->gtt_space->size == fence_size &&
+ (obj->gtt_space->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable;
- i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
@@ -3051,7 +3059,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
i915_gem_clflush_object(obj);
- i915_gem_chipset_flush(obj->base.dev);
+ intel_gtt_chipset_flush();
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3446,16 +3454,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (obj->gtt_space == NULL) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
-
- if (!dev_priv->mm.aliasing_ppgtt)
- i915_gem_gtt_bind_object(obj, obj->cache_level);
}
if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3829,7 +3832,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
if (!IS_IVYBRIDGE(dev))
return;
- if (!dev_priv->l3_parity.remap_info)
+ if (!dev_priv->mm.l3_remap_info)
return;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3838,12 +3841,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
- if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
+ if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
- if (remap && !dev_priv->l3_parity.remap_info[i/4])
+ if (remap && !dev_priv->mm.l3_remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
- I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+ I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
}
/* Make sure all the writes land before disabling dop clock gating */
@@ -3873,6 +3876,68 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
+void i915_gem_init_ppgtt(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t pd_offset;
+ struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ uint32_t __iomem *pd_addr;
+ uint32_t pd_entry;
+ int i;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ return;
+
+
+ pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ if (dev_priv->mm.gtt->needs_dmar)
+ pt_addr = ppgtt->pt_dma_addr[i];
+ else
+ pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ pd_offset = ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines, */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for_each_ring(ring, dev_priv, i) {
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+ }
+}
+
static bool
intel_enable_blt(struct drm_device *dev)
{
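
i915_gem_init_ppgtt() above packs the page-directory offset into RING_PP_DIR_BASE as cachelines shifted into the upper half of the register. A worked example with an assumed offset:

    /* assume the page directory starts 0x10000 bytes into the GTT */
    u32 pd_offset = 0x10000;
    u32 reg_val   = (pd_offset / 64) << 16;  /* 0x400 cachelines -> 0x04000000 */
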
@@ -3895,7 +3960,7 @@ i915_gem_init_hw(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ if (!intel_enable_gtt())
return -EIO;
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4230,7 +4295,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_cache_release(page);
}
}
- i915_gem_chipset_flush(dev);
+ intel_gtt_chipset_flush();
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
@@ -4317,7 +4382,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
return -EFAULT;
}
- i915_gem_chipset_flush(dev);
+ intel_gtt_chipset_flush();
return 0;
}
@@ -4342,19 +4407,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
- if (!mutex_is_locked(mutex))
- return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
-
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -4365,15 +4417,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
- bool unlock = true;
int cnt;
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
-
- unlock = false;
- }
+ if (!mutex_trylock(&dev->struct_mutex))
+ return 0;
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
@@ -4389,7 +4436,6 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
return cnt;
}
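
The final i915_gem.c hunks remove the mutex_is_locked_by() recursion check, so the shrinker simply backs off when struct_mutex is contended. The resulting callback shape, sketched with the reclaim body elided:

    static int example_inactive_shrink(struct shrinker *shrinker,
                                       struct shrink_control *sc)
    {
            struct drm_i915_private *dev_priv =
                    container_of(shrinker, struct drm_i915_private,
                                 mm.inactive_shrinker);
            struct drm_device *dev = dev_priv->dev;
            int cnt = 0;

            if (!mutex_trylock(&dev->struct_mutex))
                    return 0;       /* contended: report nothing freeable this pass */

            /* ... purge caches and count unpinned inactive objects here ... */

            mutex_unlock(&dev->struct_mutex);
            return cnt;
    }
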
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_context.c b/trunk/drivers/gpu/drm/i915/i915_gem_context.c
index 0e510df80d73..05ed42f203d7 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_context.c
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
struct i915_hw_context *ctx;
int ret, id;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 48e4317e72dc..3eea143749f6 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -128,6 +128,15 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj->cache_level);
}
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (unlikely(target_offset == 0)) {
+ DRM_DEBUG("No GTT space found for object %d\n",
+ reloc->target_handle);
+ return ret;
+ }
+
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
@@ -663,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
- i915_gem_chipset_flush(ring->dev);
+ intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -791,7 +800,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
- u32 flags;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@@ -803,14 +811,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
- flags = 0;
- if (args->flags & I915_EXEC_SECURE) {
- if (!file->is_master || !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- flags |= I915_DISPATCH_SECURE;
- }
-
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@@ -983,13 +983,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
- /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
- * batch" bit. Hence we need to pin secure batches into the global gtt.
- * hsw should have this fixed, but let's be paranoid and do it
- * unconditionally for now. */
- if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
- i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
-
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
@@ -1035,7 +1028,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- trace_i915_gem_ring_dispatch(ring, seqno, flags);
+ trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
@@ -1047,15 +1040,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
+ exec_start, exec_len);
if (ret)
goto err;
}
} else {
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
+ ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
if (ret)
goto err;
}
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c
index f7ac61ee1504..df470b5e8d36 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,67 +28,19 @@
#include "i915_trace.h"
#include "intel_drv.h"
-typedef uint32_t gtt_pte_t;
-
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID (1 << 0)
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define HSW_PTE_UNCACHED (0)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-
-static inline gtt_pte_t pte_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
-{
- gtt_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
-
- switch (level) {
- case I915_CACHE_LLC_MLC:
- /* Haswell doesn't set L3 this way */
- if (IS_HASWELL(dev))
- pte |= GEN6_PTE_CACHE_LLC;
- else
- pte |= GEN6_PTE_CACHE_LLC_MLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- if (IS_HASWELL(dev))
- pte |= HSW_PTE_UNCACHED;
- else
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- BUG();
- }
-
-
- return pte;
-}
-
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
- gtt_pte_t *pt_vaddr;
- gtt_pte_t scratch_pte;
+ uint32_t *pt_vaddr;
+ uint32_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
- I915_CACHE_LLC);
+ scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
+ scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -125,7 +77,6 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return ret;
- ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
@@ -167,7 +118,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
- ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
+ ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
@@ -217,9 +168,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct sg_table *pages,
unsigned first_entry,
- enum i915_cache_level cache_level)
+ uint32_t pte_flags)
{
- gtt_pte_t *pt_vaddr;
+ uint32_t *pt_vaddr, pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
@@ -237,8 +188,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
- pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
- cache_level);
+ pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+ pt_vaddr[j] = pte | pte_flags;
/* grab the next page */
if (++m == segment_len) {
@@ -262,10 +213,29 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
+ uint32_t pte_flags = GEN6_PTE_VALID;
+
+ switch (cache_level) {
+ case I915_CACHE_LLC_MLC:
+ pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte_flags |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ if (IS_HASWELL(obj->base.dev))
+ pte_flags |= HSW_PTE_UNCACHED;
+ else
+ pte_flags |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ BUG();
+ }
+
i915_ppgtt_insert_sg_entries(ppgtt,
obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ pte_flags);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -276,65 +246,23 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT);
}
-void i915_gem_init_ppgtt(struct drm_device *dev)
+/* XXX kill agp_type! */
+static unsigned int cache_level_to_agp_type(struct drm_device *dev,
+ enum i915_cache_level cache_level)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t pd_offset;
- struct intel_ring_buffer *ring;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- uint32_t __iomem *pd_addr;
- uint32_t pd_entry;
- int i;
-
- if (!dev_priv->mm.aliasing_ppgtt)
- return;
-
-
- pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
-
- if (dev_priv->mm.gtt->needs_dmar)
- pt_addr = ppgtt->pt_dma_addr[i];
- else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, pd_addr + i);
- }
- readl(pd_addr);
-
- pd_offset = ppgtt->pd_offset;
- pd_offset /= 64; /* in cachelines, */
- pd_offset <<= 16;
-
- if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk, gab_ctl, ecobits;
-
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
- ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- } else if (INTEL_INFO(dev)->gen >= 7) {
- I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
- /* GFX_MODE is per-ring on gen7+ */
- }
-
- for_each_ring(ring, dev_priv, i) {
- if (INTEL_INFO(dev)->gen >= 7)
- I915_WRITE(RING_MODE_GEN7(ring),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+ switch (cache_level) {
+ case I915_CACHE_LLC_MLC:
+ if (INTEL_INFO(dev)->gen >= 6)
+ return AGP_USER_CACHED_MEMORY_LLC_MLC;
+ /* Older chipsets do not have this extra level of CPU
+		 * caching, so fall through and request the PTE simply
+ * as cached.
+ */
+ case I915_CACHE_LLC:
+ return AGP_USER_CACHED_MEMORY;
+ default:
+ case I915_CACHE_NONE:
+ return AGP_USER_MEMORY;
}
}
@@ -360,40 +288,13 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
-
-static void i915_ggtt_clear_range(struct drm_device *dev,
- unsigned first_entry,
- unsigned num_entries)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- gtt_pte_t scratch_pte;
- gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
- const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
- int i;
-
- if (INTEL_INFO(dev)->gen < 6) {
- intel_gtt_clear_range(first_entry, num_entries);
- return;
- }
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
- for (i = 0; i < num_entries; i++)
-		iowrite32(scratch_pte, &gtt_base[i]);
- readl(gtt_base);
-}
-
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
+ intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -401,7 +302,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
- i915_gem_chipset_flush(dev);
+ intel_gtt_chipset_flush();
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -417,76 +318,21 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
-/*
- * Binds an object into the global gtt with the specified cache level. The object
- * will be accessible to the GPU via commands whose operands reference offsets
- * within the global GTT as well as accessible by the GPU through the GMADR
- * mapped BAR (dev_priv->mm.gtt->gtt).
- */
-static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level level)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct sg_table *st = obj->pages;
- struct scatterlist *sg = st->sgl;
- const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
- const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
- gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
- int unused, i = 0;
- unsigned int len, m = 0;
- dma_addr_t addr;
-
- for_each_sg(st->sgl, sg, st->nents, unused) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- for (m = 0; m < len; m++) {
- addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
- i++;
- }
- }
-
- BUG_ON(i > max_entries);
- BUG_ON(i != obj->base.size / PAGE_SIZE);
-
- /* XXX: This serves as a posting read to make sure that the PTE has
- * actually been updated. There is some concern that even though
- * registers and PTEs are within the same BAR that they are potentially
- * of NUMA access patterns. Therefore, even with the way we assume
- * hardware should work, we must keep this posting read for paranoia.
- */
- if (i != 0)
-		WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
-
- /* This next bit makes the above posting read even more important. We
- * want to flush the TLBs only after we're certain all the PTE updates
- * have finished.
- */
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
-}
-
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- if (INTEL_INFO(dev)->gen < 6) {
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- flags);
- } else {
- gen6_ggtt_bind_object(obj, cache_level);
- }
+ unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
- i915_ggtt_clear_range(obj->base.dev,
- obj->gtt_space->start >> PAGE_SHIFT,
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
@@ -544,161 +390,5 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* ... but ensure that we clear the entire range. */
- i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
-}
-
-static int setup_scratch_page(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct page *page;
- dma_addr_t dma_addr;
-
- page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
- if (page == NULL)
- return -ENOMEM;
- get_page(page);
- set_pages_uc(page, 1);
-
-#ifdef CONFIG_INTEL_IOMMU
- dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, dma_addr))
- return -EINVAL;
-#else
- dma_addr = page_to_phys(page);
-#endif
- dev_priv->mm.gtt->scratch_page = page;
- dev_priv->mm.gtt->scratch_page_dma = dma_addr;
-
- return 0;
-}
-
-static void teardown_scratch_page(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
- pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(dev_priv->mm.gtt->scratch_page);
- __free_page(dev_priv->mm.gtt->scratch_page);
-}
-
-static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
-}
-
-static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
- return snb_gmch_ctl << 25; /* 32 MB units */
-}
-
-static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
-{
- static const int stolen_decoder[] = {
- 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
- snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
- snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
- return stolen_decoder[snb_gmch_ctl] << 20;
-}
-
-int i915_gem_gtt_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- phys_addr_t gtt_bus_addr;
- u16 snb_gmch_ctl;
- int ret;
-
- /* On modern platforms we need not worry ourself with the legacy
- * hostbridge query stuff. Skip it entirely
- */
- if (INTEL_INFO(dev)->gen < 6) {
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- return -EIO;
- }
-
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- intel_gmch_remove();
- return -ENODEV;
- }
- return 0;
- }
-
- dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
- if (!dev_priv->mm.gtt)
- return -ENOMEM;
-
- if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
- pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
-
- /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
- gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
- dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
-
- /* i9xx_setup */
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- dev_priv->mm.gtt->gtt_total_entries =
- gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
- if (INTEL_INFO(dev)->gen < 7)
- dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
- else
- dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
-
- dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
- /* 64/512MB is the current min/max we actually know of, but this is just a
- * coarse sanity check.
- */
- if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
- dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
- DRM_ERROR("Unknown GMADR entries (%d)\n",
- dev_priv->mm.gtt->gtt_mappable_entries);
- ret = -ENXIO;
- goto err_out;
- }
-
- ret = setup_scratch_page(dev);
- if (ret) {
- DRM_ERROR("Scratch setup failed\n");
- goto err_out;
- }
-
- dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
- dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
- if (!dev_priv->mm.gtt->gtt) {
- DRM_ERROR("Failed to map the gtt page table\n");
- teardown_scratch_page(dev);
- ret = -ENOMEM;
- goto err_out;
- }
-
- /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
- DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
- DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
- DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
-
- return 0;
-
-err_out:
- kfree(dev_priv->mm.gtt);
- if (INTEL_INFO(dev)->gen < 6)
- intel_gmch_remove();
- return ret;
-}
-
-void i915_gem_gtt_fini(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- iounmap(dev_priv->mm.gtt->gtt);
- teardown_scratch_page(dev);
- if (INTEL_INFO(dev)->gen < 6)
- intel_gmch_remove();
- kfree(dev_priv->mm.gtt);
+ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c
index 2604867e6b7d..32e1bda865b8 100644
--- a/trunk/drivers/gpu/drm/i915/i915_irq.c
+++ b/trunk/drivers/gpu/drm/i915/i915_irq.c
@@ -122,10 +122,7 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
-
- return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
+ return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@@ -185,8 +182,6 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -195,7 +190,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
}
/* Get vtotal. */
- vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+ vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
if (INTEL_INFO(dev)->gen >= 4) {
/* No obvious pixelcount register. Only query vertical
@@ -215,13 +210,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
- htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+ htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
}
/* Query vblank area. */
- vbl = I915_READ(VBLANK(cpu_transcoder));
+ vbl = I915_READ(VBLANK(pipe));
/* Test position against vblank region. */
vbl_start = vbl & 0x1fff;
@@ -357,7 +352,8 @@ static void notify_ring(struct drm_device *dev,
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+ jiffies +
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
}
@@ -378,7 +374,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->dev->struct_mutex);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1;
@@ -393,7 +389,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
gen6_set_rps(dev_priv->dev, new_delay);
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -409,7 +405,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
- l3_parity.error_work);
+ parity_error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
uint32_t misccpctl;
@@ -473,7 +469,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
+ queue_work(dev_priv->wq, &dev_priv->parity_error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
@@ -524,7 +520,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
-static irqreturn_t valleyview_irq_handler(int irq, void *arg)
+static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -610,9 +606,6 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- if (pch_iir & SDE_HOTPLUG_MASK)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -653,9 +646,6 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -680,7 +670,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe)));
}
-static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -719,6 +709,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
@@ -753,12 +745,13 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[VCS]);
}
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
+ u32 hotplug_mask;
atomic_inc(&dev_priv->irq_received);
@@ -776,6 +769,11 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
(!IS_GEN6(dev) || pm_iir == 0))
goto done;
+ if (HAS_PCH_CPT(dev))
+ hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+ else
+ hotplug_mask = SDE_HOTPLUG_MASK;
+
ret = IRQ_HANDLED;
if (IS_GEN5(dev))
@@ -804,6 +802,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
+ if (pch_iir & hotplug_mask)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
@@ -1751,7 +1751,7 @@ void i915_hangcheck_elapsed(unsigned long data)
repeat:
/* Reset timer case chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+ jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
/* drm_dma.h hooks
@@ -1956,7 +1956,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
- u32 render_irqs;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1996,12 +1995,21 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
+ dev_priv->gt_irq_mask = ~0;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
- GEN6_BLITTER_USER_INTERRUPT;
- I915_WRITE(GTIER, render_irqs);
+ I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
+ GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BLT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_RENDER_CS_ERROR_INTERRUPT |
+ GT_SYNC_STATUS |
+ GT_USER_INTERRUPT);
POSTING_READ(GTIER);
/* ack & enable invalid PTE error interrupts */
@@ -2011,6 +2019,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+#if 0 /* FIXME: check register definitions; some have moved */
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2018,14 +2027,15 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
+#endif
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
@@ -2119,7 +2129,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i8xx_irq_handler(int irq, void *arg)
+static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2297,7 +2307,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i915_irq_handler(int irq, void *arg)
+static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2535,7 +2545,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
return 0;
}
-static irqreturn_t i965_irq_handler(int irq, void *arg)
+static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2681,7 +2691,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
- INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h
index 97fbd9d1823b..a4162ddff6c5 100644
--- a/trunk/drivers/gpu/drm/i915/i915_reg.h
+++ b/trunk/drivers/gpu/drm/i915/i915_reg.h
@@ -26,7 +26,6 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
-#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -41,14 +40,6 @@
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK 0x3
-#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK 0x1f
-#define IVB_GMCH_GMS_SHIFT 4
-#define IVB_GMCH_GMS_MASK 0xf
-
/* PCI config space */
@@ -114,6 +105,23 @@
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define HSW_PTE_UNCACHED (0)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_CACHE_BITS (3 << 1)
+#define GEN6_PTE_GFDT (1 << 3)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -233,18 +241,11 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
-#define MI_FLUSH_DW_STORE_INDEX (1<<21)
-#define MI_INVALIDATE_TLB (1<<18)
-#define MI_FLUSH_DW_OP_STOREDW (1<<14)
-#define MI_INVALIDATE_BSD (1<<7)
-#define MI_FLUSH_DW_USE_GTT (1<<2)
-#define MI_FLUSH_DW_USE_PPGTT (0<<2)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_INVALIDATE_BSD (1<<7)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
-#define MI_BATCH_NON_SECURE (1)
-/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
-#define MI_BATCH_PPGTT_HSW (1<<8)
-#define MI_BATCH_NON_SECURE_HSW (1<<13)
+#define MI_BATCH_NON_SECURE (1)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@@ -368,7 +369,6 @@
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
-#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
@@ -384,9 +384,6 @@
#define DPIO_FASTCLK_DISABLE 0x8100
-#define DPIO_DATA_CHANNEL1 0x8220
-#define DPIO_DATA_CHANNEL2 0x8420
-
/*
* Fence registers
*/
@@ -524,7 +521,6 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
-#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
@@ -551,8 +547,6 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
-#define VLV_GUNIT_CLOCK_GATE 0x182060
-#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
@@ -667,7 +661,6 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
-#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -677,8 +670,6 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
-#define GFX_FLSH_CNTL_GEN6 0x101008
-#define GFX_FLSH_CNTL_EN (1<<0)
#define ECOSKPD 0x021d0
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
@@ -1568,14 +1559,14 @@
#define _VSYNCSHIFT_B 0x61028
-#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
-#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
-#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -2650,7 +2641,6 @@
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
-#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2721,7 +2711,7 @@
#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -3008,19 +2998,12 @@
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-#define DISPPLANE_YUV422 (0x0<<26)
#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_BGRA555 (0x3<<26)
-#define DISPPLANE_BGRX555 (0x4<<26)
-#define DISPPLANE_BGRX565 (0x5<<26)
-#define DISPPLANE_BGRX888 (0x6<<26)
-#define DISPPLANE_BGRA888 (0x7<<26)
-#define DISPPLANE_RGBX101010 (0x8<<26)
-#define DISPPLANE_RGBA101010 (0x9<<26)
-#define DISPPLANE_BGRX101010 (0xa<<26)
-#define DISPPLANE_RGBX161616 (0xc<<26)
-#define DISPPLANE_RGBX888 (0xe<<26)
-#define DISPPLANE_RGBA888 (0xf<<26)
+#define DISPPLANE_15_16BPP (0x4<<26)
+#define DISPPLANE_16BPP (0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
+#define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -3041,8 +3024,6 @@
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
-#define _DSPAOFFSET 0x701A4 /* HSW */
-#define _DSPASURFLIVE 0x701AC
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3052,8 +3033,6 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
-#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
-#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3099,8 +3078,6 @@
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
-#define _DSPBOFFSET 0x711A4
-#define _DSPBSURFLIVE 0x711AC
/* Sprite A control */
#define _DVSACNTR 0x72180
@@ -3166,7 +3143,6 @@
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
-#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
@@ -3201,8 +3177,6 @@
#define _SPRA_SURF 0x7029c
#define _SPRA_KEYMAX 0x702a0
#define _SPRA_TILEOFF 0x702a4
-#define _SPRA_OFFSET 0x702a4
-#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
#define SPRITE_SCALE_ENABLE (1<<31)
#define SPRITE_FILTER_MASK (3<<29)
@@ -3223,8 +3197,6 @@
#define _SPRB_SURF 0x7129c
#define _SPRB_KEYMAX 0x712a0
#define _SPRB_TILEOFF 0x712a4
-#define _SPRB_OFFSET 0x712a4
-#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
@@ -3238,10 +3210,8 @@
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
-#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
-#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -3276,6 +3246,12 @@
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
+#define PCH_DSPCLK_GATE_D 0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
+# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+
#define PCH_3DCGDIS0 0x46020
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3325,22 +3301,20 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
-#define PF_PIPE_SEL_MASK_IVB (3<<29)
-#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
@@ -3449,13 +3423,15 @@
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
+#define ILK_DSPCLK_GATE 0x42020
+#define IVB_VRHUNIT_CLK_GATE (1<<28)
+#define ILK_DPARB_CLK_GATE (1<<5)
+#define ILK_DPFD_CLK_GATE (1<<7)
-#define ILK_DSPCLK_GATE_D 0x42020
-#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
-#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
-#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
+/* According to spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
+#define ILK_CLK_FBC (1<<7)
+#define ILK_DPFC_DIS1 (1<<8)
+#define ILK_DPFC_DIS2 (1<<9)
#define IVB_CHICKEN3 0x4200c
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@@ -3471,21 +3447,14 @@
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
-#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
-#define GEN7_L3SQCREG4 0xb034
-#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
-
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
-#define HSW_FUSE_STRAP 0x42014
-#define HSW_CDCLK_LIMIT (1 << 24)
-
/* PCH */
/* south display engine interrupt: IBX */
@@ -3717,7 +3686,7 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60200
+#define VLV_VIDEO_DIP_CTL_A 0x60220
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
@@ -3826,22 +3795,16 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
-#define _TRANSA_CHICKEN1 0xf0060
-#define _TRANSB_CHICKEN1 0xf1060
-#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-
+#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
-#define FDI_BC_BIFURCATION_SELECT (1 << 12)
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define SOUTH_CHICKEN2 0xc2004
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
@@ -3853,7 +3816,6 @@
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
-#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
@@ -3939,21 +3901,16 @@
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
-#define _FDI_RXA_MISC 0xf0010
-#define _FDI_RXB_MISC 0xf1010
-#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
-#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
-#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
-#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
-
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -4046,11 +4003,6 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PCH_PP_OFF_DELAYS 0xc720c
-#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
-#define PANEL_POWER_PORT_LVDS (0 << 30)
-#define PANEL_POWER_PORT_DP_A (1 << 30)
-#define PANEL_POWER_PORT_DP_C (2 << 30)
-#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4098,7 +4050,7 @@
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
+#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4156,8 +4108,6 @@
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
-#define FORCEWAKE_KERNEL 0x1
-#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -4270,10 +4220,6 @@
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
-#define GEN6_PCODE_WRITE_RC6VIDS 0x4
-#define GEN6_PCODE_READ_RC6VIDS 0x5
-#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
-#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
@@ -4305,15 +4251,6 @@
#define GEN7_L3LOG_BASE 0xB070
#define GEN7_L3LOG_SIZE 0x80
-#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
-#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
-#define GEN7_MAX_PS_THREAD_DEP (8<<12)
-#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
-
-#define GEN7_ROW_CHICKEN2 0xe4f4
-#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
-#define DOP_CLOCK_GATING_DISABLE (1<<0)
-
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4443,39 +4380,33 @@
#define HSW_PWR_WELL_CTL6 0x45414
/* Per-pipe DDI Function Control */
-#define TRANS_DDI_FUNC_CTL_A 0x60400
-#define TRANS_DDI_FUNC_CTL_B 0x61400
-#define TRANS_DDI_FUNC_CTL_C 0x62400
-#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
-#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
- TRANS_DDI_FUNC_CTL_B)
-#define TRANS_DDI_FUNC_ENABLE (1<<31)
+#define PIPE_DDI_FUNC_CTL_A 0x60400
+#define PIPE_DDI_FUNC_CTL_B 0x61400
+#define PIPE_DDI_FUNC_CTL_C 0x62400
+#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
+ PIPE_DDI_FUNC_CTL_B)
+#define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_MASK (7<<28)
-#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
-#define TRANS_DDI_PORT_NONE (0<<28)
-#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
-#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
-#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
-#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
-#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
-#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
-#define TRANS_DDI_BPC_MASK (7<<20)
-#define TRANS_DDI_BPC_8 (0<<20)
-#define TRANS_DDI_BPC_10 (1<<20)
-#define TRANS_DDI_BPC_6 (2<<20)
-#define TRANS_DDI_BPC_12 (3<<20)
-#define TRANS_DDI_PVSYNC (1<<17)
-#define TRANS_DDI_PHSYNC (1<<16)
-#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
-#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
-#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
-#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
-#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
-#define TRANS_DDI_BFI_ENABLE (1<<4)
-#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
-#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
-#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
+#define PIPE_DDI_PORT_MASK (7<<28)
+#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
+#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
+#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
+#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
+#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
+#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
+#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
+#define PIPE_DDI_BPC_MASK (7<<20)
+#define PIPE_DDI_BPC_8 (0<<20)
+#define PIPE_DDI_BPC_10 (1<<20)
+#define PIPE_DDI_BPC_6 (2<<20)
+#define PIPE_DDI_BPC_12 (3<<20)
+#define PIPE_DDI_PVSYNC (1<<17)
+#define PIPE_DDI_PHSYNC (1<<16)
+#define PIPE_DDI_BFI_ENABLE (1<<4)
+#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
+#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
+#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
@@ -4489,16 +4420,12 @@
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
-#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
-#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
-#define DP_TP_STATUS_IDLE_DONE (1<<25)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
@@ -4517,7 +4444,6 @@
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_IS_IDLE (1<<7)
-#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
@@ -4564,8 +4490,8 @@
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SSC (1<<28)
-#define SPLL_PLL_NON_SSC (2<<28)
+#define SPLL_PLL_SCC (1<<28)
+#define SPLL_PLL_NON_SCC (2<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
@@ -4574,7 +4500,7 @@
#define WRPLL_CTL2 0x46060
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
+#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@@ -4591,36 +4517,21 @@
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
-#define PORT_CLK_SEL_NONE (7<<29)
-
-/* Transcoder clock selection */
-#define TRANS_CLK_SEL_A 0x46140
-#define TRANS_CLK_SEL_B 0x46144
-#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
-/* For each transcoder, we need to select the corresponding port clock */
-#define TRANS_CLK_SEL_DISABLED (0x0<<29)
-#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
-
-#define _TRANSA_MSA_MISC 0x60410
-#define _TRANSB_MSA_MISC 0x61410
-#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
- _TRANSB_MSA_MISC)
-#define TRANS_MSA_SYNC_CLK (1<<0)
-#define TRANS_MSA_6_BPC (0<<5)
-#define TRANS_MSA_8_BPC (1<<5)
-#define TRANS_MSA_10_BPC (2<<5)
-#define TRANS_MSA_12_BPC (3<<5)
-#define TRANS_MSA_16_BPC (4<<5)
+
+/* Pipe clock selection */
+#define PIPE_CLK_SEL_A 0x46140
+#define PIPE_CLK_SEL_B 0x46144
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
+/* For each pipe, we need to select the corresponding port clock */
+#define PIPE_CLK_SEL_DISABLED (0x0<<29)
+#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
-#define LCPLL_CLK_FREQ_MASK (3<<26)
-#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
-#define LCPLL_CD_SOURCE_FCLK (1<<21)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
diff --git a/trunk/drivers/gpu/drm/i915/i915_suspend.c b/trunk/drivers/gpu/drm/i915/i915_suspend.c
index 63d4d30c39de..5854bddb1e9f 100644
--- a/trunk/drivers/gpu/drm/i915/i915_suspend.c
+++ b/trunk/drivers/gpu/drm/i915/i915_suspend.c
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
+ array = dev_priv->save_palette_a;
else
- array = dev_priv->regfile.save_palette_b;
+ array = dev_priv->save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
- array = dev_priv->regfile.save_palette_a;
+ array = dev_priv->save_palette_a;
else
- array = dev_priv->regfile.save_palette_b;
+ array = dev_priv->save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
- dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
+ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
/* MSR bits */
- dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
- if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
+ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev)
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
- dev_priv->regfile.saveCR[i] =
+ dev_priv->saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
- dev_priv->regfile.saveCR[0x11] &= ~0x80;
+ dev_priv->saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
- dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
- dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
- dev_priv->regfile.saveGR[i] =
+ dev_priv->saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
- dev_priv->regfile.saveGR[0x10] =
+ dev_priv->saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->regfile.saveGR[0x11] =
+ dev_priv->saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->regfile.saveGR[0x18] =
+ dev_priv->saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
- dev_priv->regfile.saveSR[i] =
+ dev_priv->saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev)
u16 cr_index, cr_data, st01;
/* MSR bits */
- I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
- if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev)
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->regfile.saveSR[i]);
+ dev_priv->saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
- i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
- i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->regfile.saveGR[i]);
+ dev_priv->saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->regfile.saveGR[0x10]);
+ dev_priv->saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->regfile.saveGR[0x11]);
+ dev_priv->saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->regfile.saveGR[0x18]);
+ dev_priv->saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
- i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
+ i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
- I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
- I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
+ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
}
static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,162 +244,156 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
- dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
- dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
- dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
- dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
- dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
- dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
+ dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
- dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
- dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
- dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
- dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
+ dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
- dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
- dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
- dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
+ dev_priv->saveFPA0 = I915_READ(_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
- dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
- dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
- dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
- dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
- dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
- dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
+ dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+ dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
- dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
- dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
- dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
- dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
- dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
- dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
- dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
- dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
- dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
- dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
- dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
- dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
- dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
- dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
- }
-
- dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
- dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
- dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
- dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
- dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
+ dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
- dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
- dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
+ dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
- dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
- dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
+ dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
- dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
- dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
- dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
+ dev_priv->saveFPB0 = I915_READ(_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
- dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
- dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
- dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
- dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
- dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
- dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
+ dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+ dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
- dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
- dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
- dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
- dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
- dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
- dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
- dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
- dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
- dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
- dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
- dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
- dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
- dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
- dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
- dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
- }
-
- dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
- dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
- dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
- dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
- dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
+ dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
- dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
- dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+ dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
- dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
- else
- dev_priv->regfile.saveADPA = I915_READ(ADPA);
-
return;
}
@@ -418,20 +412,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
case 7:
case 6:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
break;
}
@@ -453,164 +447,158 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
}
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
- I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
- if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
+ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
- I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
- I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
+ I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
/* Actually enable it */
- I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
+ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
+ I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
- I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
- I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
- I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
- I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
- I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
+ I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
+ I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
- I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
- I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
- I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
- I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
- I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
- I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
- I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
- I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
+ I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
- I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
- I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
- I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
- I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
- I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
- I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
- I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+ I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
}
/* Restore plane info */
- I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
- I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
- I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
- I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
- I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
+ I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
- I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
+ I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
- I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
+ I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
- I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
+ I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
- if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
+ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
- I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
- I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
+ I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
/* Actually enable it */
- I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
+ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
+ I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
- I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
- I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
- I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
- I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
- I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
+ I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
+ I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
- I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
- I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
- I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
- I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
- I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
- I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
- I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
- I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
+ I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
- I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
- I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
- I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
- I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
- I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
- I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
- I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+ I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
}
/* Restore plane info */
- I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
- I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
- I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
- I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
- I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
+ I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
- I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
+ I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
- I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
+ I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
- I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
+ I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
- I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
- I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
- I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
- I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
- I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
- I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
+ I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
-
- /* CRT state */
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
- else
- I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
return;
}
@@ -620,84 +608,89 @@ static void i915_save_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
- dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+ dev_priv->saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
- /* Don't regfile.save them in KMS mode */
+ /* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->saveADPA = I915_READ(PCH_ADPA);
+ } else {
+ dev_priv->saveADPA = I915_READ(ADPA);
+ }
+
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
- dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
- dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ dev_priv->saveLVDS = I915_READ(PCH_LVDS);
} else {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->regfile.saveLVDS = I915_READ(LVDS);
+ dev_priv->saveLVDS = I915_READ(LVDS);
}
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
- dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
- dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else {
- dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
- }
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- dev_priv->regfile.saveDP_B = I915_READ(DP_B);
- dev_priv->regfile.saveDP_C = I915_READ(DP_C);
- dev_priv->regfile.saveDP_D = I915_READ(DP_D);
- dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
- dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
- dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
- dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
- dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
- dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
- dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
- dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
- }
- /* FIXME: regfile.save TV & SDVO state */
- }
-
- /* Only regfile.save FBC state on the platform that supports FBC */
+ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->saveDP_B = I915_READ(DP_B);
+ dev_priv->saveDP_C = I915_READ(DP_C);
+ dev_priv->saveDP_D = I915_READ(DP_D);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+ }
+ /* FIXME: save TV & SDVO state */
+
+ /* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
- dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
}
/* VGA state */
- dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
- dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
- dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->saveVGA0 = I915_READ(VGA0);
+ dev_priv->saveVGA1 = I915_READ(VGA1);
+ dev_priv->saveVGA_PD = I915_READ(VGA_PD);
if (HAS_PCH_SPLIT(dev))
- dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
- dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
+ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
}
@@ -707,95 +700,97 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
- I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display port ratios (must be done before clock is set) */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
- I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
- I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
- I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
- I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
- I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
- I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
- I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
- }
+ I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->saveADPA);
+
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
+ I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
- I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
+ I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
- I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
+ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
* otherwise we get blank eDP screen after S3 on some machines
*/
- I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
- I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
- I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
- I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
- I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+ dev_priv->saveMCHBAR_RENDER_STANDBY);
} else {
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
- I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
- }
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Display Port state */
- if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
- I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
- I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
- }
- /* FIXME: restore TV & SDVO state */
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
}
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->saveDP_B);
+ I915_WRITE(DP_C, dev_priv->saveDP_C);
+ I915_WRITE(DP_D, dev_priv->saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
+
	/* only restore FBC info on the platform that supports FBC */
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
- I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
}
}
/* VGA state */
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
+ I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
- I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
+ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
- I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
- I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
- I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+ I915_WRITE(VGA0, dev_priv->saveVGA0);
+ I915_WRITE(VGA1, dev_priv->saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
@@ -807,45 +802,46 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
mutex_lock(&dev->struct_mutex);
+ /* Hardware status page */
+ dev_priv->saveHWS = I915_READ(HWS_PGA);
+
i915_save_display(dev);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.saveDEIER = I915_READ(DEIER);
- dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
- dev_priv->regfile.saveGTIER = I915_READ(GTIER);
- dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
- dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
- dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
- dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
- I915_READ(RSTDBYCTL);
- dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
- } else {
- dev_priv->regfile.saveIER = I915_READ(IER);
- dev_priv->regfile.saveIMR = I915_READ(IMR);
- }
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->saveDEIER = I915_READ(DEIER);
+ dev_priv->saveDEIMR = I915_READ(DEIMR);
+ dev_priv->saveGTIER = I915_READ(GTIER);
+ dev_priv->saveGTIMR = I915_READ(GTIMR);
+ dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->saveIER = I915_READ(IER);
+ dev_priv->saveIMR = I915_READ(IMR);
}
intel_disable_gt_powersave(dev);
/* Cache mode state */
- dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
- dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
- dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
mutex_unlock(&dev->struct_mutex);
@@ -857,40 +853,41 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
mutex_lock(&dev->struct_mutex);
+ /* Hardware status page */
+ I915_WRITE(HWS_PGA, dev_priv->saveHWS);
+
i915_restore_display(dev);
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- /* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
- I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
- I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
- I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
- I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
- I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
- I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
- } else {
- I915_WRITE(IER, dev_priv->regfile.saveIER);
- I915_WRITE(IMR, dev_priv->regfile.saveIMR);
- }
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->saveIER);
+ I915_WRITE(IMR, dev_priv->saveIMR);
}
/* Cache mode state */
- I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+ I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
- I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
+ I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
- I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
- I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
+ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
}
for (i = 0; i < 3; i++)
- I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
+ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
mutex_unlock(&dev->struct_mutex);
diff --git a/trunk/drivers/gpu/drm/i915/i915_sysfs.c b/trunk/drivers/gpu/drm/i915/i915_sysfs.c
index 3bf51d58319d..903eebd2117a 100644
--- a/trunk/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/trunk/drivers/gpu/drm/i915/i915_sysfs.c
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
- if (!dev_priv->l3_parity.remap_info) {
+ if (!dev_priv->mm.l3_remap_info) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
- dev_priv->l3_parity.remap_info = temp;
+ dev_priv->mm.l3_remap_info = temp;
- memcpy(dev_priv->l3_parity.remap_info + (offset/4),
+ memcpy(dev_priv->mm.l3_remap_info + (offset/4),
buf + (offset/4),
count);
@@ -211,9 +211,12 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- mutex_lock(&dev_priv->rps.hw_lock);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -225,9 +228,12 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- mutex_lock(&dev_priv->rps.hw_lock);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -248,14 +254,16 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
- mutex_lock(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@@ -264,7 +272,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
dev_priv->rps.max_delay = val;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return count;
}
@@ -276,9 +284,12 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- mutex_lock(&dev_priv->rps.hw_lock);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@@ -299,14 +310,16 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
- mutex_lock(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@@ -315,7 +328,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_delay = val;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev->struct_mutex);
return count;
diff --git a/trunk/drivers/gpu/drm/i915/i915_trace.h b/trunk/drivers/gpu/drm/i915/i915_trace.h
index 3db4a6817713..8134421b89a6 100644
--- a/trunk/drivers/gpu/drm/i915/i915_trace.h
+++ b/trunk/drivers/gpu/drm/i915/i915_trace.h
@@ -229,26 +229,24 @@ TRACE_EVENT(i915_gem_evict_everything,
);
TRACE_EVENT(i915_gem_ring_dispatch,
- TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
- TP_ARGS(ring, seqno, flags),
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
- __field(u32, flags)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
- __entry->flags = flags;
i915_trace_irq_get(ring, seqno);
),
- TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
- __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
TRACE_EVENT(i915_gem_ring_flush,
diff --git a/trunk/drivers/gpu/drm/i915/intel_bios.c b/trunk/drivers/gpu/drm/i915/intel_bios.c
index 87e9b92039df..56846ed5ee55 100644
--- a/trunk/drivers/gpu/drm/i915/intel_bios.c
+++ b/trunk/drivers/gpu/drm/i915/intel_bios.c
@@ -499,12 +499,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
- DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
- "supported, assume %dbpp panel color "
- "depth.\n",
- dev_priv->edp.bpp);
- }
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
@@ -657,9 +653,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
-
- /* eDP data */
- dev_priv->edp.bpp = 18;
}
static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -762,8 +755,7 @@ void intel_setup_bios(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Set the Panel Power On/Off timings if uninitialized. */
- if (!HAS_PCH_SPLIT(dev) &&
- I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
+ if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c
index 5c7774396e10..6345878ae1e7 100644
--- a/trunk/drivers/gpu/drm/i915/intel_crt.c
+++ b/trunk/drivers/gpu/drm/i915/intel_crt.c
@@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
int old_dpms;
/* PCH platforms and VLV only support on/off. */
- if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
@@ -221,20 +221,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
- if (HAS_PCH_SPLIT(dev))
- adpa = ADPA_HOTPLUG_BITS;
- else
- adpa = 0;
-
+ adpa = ADPA_HOTPLUG_BITS;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev))
- ; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
@@ -407,16 +401,12 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
- int ret;
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
- ret = intel_connector_update_modes(connector, edid);
- kfree(edid);
-
- return ret;
+ return intel_connector_update_modes(connector, edid);
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
@@ -654,22 +644,10 @@ static int intel_crt_set_property(struct drm_connector *connector,
static void intel_crt_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
- if (HAS_PCH_SPLIT(dev)) {
- u32 adpa;
-
- adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(PCH_ADPA, adpa);
- POSTING_READ(PCH_ADPA);
-
- DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ if (HAS_PCH_SPLIT(dev))
crt->force_hotplug_required = 1;
- }
-
}
/*
@@ -751,7 +729,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
- if (IS_I830(dev))
+ if (IS_HASWELL(dev) || IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -771,10 +749,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
- if (IS_HASWELL(dev))
- crt->base.get_hw_state = intel_ddi_get_hw_state;
- else
- crt->base.get_hw_state = intel_crt_get_hw_state;
+ crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -791,6 +766,18 @@ void intel_crt_init(struct drm_device *dev)
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = 0;
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = 1;
+ }
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_ddi.c b/trunk/drivers/gpu/drm/i915/intel_ddi.c
index 852012b6fc5b..bfe375466a0e 100644
--- a/trunk/drivers/gpu/drm/i915/intel_ddi.c
+++ b/trunk/drivers/gpu/drm/i915/intel_ddi.c
@@ -58,26 +58,6 @@ static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
-static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
-{
- struct drm_encoder *encoder = &intel_encoder->base;
- int type = intel_encoder->type;
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
- type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
- return intel_dig_port->port;
-
- } else if (type == INTEL_OUTPUT_ANALOG) {
- return PORT_E;
-
- } else {
- DRM_ERROR("Invalid DDI encoder type %d\n", type);
- BUG();
- }
-}
-
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -153,34 +133,25 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 temp, i, rx_ctl_val;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
- /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
- * mode set "sequence for CRT port" document:
- * - TP1 to TP2 time with the default value
- * - FDI delay to 90h
- */
- I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
- FDI_RX_PWRDN_LANE0_VAL(2) |
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- /* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
- ((intel_crtc->fdi_lanes - 1) << 19);
- I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
- POSTING_READ(_FDI_RXA_CTL);
- udelay(220);
-
- /* Switch from Rawclk to PCDclk */
- rx_ctl_val |= FDI_PCDCLK;
- I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
-
- /* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
-
- /* Start the training iterating through available voltages and emphasis,
- * testing each value twice. */
- for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
+ /* Configure CPU PLL, wait for warmup */
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE |
+ SPLL_PLL_FREQ_1350MHz |
+ SPLL_PLL_SCC);
+
+ /* Use SPLL to drive the output when in FDI mode */
+ I915_WRITE(PORT_CLK_SEL(PORT_E),
+ PORT_CLK_SEL_SPLL);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(PORT_E));
+
+ udelay(20);
+
+ /* Start the training iterating through available voltages and emphasis */
+ for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
@@ -189,63 +160,103 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp = (temp & ~DDI_BUF_EMP_MASK);
I915_WRITE(DDI_BUF_CTL(PORT_E),
- DDI_BUF_CTL_ENABLE |
- ((intel_crtc->fdi_lanes - 1) << 1) |
- hsw_ddi_buf_ctl_values[i / 2]);
- POSTING_READ(DDI_BUF_CTL(PORT_E));
+ temp |
+ DDI_BUF_CTL_ENABLE |
+ DDI_PORT_WIDTH_X2 |
+ hsw_ddi_buf_ctl_values[i]);
udelay(600);
- /* Program PCH FDI Receiver TU */
- I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
-
- /* Enable PCH FDI Receiver with auto-training */
- rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
- I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
- POSTING_READ(_FDI_RXA_CTL);
-
- /* Wait for FDI receiver lane calibration */
- udelay(30);
-
- /* Unset FDI_RX_MISC pwrdn lanes */
- temp = I915_READ(_FDI_RXA_MISC);
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- I915_WRITE(_FDI_RXA_MISC, temp);
- POSTING_READ(_FDI_RXA_MISC);
-
- /* Wait for FDI auto training time */
- udelay(5);
+ /* We need to program FDI_RX_MISC with the default TP1 to TP2
+ * values before enabling the receiver, and configure the delay
+ * for the FDI timing generator to 90h. Luckily, all the other
+ * bits are supposed to be zeroed, so we can write those values
+ * directly.
+ */
+ I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
+ FDI_RX_FDI_DELAY_90);
+
+ /* Enable CPU FDI Receiver with auto-training */
+ reg = FDI_RX_CTL(pipe);
+ I915_WRITE(reg,
+ I915_READ(reg) |
+ FDI_LINK_TRAIN_AUTO |
+ FDI_RX_ENABLE |
+ FDI_LINK_TRAIN_PATTERN_1_CPT |
+ FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_PORT_WIDTH_2X_LPT |
+ FDI_RX_PLL_ENABLE);
+ POSTING_READ(reg);
+ udelay(100);
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+ DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
+ PIPE_DDI_MODE_SELECT_FDI |
+ PIPE_DDI_FUNC_ENABLE |
+ PIPE_DDI_PORT_WIDTH_X2;
+ I915_WRITE(DDI_FUNC_CTL(pipe),
+ temp);
+ break;
+ } else {
+ DRM_ERROR("Error training BUF_CTL %d\n", i);
- return;
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ I915_READ(DP_TP_CTL(PORT_E)) &
+ ~DP_TP_CTL_ENABLE);
+ I915_WRITE(FDI_RX_CTL(pipe),
+ I915_READ(FDI_RX_CTL(pipe)) &
+ ~FDI_RX_PLL_ENABLE);
+ continue;
}
+ }
- /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- I915_WRITE(DP_TP_CTL(PORT_E),
- I915_READ(DP_TP_CTL(PORT_E)) & ~DP_TP_CTL_ENABLE);
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
- rx_ctl_val &= ~FDI_RX_ENABLE;
- I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+/* For DDI connections, it is possible to support different outputs over the
+ * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
+ * the time the output is detected what exactly is on the other end of it. This
+ * function aims at providing support for this detection and proper output
+ * configuration.
+ */
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ /* For now, we don't do any proper output detection and assume that we
+ * handle HDMI only */
- /* Reset FDI_RX_MISC pwrdn lanes */
- temp = I915_READ(_FDI_RXA_MISC);
- temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(_FDI_RXA_MISC, temp);
+ switch(port){
+ case PORT_A:
+ /* We don't handle eDP and DP yet */
+ DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
+ break;
+ /* Assume that the ports B, C and D are working in HDMI mode for now */
+ case PORT_B:
+ case PORT_C:
+ case PORT_D:
+ intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
+ break;
+ default:
+ DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
+ port);
+ break;
}
-
- DRM_ERROR("FDI link training failed!\n");
}
/* WRPLL clock dividers */
@@ -634,435 +645,116 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{298000, 2, 21, 19},
};
-static void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- int port = intel_ddi_get_encoder_port(intel_encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
int pipe = intel_crtc->pipe;
- int type = intel_encoder->type;
-
- DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
- port_name(port), pipe_name(pipe));
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
- switch (intel_dp->lane_count) {
- case 1:
- intel_dp->DP |= DDI_PORT_WIDTH_X1;
- break;
- case 2:
- intel_dp->DP |= DDI_PORT_WIDTH_X2;
- break;
- case 4:
- intel_dp->DP |= DDI_PORT_WIDTH_X4;
- break;
- default:
- intel_dp->DP |= DDI_PORT_WIDTH_X4;
- WARN(1, "Unexpected DP lane count %d\n",
- intel_dp->lane_count);
- break;
- }
-
- if (intel_dp->has_audio) {
- DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("DP audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
- }
-
- intel_dp_init_link_config(intel_dp);
-
- } else if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
- if (intel_hdmi->has_audio) {
- /* Proper support for digital audio needs a new logic
- * and a new set of registers, so we leave it for future
- * patch bombing.
- */
- DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
-
- /* write eld */
- DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
- }
-
- intel_hdmi->set_infoframes(encoder, adjusted_mode);
- }
-}
-
-static struct intel_encoder *
-intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder, *ret = NULL;
- int num_encoders = 0;
-
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- ret = intel_encoder;
- num_encoders++;
- }
-
- if (num_encoders != 1)
- WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
- intel_crtc->pipe);
-
- BUG_ON(ret == NULL);
- return ret;
-}
-
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t val;
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- plls->spll_refcount--;
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Disabling SPLL\n");
- val = I915_READ(SPLL_CTL);
- WARN_ON(!(val & SPLL_PLL_ENABLE));
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
- }
- break;
- case PORT_CLK_SEL_WRPLL1:
- plls->wrpll1_refcount--;
- if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 1\n");
- val = I915_READ(WRPLL_CTL1);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL1);
- }
- break;
- case PORT_CLK_SEL_WRPLL2:
- plls->wrpll2_refcount--;
- if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 2\n");
- val = I915_READ(WRPLL_CTL2);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL2);
- }
- break;
- }
-
- WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
- WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
- WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
-
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-}
+ int p, n2, r2;
+ u32 temp, i;
-static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
-{
- u32 i;
+ /* On Haswell, we need to enable the clocks and prepare DDI function to
+ * work in HDMI mode for this pipe.
+ */
+ DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
- if (clock <= wrpll_tmds_clock_table[i].clock)
+ if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
break;
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
- *p = wrpll_tmds_clock_table[i].p;
- *n2 = wrpll_tmds_clock_table[i].n2;
- *r2 = wrpll_tmds_clock_table[i].r2;
-
- if (wrpll_tmds_clock_table[i].clock != clock)
- DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
- wrpll_tmds_clock_table[i].clock, clock);
-
- DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
- clock, *p, *n2, *r2);
-}
-
-bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- int type = intel_encoder->type;
- enum pipe pipe = intel_crtc->pipe;
- uint32_t reg, val;
-
- /* TODO: reuse PLLs when possible (compare values) */
-
- intel_ddi_put_crtc_pll(crtc);
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- switch (intel_dp->link_bw) {
- case DP_LINK_BW_1_62:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
- break;
- case DP_LINK_BW_2_7:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
- break;
- case DP_LINK_BW_5_4:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
- break;
- default:
- DRM_ERROR("Link bandwidth %d unsupported\n",
- intel_dp->link_bw);
- return false;
- }
-
- /* We don't need to turn any PLL on because we'll use LCPLL. */
- return true;
-
- } else if (type == INTEL_OUTPUT_HDMI) {
- int p, n2, r2;
-
- if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- plls->wrpll1_refcount++;
- reg = WRPLL_CTL1;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
- } else if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- plls->wrpll2_refcount++;
- reg = WRPLL_CTL2;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
- } else {
- DRM_ERROR("No WRPLLs available!\n");
- return false;
- }
-
- WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
- "WRPLL already enabled\n");
-
- intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
-
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
+ p = wrpll_tmds_clock_table[i].p;
+ n2 = wrpll_tmds_clock_table[i].n2;
+ r2 = wrpll_tmds_clock_table[i].r2;
- } else if (type == INTEL_OUTPUT_ANALOG) {
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
- pipe_name(pipe));
- plls->spll_refcount++;
- reg = SPLL_CTL;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
- }
+ if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
+ DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
+ wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
- WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
- "SPLL already enabled\n");
+ DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+ crtc->mode.clock, p, n2, r2);
- val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+ /* Enable LCPLL if disabled */
+ temp = I915_READ(LCPLL_CTL);
+ if (temp & LCPLL_PLL_DISABLE)
+ I915_WRITE(LCPLL_CTL,
+ temp & ~LCPLL_PLL_DISABLE);
- } else {
- WARN(1, "Invalid DDI encoder type %d\n", type);
- return false;
- }
+ /* Configure WR PLL 1, program the correct divider values for
+ * the desired frequency and wait for warmup */
+ I915_WRITE(WRPLL_CTL1,
+ WRPLL_PLL_ENABLE |
+ WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) |
+ WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p));
- I915_WRITE(reg, val);
udelay(20);
- return true;
-}
+ /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
+ * this port for connection.
+ */
+ I915_WRITE(PORT_CLK_SEL(port),
+ PORT_CLK_SEL_WRPLL1);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(port));
-void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- int type = intel_encoder->type;
- uint32_t temp;
+ udelay(20);
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs a new logic and a new set
+ * of registers, so we leave it for future patch bombing.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
- temp = TRANS_MSA_SYNC_CLK;
- switch (intel_crtc->bpp) {
- case 18:
- temp |= TRANS_MSA_6_BPC;
- break;
- case 24:
- temp |= TRANS_MSA_8_BPC;
- break;
- case 30:
- temp |= TRANS_MSA_10_BPC;
- break;
- case 36:
- temp |= TRANS_MSA_12_BPC;
- break;
- default:
- temp |= TRANS_MSA_8_BPC;
- WARN(1, "%d bpp unsupported by DDI function\n",
- intel_crtc->bpp);
- }
- I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
}
-}
-void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
- uint32_t temp;
-
- /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
- temp = TRANS_DDI_FUNC_ENABLE;
- temp |= TRANS_DDI_SELECT_PORT(port);
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
switch (intel_crtc->bpp) {
case 18:
- temp |= TRANS_DDI_BPC_6;
+ temp |= PIPE_DDI_BPC_6;
break;
case 24:
- temp |= TRANS_DDI_BPC_8;
+ temp |= PIPE_DDI_BPC_8;
break;
case 30:
- temp |= TRANS_DDI_BPC_10;
+ temp |= PIPE_DDI_BPC_10;
break;
case 36:
- temp |= TRANS_DDI_BPC_12;
+ temp |= PIPE_DDI_BPC_12;
break;
default:
- WARN(1, "%d bpp unsupported by transcoder DDI function\n",
+ WARN(1, "%d bpp unsupported by pipe DDI function\n",
intel_crtc->bpp);
}
- if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
- temp |= TRANS_DDI_PVSYNC;
- if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
- temp |= TRANS_DDI_PHSYNC;
-
- if (cpu_transcoder == TRANSCODER_EDP) {
- switch (pipe) {
- case PIPE_A:
- temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
- break;
- case PIPE_B:
- temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
- break;
- case PIPE_C:
- temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
- break;
- default:
- BUG();
- break;
- }
- }
-
- if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
- if (intel_hdmi->has_hdmi_sink)
- temp |= TRANS_DDI_MODE_SELECT_HDMI;
- else
- temp |= TRANS_DDI_MODE_SELECT_DVI;
-
- } else if (type == INTEL_OUTPUT_ANALOG) {
- temp |= TRANS_DDI_MODE_SELECT_FDI;
- temp |= (intel_crtc->fdi_lanes - 1) << 1;
-
- } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
- type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
-
- switch (intel_dp->lane_count) {
- case 1:
- temp |= TRANS_DDI_PORT_WIDTH_X1;
- break;
- case 2:
- temp |= TRANS_DDI_PORT_WIDTH_X2;
- break;
- case 4:
- temp |= TRANS_DDI_PORT_WIDTH_X4;
- break;
- default:
- temp |= TRANS_DDI_PORT_WIDTH_X4;
- WARN(1, "Unsupported lane count %d\n",
- intel_dp->lane_count);
- }
-
- } else {
- WARN(1, "Invalid encoder type %d for pipe %d\n",
- intel_encoder->type, pipe);
- }
-
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
-}
-
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
- uint32_t val = I915_READ(reg);
-
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
- val |= TRANS_DDI_PORT_NONE;
- I915_WRITE(reg, val);
-}
-
-bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
-{
- struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- int type = intel_connector->base.connector_type;
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- enum pipe pipe = 0;
- enum transcoder cpu_transcoder;
- uint32_t tmp;
-
- if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
- return false;
-
- if (port == PORT_A)
- cpu_transcoder = TRANSCODER_EDP;
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= PIPE_DDI_MODE_SELECT_HDMI;
else
- cpu_transcoder = pipe;
+ temp |= PIPE_DDI_MODE_SELECT_DVI;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= PIPE_DDI_PVSYNC;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= PIPE_DDI_PHSYNC;
- switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
- case TRANS_DDI_MODE_SELECT_HDMI:
- case TRANS_DDI_MODE_SELECT_DVI:
- return (type == DRM_MODE_CONNECTOR_HDMIA);
+ I915_WRITE(DDI_FUNC_CTL(pipe), temp);
- case TRANS_DDI_MODE_SELECT_DP_SST:
- if (type == DRM_MODE_CONNECTOR_eDP)
- return true;
- case TRANS_DDI_MODE_SELECT_DP_MST:
- return (type == DRM_MODE_CONNECTOR_DisplayPort);
-
- case TRANS_DDI_MODE_SELECT_FDI:
- return (type == DRM_MODE_CONNECTOR_VGA);
-
- default:
- return false;
- }
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -1070,432 +762,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum port port = intel_ddi_get_encoder_port(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 tmp;
int i;
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
return false;
- if (port == PORT_A) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+ for_each_pipe(i) {
+ tmp = I915_READ(DDI_FUNC_CTL(i));
- switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
- case TRANS_DDI_EDP_INPUT_A_ON:
- case TRANS_DDI_EDP_INPUT_A_ONOFF:
- *pipe = PIPE_A;
- break;
- case TRANS_DDI_EDP_INPUT_B_ONOFF:
- *pipe = PIPE_B;
- break;
- case TRANS_DDI_EDP_INPUT_C_ONOFF:
- *pipe = PIPE_C;
- break;
- }
-
- return true;
- } else {
- for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
-
- if ((tmp & TRANS_DDI_PORT_MASK)
- == TRANS_DDI_SELECT_PORT(port)) {
- *pipe = i;
- return true;
- }
+ if ((tmp & PIPE_DDI_PORT_MASK)
+ == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
+ *pipe = i;
+ return true;
}
}
- DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
+ DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
return true;
}
-static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t temp, ret;
- enum port port;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- int i;
-
- if (cpu_transcoder == TRANSCODER_EDP) {
- port = PORT_A;
- } else {
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- temp &= TRANS_DDI_PORT_MASK;
-
- for (i = PORT_B; i <= PORT_E; i++)
- if (temp == TRANS_DDI_SELECT_PORT(i))
- port = i;
- }
-
- ret = I915_READ(PORT_CLK_SEL(port));
-
- DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
- pipe_name(pipe), port_name(port), ret);
-
- return ret;
-}
-
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
- struct intel_crtc *intel_crtc;
-
- for_each_pipe(pipe) {
- intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (!intel_crtc->active)
- continue;
-
- intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
- pipe);
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- dev_priv->ddi_plls.spll_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL1:
- dev_priv->ddi_plls.wrpll1_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL2:
- dev_priv->ddi_plls.wrpll2_refcount++;
- break;
- }
- }
-}
-
-void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
-{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
-
- if (cpu_transcoder != TRANSCODER_EDP)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_PORT(port));
-}
-
-void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
-{
- struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
-
- if (cpu_transcoder != TRANSCODER_EDP)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
-}
-
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
-{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_crtc *crtc = encoder->crtc;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
-
- if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- ironlake_edp_panel_vdd_on(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- }
-
- WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
- intel_dp_start_link_train(intel_dp);
- intel_dp_complete_link_train(intel_dp);
- }
-}
-
-static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port)
-{
- uint32_t reg = DDI_BUF_CTL(port);
- int i;
-
- for (i = 0; i < 8; i++) {
- udelay(1);
- if (I915_READ(reg) & DDI_BUF_IS_IDLE)
- return;
- }
- DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
-}
-
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
-{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
- uint32_t val;
- bool wait = false;
-
- val = I915_READ(DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), val);
- wait = true;
- }
-
- val = I915_READ(DP_TP_CTL(port));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
-
- if (wait)
- intel_wait_ddi_buf_idle(dev_priv, port);
-
- if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- ironlake_edp_panel_vdd_on(intel_dp);
- ironlake_edp_panel_off(intel_dp);
- }
-
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
-}
-
-static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+void intel_enable_ddi(struct intel_encoder *encoder)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
-
- if (type == INTEL_OUTPUT_HDMI) {
- /* In HDMI/DVI mode, the port width, and swing/emphasis values
- * are ignored so nothing special needs to be done besides
- * enabling the port.
- */
- I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
- } else if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- ironlake_edp_backlight_on(intel_dp);
- }
-}
-
-static void intel_disable_ddi(struct intel_encoder *intel_encoder)
-{
- struct drm_encoder *encoder = &intel_encoder->base;
- int type = intel_encoder->type;
-
- if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- ironlake_edp_backlight_off(intel_dp);
- }
-}
-
-int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
-{
- if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
- return 450;
- else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
- LCPLL_CLK_FREQ_450)
- return 450;
- else if (IS_ULT(dev_priv->dev))
- return 338;
- else
- return 540;
-}
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
-void intel_ddi_pll_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val = I915_READ(LCPLL_CTL);
+ temp = I915_READ(DDI_BUF_CTL(port));
+ temp |= DDI_BUF_CTL_ENABLE;
- /* The LCPLL register should be turned on by the BIOS. For now let's
- * just check its state and print errors in case something is wrong.
- * Don't even try to turn it on.
+ /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
+ * and swing/emphasis values are ignored so nothing special needs
+ * to be done besides enabling the port.
*/
-
- DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
- intel_ddi_get_cdclk_freq(dev_priv));
-
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
-
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
-}
-
-void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
-{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- enum port port = intel_dig_port->port;
- bool wait;
- uint32_t val;
-
- if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
- val = I915_READ(DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), val);
- wait = true;
- }
-
- val = I915_READ(DP_TP_CTL(port));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
-
- if (wait)
- intel_wait_ddi_buf_idle(dev_priv, port);
- }
-
- val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
- DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
- if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
- val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
-
- intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
- POSTING_READ(DDI_BUF_CTL(port));
-
- udelay(600);
+ I915_WRITE(DDI_BUF_CTL(port), temp);
}
-void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+void intel_disable_ddi(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- uint32_t val;
-
- intel_ddi_post_disable(intel_encoder);
-
- val = I915_READ(_FDI_RXA_CTL);
- val &= ~FDI_RX_ENABLE;
- I915_WRITE(_FDI_RXA_CTL, val);
-
- val = I915_READ(_FDI_RXA_MISC);
- val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(_FDI_RXA_MISC, val);
-
- val = I915_READ(_FDI_RXA_CTL);
- val &= ~FDI_PCDCLK;
- I915_WRITE(_FDI_RXA_CTL, val);
-
- val = I915_READ(_FDI_RXA_CTL);
- val &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(_FDI_RXA_CTL, val);
-}
-
-static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- int type = intel_encoder->type;
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
- intel_dp_check_link_status(intel_dp);
-}
-
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
- /* HDMI has nothing special to destroy, so we can go with this. */
- intel_dp_encoder_destroy(encoder);
-}
-
-static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- int type = intel_encoder->type;
-
- WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
-
- if (type == INTEL_OUTPUT_HDMI)
- return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
- else
- return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
-}
-
-static const struct drm_encoder_funcs intel_ddi_funcs = {
- .destroy = intel_ddi_destroy,
-};
-
-static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
- .mode_fixup = intel_ddi_mode_fixup,
- .mode_set = intel_ddi_mode_set,
- .disable = intel_encoder_noop,
-};
-
-void intel_ddi_init(struct drm_device *dev, enum port port)
-{
- struct intel_digital_port *intel_dig_port;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
- struct intel_connector *hdmi_connector = NULL;
- struct intel_connector *dp_connector = NULL;
-
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
- if (!intel_dig_port)
- return;
-
- dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!dp_connector) {
- kfree(intel_dig_port);
- return;
- }
-
- if (port != PORT_A) {
- hdmi_connector = kzalloc(sizeof(struct intel_connector),
- GFP_KERNEL);
- if (!hdmi_connector) {
- kfree(dp_connector);
- kfree(intel_dig_port);
- return;
- }
- }
-
- intel_encoder = &intel_dig_port->base;
- encoder = &intel_encoder->base;
-
- drm_encoder_init(dev, encoder, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
-
- intel_encoder->enable = intel_enable_ddi;
- intel_encoder->pre_enable = intel_ddi_pre_enable;
- intel_encoder->disable = intel_disable_ddi;
- intel_encoder->post_disable = intel_ddi_post_disable;
- intel_encoder->get_hw_state = intel_ddi_get_hw_state;
-
- intel_dig_port->port = port;
- if (hdmi_connector)
- intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
- else
- intel_dig_port->hdmi.sdvox_reg = 0;
- intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
- intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
- intel_encoder->hot_plug = intel_ddi_hot_plug;
+ temp = I915_READ(DDI_BUF_CTL(port));
+ temp &= ~DDI_BUF_CTL_ENABLE;
- if (hdmi_connector)
- intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
- intel_dp_init_connector(intel_dig_port, dp_connector);
+ I915_WRITE(DDI_BUF_CTL(port), temp);
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c
index 3f7f62d370cb..b426d44a2b05 100644
--- a/trunk/drivers/gpu/drm/i915/intel_display.c
+++ b/trunk/drivers/gpu/drm/i915/intel_display.c
@@ -41,6 +41,8 @@
#include
#include
+#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -78,16 +80,6 @@ struct intel_limit {
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
-int
-intel_pch_rawclk(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- WARN_ON(!HAS_PCH_SPLIT(dev));
-
- return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
-}
-
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
@@ -388,7 +380,7 @@ static const intel_limit_t intel_limits_vlv_dac = {
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 20000, .max = 165000 },
- .vco = { .min = 4000000, .max = 5994000},
+ .vco = { .min = 5994000, .max = 4000000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
@@ -401,10 +393,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
};
static const intel_limit_t intel_limits_vlv_dp = {
- .dot = { .min = 25000, .max = 270000 },
- .vco = { .min = 4000000, .max = 6000000 },
+ .dot = { .min = 162000, .max = 270000 },
+ .vco = { .min = 5994000, .max = 4000000 },
.n = { .min = 1, .max = 7 },
- .m = { .min = 22, .max = 450 },
+ .m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
@@ -539,7 +531,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+ HAS_eDP)
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_dac;
@@ -935,15 +927,6 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- return intel_crtc->cpu_transcoder;
-}
-
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1016,11 +999,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
if (INTEL_INFO(dev)->gen >= 4) {
- int reg = PIPECONF(cpu_transcoder);
+ int reg = PIPECONF(pipe);
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1122,14 +1103,12 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
if (IS_HASWELL(dev_priv->dev)) {
/* On Haswell, DDI is used instead of FDI_TX_CTL */
- reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ reg = DDI_FUNC_CTL(pipe);
val = I915_READ(reg);
- cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
+ cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
} else {
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
@@ -1149,9 +1128,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
+ return;
+ } else {
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ }
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -1184,6 +1168,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
+ return;
+ }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1224,14 +1212,12 @@ void assert_pipe(struct drm_i915_private *dev_priv,
int reg;
u32 val;
bool cur_state;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
/* if we need the pipe A quirk it must be always on */
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
- reg = PIPECONF(cpu_transcoder);
+ reg = PIPECONF(pipe);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
WARN(cur_state != state,
@@ -1568,14 +1554,14 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
}
/**
- * ironlake_enable_pch_pll - enable PCH PLL
+ * intel_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
+static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
@@ -1659,12 +1645,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
pll->on = false;
}
-static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ int reg;
+ u32 val, pipeconf_val;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- uint32_t reg, val, pipeconf_val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1678,15 +1664,10 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
- if (HAS_PCH_CPT(dev)) {
- /* Workaround: Set the timing override bit before enabling the
- * pch transcoder. */
- reg = TRANS_CHICKEN2(pipe);
- val = I915_READ(reg);
- val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(reg, val);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
+ return;
}
-
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1715,42 +1696,11 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
-static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- u32 val, pipeconf_val;
-
- /* PCH only available on ILK+ */
- BUG_ON(dev_priv->info->gen < 5);
-
- /* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
- assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
-
- /* Workaround: set timing override bit. */
- val = I915_READ(_TRANSA_CHICKEN2);
- val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(_TRANSA_CHICKEN2, val);
-
- val = TRANS_ENABLE;
- pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
-
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
- PIPECONF_INTERLACED_ILK)
- val |= TRANS_INTERLACED;
- else
- val |= TRANS_PROGRESSIVE;
-
- I915_WRITE(TRANSCONF(TRANSCODER_A), val);
- if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
- DRM_ERROR("Failed to enable PCH transcoder\n");
-}
-
-static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
- uint32_t reg, val;
+ int reg;
+ u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1766,31 +1716,6 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %d\n", pipe);
-
- if (!HAS_PCH_IBX(dev)) {
- /* Workaround: Clear the timing override chicken bit again. */
- reg = TRANS_CHICKEN2(pipe);
- val = I915_READ(reg);
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(reg, val);
- }
-}
-
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(_TRANSACONF);
- val &= ~TRANS_ENABLE;
- I915_WRITE(_TRANSACONF, val);
- /* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
- DRM_ERROR("Failed to disable PCH transcoder\n");
-
- /* Workaround: clear timing override bit. */
- val = I915_READ(_TRANSA_CHICKEN2);
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(_TRANSA_CHICKEN2, val);
}
/**
@@ -1810,17 +1735,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
{
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- enum transcoder pch_transcoder;
int reg;
u32 val;
- if (IS_HASWELL(dev_priv->dev))
- pch_transcoder = TRANSCODER_A;
- else
- pch_transcoder = pipe;
-
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1831,13 +1748,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
- assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
- assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
+ assert_fdi_rx_pll_enabled(dev_priv, pipe);
+ assert_fdi_tx_pll_enabled(dev_priv, pipe);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
- reg = PIPECONF(cpu_transcoder);
+ reg = PIPECONF(pipe);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE)
return;
@@ -1861,8 +1778,6 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
int reg;
u32 val;
@@ -1876,7 +1791,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
- reg = PIPECONF(cpu_transcoder);
+ reg = PIPECONF(pipe);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
@@ -1892,10 +1807,8 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
- if (dev_priv->info->gen >= 4)
- I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
- else
- I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}
/**
@@ -2013,9 +1926,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch)
+static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch)
{
int tile_rows, tiles;
@@ -2056,38 +1969,24 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->pixel_format) {
- case DRM_FORMAT_C8:
+ switch (fb->bits_per_pixel) {
+ case 8:
dspcntr |= DISPPLANE_8BPP;
break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- dspcntr |= DISPPLANE_BGRX555;
- break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
+ case 16:
+ if (fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
break;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
- DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
return -EINVAL;
}
-
if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
@@ -2101,9 +2000,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ gen4_compute_dspaddr_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
@@ -2154,31 +2053,27 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (fb->pixel_format) {
- case DRM_FORMAT_C8:
+ switch (fb->bits_per_pixel) {
+ case 8:
dspcntr |= DISPPLANE_8BPP;
break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
+ case 16:
+ if (fb->depth != 16)
+ return -EINVAL;
+
+ dspcntr |= DISPPLANE_16BPP;
break;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
+ case 24:
+ case 32:
+ if (fb->depth == 24)
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ else if (fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else
+ return -EINVAL;
break;
default:
- DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
return -EINVAL;
}
@@ -2194,9 +2089,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ gen4_compute_dspaddr_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2204,12 +2099,8 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev)) {
- I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
- } else {
- I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPLINOFF(plane), linear_offset);
- }
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
POSTING_READ(reg);
return 0;
@@ -2257,39 +2148,13 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}
-static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_master_private *master_priv;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (!dev->primary->master)
- return;
-
- master_priv = dev->primary->master->driver_priv;
- if (!master_priv->sarea_priv)
- return;
-
- switch (intel_crtc->pipe) {
- case 0:
- master_priv->sarea_priv->pipeA_x = x;
- master_priv->sarea_priv->pipeA_y = y;
- break;
- case 1:
- master_priv->sarea_priv->pipeB_x = x;
- master_priv->sarea_priv->pipeB_y = y;
- break;
- default:
- break;
- }
-}
-
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_framebuffer *old_fb;
int ret;
@@ -2341,7 +2206,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
- intel_crtc_update_sarea_pos(crtc, x, y);
+ if (!dev->primary->master)
+ return 0;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return 0;
+
+ if (intel_crtc->pipe) {
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ } else {
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ }
return 0;
}
@@ -2436,29 +2314,6 @@ static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
POSTING_READ(SOUTH_CHICKEN1);
}
-static void ivb_modeset_global_resources(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
- struct intel_crtc *pipe_C_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
- uint32_t temp;
-
- /* When everything is off disable fdi C so that we could enable fdi B
- * with all lanes. XXX: This misses the case where a pipe is not using
- * any pch resources and so doesn't need any fdi lanes. */
- if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
- temp = I915_READ(SOUTH_CHICKEN1);
- temp &= ~FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("disabling fdi C rx\n");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- }
-}
-
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -2502,9 +2357,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
- FDI_RX_PHASE_SYNC_POINTER_EN);
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
+ }
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -2593,9 +2450,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
@@ -2610,7 +2464,8 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- cpt_phase_pointer_enable(dev, pipe);
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
@@ -2715,9 +2570,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
- I915_READ(FDI_RX_IIR(pipe)));
-
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2730,9 +2582,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2744,7 +2593,8 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- cpt_phase_pointer_enable(dev, pipe);
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
@@ -2763,7 +2613,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
break;
}
}
@@ -2804,7 +2654,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
@@ -2821,6 +2671,9 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
int pipe = intel_crtc->pipe;
u32 reg, temp;
+ /* Write the TU size bits so error detection works */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
@@ -2921,6 +2774,9 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_EN));
} else if (HAS_PCH_CPT(dev)) {
cpt_phase_pointer_disable(dev, pipe);
}
@@ -2983,7 +2839,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
@@ -2993,6 +2849,23 @@ static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
* must be driven by its own crtc; no sharing is possible.
*/
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+
+ /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
+ * CPU handles all others */
+ if (IS_HASWELL(dev)) {
+ /* It is still unclear how this will work on PPT, so throw up a warning */
+ WARN_ON(!HAS_PCH_LPT(dev));
+
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+ DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
+ return true;
+ } else {
+ DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
+ intel_encoder->type);
+ return false;
+ }
+ }
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&intel_encoder->base))
@@ -3004,11 +2877,6 @@ static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
-static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
-{
- return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
-}
-
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
@@ -3118,24 +2986,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
assert_transcoder_disabled(dev_priv, pipe);
- /* Write the TU size bits before fdi link training, so that error
- * detection works. */
- I915_WRITE(FDI_RX_TUSIZE1(pipe),
- I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
-
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- /* XXX: pch pll's can be enabled any time before we enable the PCH
- * transcoder, and we actually should do this to not upset any PCH
- * transcoder that already use the clock when we share it.
- *
- * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
- * unconditionally resets the pll - we need that to have the right LVDS
- * enable sequence. */
- ironlake_enable_pch_pll(intel_crtc);
+ intel_enable_pch_pll(intel_crtc);
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
+ lpt_program_iclkip(crtc);
+ } else if (HAS_PCH_CPT(dev)) {
u32 sel;
temp = I915_READ(PCH_DPLL_SEL);
@@ -3172,7 +3031,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- intel_fdi_normal_train(crtc);
+ if (!IS_HASWELL(dev))
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -3204,37 +3064,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
- BUG();
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
}
I915_WRITE(reg, temp);
}
- ironlake_enable_pch_transcoder(dev_priv, pipe);
-}
-
-static void lpt_pch_enable(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
-
- assert_transcoder_disabled(dev_priv, TRANSCODER_A);
-
- lpt_program_iclkip(crtc);
-
- /* Set transcoder timing. */
- I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
- I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
- I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
-
- I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
- I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
- I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
- I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
-
- lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+ intel_enable_transcoder(dev_priv, pipe);
}
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -3327,12 +3165,16 @@ static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u3
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int dslreg = PIPEDSL(pipe);
+ int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
u32 temp;
temp = I915_READ(dslreg);
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
+ /* Without this, mode sets may fail silently on FDI */
+ I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
+ udelay(250);
+ I915_WRITE(tc2reg, 0);
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
}
@@ -3363,12 +3205,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- is_pch_port = ironlake_crtc_driving_pch(crtc);
+ is_pch_port = intel_crtc_driving_pch(crtc);
if (is_pch_port) {
- /* Note: FDI PLL enabling _must_ be done before we enable the
- * cpu pipes, hence this is separate from all the other fdi/pch
- * enabling. */
ironlake_fdi_pll_enable(intel_crtc);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -3381,17 +3220,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
- if (IS_IVYBRIDGE(dev))
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
- PF_PIPE_SEL_IVB(pipe));
- else
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
@@ -3431,83 +3265,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
-static void haswell_crtc_enable(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- bool is_pch_port;
-
- WARN_ON(!crtc->enabled);
-
- if (intel_crtc->active)
- return;
-
- intel_crtc->active = true;
- intel_update_watermarks(dev);
-
- is_pch_port = haswell_crtc_driving_pch(crtc);
-
- if (is_pch_port)
- dev_priv->display.fdi_link_train(crtc);
-
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_enable)
- encoder->pre_enable(encoder);
-
- intel_ddi_enable_pipe_clock(intel_crtc);
-
- /* Enable panel fitting for eDP */
- if (dev_priv->pch_pf_size &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
- PF_PIPE_SEL_IVB(pipe));
- I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
- I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
- }
-
- /*
- * On ILK+ LUT must be loaded before the pipe is running but with
- * clocks enabled
- */
- intel_crtc_load_lut(crtc);
-
- intel_ddi_set_pipe_settings(crtc);
- intel_ddi_enable_pipe_func(crtc);
-
- intel_enable_pipe(dev_priv, pipe, is_pch_port);
- intel_enable_plane(dev_priv, plane, pipe);
-
- if (is_pch_port)
- lpt_pch_enable(crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-
- intel_crtc_update_cursor(crtc, true);
-
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
-
- /*
- * There seems to be a race in PCH platform hw (at least on some
- * outputs) where an enabled pipe still completes any pageflip right
- * away (as if the pipe is off) instead of waiting for vblank. As soon
- * as the first vblank happend, everything works as expected. Hence just
- * wait for one vblank before returning to avoid strange things
- * happening.
- */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-}
-
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3546,7 +3303,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_fdi_disable(crtc);
- ironlake_disable_pch_transcoder(dev_priv, pipe);
+ intel_disable_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
@@ -3588,78 +3345,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
}
-static void haswell_crtc_disable(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- bool is_pch_port;
-
- if (!intel_crtc->active)
- return;
-
- is_pch_port = haswell_crtc_driving_pch(crtc);
-
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
-
- intel_crtc_wait_for_pending_flips(crtc);
- drm_vblank_off(dev, pipe);
- intel_crtc_update_cursor(crtc, false);
-
- intel_disable_plane(dev_priv, plane, pipe);
-
- if (dev_priv->cfb_plane == plane)
- intel_disable_fbc(dev);
-
- intel_disable_pipe(dev_priv, pipe);
-
- intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
-
- /* Disable PF */
- I915_WRITE(PF_CTL(pipe), 0);
- I915_WRITE(PF_WIN_SZ(pipe), 0);
-
- intel_ddi_disable_pipe_clock(intel_crtc);
-
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_disable)
- encoder->post_disable(encoder);
-
- if (is_pch_port) {
- lpt_disable_pch_transcoder(dev_priv);
- intel_ddi_fdi_disable(crtc);
- }
-
- intel_crtc->active = false;
- intel_update_watermarks(dev);
-
- mutex_lock(&dev->struct_mutex);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void ironlake_crtc_off(struct drm_crtc *crtc)
+static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_put_pch_pll(intel_crtc);
}
-static void haswell_crtc_off(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
- * start using it. */
- intel_crtc->cpu_transcoder = intel_crtc->pipe;
-
- intel_ddi_put_crtc_pll(crtc);
-}
-
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -4150,6 +3841,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc && edp_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. (Note: >12bpc will be caught below.)
@@ -4359,7 +4061,7 @@ static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock, intel_clock_t *reduced_clock,
- int num_connectors)
+ int refclk, int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4367,19 +4069,9 @@ static void vlv_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
- bool is_sdvo;
- u32 temp;
-
- is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
-
- dpll = DPLL_VGA_MODE_DIS;
- dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
- dpll |= DPLL_REFA_CLK_ENABLE_VLV;
- dpll |= DPLL_INTEGRATED_CLOCK_VLV;
+ bool is_hdmi;
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
+ is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
bestn = clock->n;
bestm1 = clock->m1;
@@ -4387,10 +4079,12 @@ static void vlv_update_pll(struct drm_crtc *crtc,
bestp1 = clock->p1;
bestp2 = clock->p2;
- /*
- * In Valleyview PLL and program lane counter registers are exposed
- * through DPIO interface
- */
+ /* Enable DPIO clock input */
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
mdiv |= ((bestn << DPIO_N_SHIFT));
@@ -4401,13 +4095,12 @@ static void vlv_update_pll(struct drm_crtc *crtc,
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
- pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
+ pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
- (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
- (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+ (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
- intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
+ intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll);
@@ -4415,44 +4108,19 @@ static void vlv_update_pll(struct drm_crtc *crtc,
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
- intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
+ if (is_hdmi) {
+ u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
- temp = 0;
- if (is_sdvo) {
- temp = intel_mode_get_pixel_multiplier(adjusted_mode);
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
- }
- I915_WRITE(DPLL_MD(pipe), temp);
- POSTING_READ(DPLL_MD(pipe));
- /* Now program lane control registers */
- if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
- || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
- {
- temp = 0x1000C4;
- if(pipe == 1)
- temp |= (1 << 21);
- intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
- }
- if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
- {
- temp = 0x1000C4;
- if(pipe == 1)
- temp |= (1 << 21);
- intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+ I915_WRITE(DPLL_MD(pipe), temp);
+ POSTING_READ(DPLL_MD(pipe));
}
+
+ intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
}
static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4468,8 +4136,6 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
u32 dpll;
bool is_sdvo;
- i9xx_update_pll_dividers(crtc, clock, reduced_clock);
-
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
@@ -4570,7 +4236,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock, intel_clock_t *reduced_clock,
+ intel_clock_t *clock,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
@@ -4579,8 +4245,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
u32 dpll;
- i9xx_update_pll_dividers(crtc, clock, reduced_clock);
-
dpll = DPLL_VGA_MODE_DIS;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4630,64 +4294,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- uint32_t vsyncshift;
-
- if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- /* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_end -= 1;
- vsyncshift = adjusted_mode->crtc_hsync_start
- - adjusted_mode->crtc_htotal / 2;
- } else {
- vsyncshift = 0;
- }
-
- if (INTEL_INFO(dev)->gen > 3)
- I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
-
- I915_WRITE(HTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(cpu_transcoder),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(cpu_transcoder),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(cpu_transcoder),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(cpu_transcoder),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
-
- /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
- * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
- * documented on the DDI_FUNC_CTL register description, EDP Input Select
- * bits. */
- if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
- (pipe == PIPE_B || pipe == PIPE_C))
- I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
-
- /* pipesrc controls the size that is scaled from, which should
- * always be the user's requested size.
- */
- I915_WRITE(PIPESRC(pipe),
- ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-}
-
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4701,7 +4307,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dspcntr, pipeconf;
+ u32 dspcntr, pipeconf, vsyncshift;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder;
@@ -4765,14 +4371,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+ i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
+ &reduced_clock : NULL);
+
if (IS_GEN2(dev))
- i8xx_update_pll(crtc, adjusted_mode, &clock,
- has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
else if (IS_VALLEYVIEW(dev))
- vlv_update_pll(crtc, mode, adjusted_mode, &clock,
- has_reduced_clock ? &reduced_clock : NULL,
- num_connectors);
+ vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
+ refclk, num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
@@ -4813,14 +4419,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
- if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
- pipeconf |= PIPECONF_BPP_6 |
- PIPECONF_ENABLE |
- I965_PIPECONF_ACTIVE;
- }
- }
-
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -4836,12 +4434,40 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
- else
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal/2;
+ } else {
pipeconf |= PIPECONF_PROGRESSIVE;
+ vsyncshift = 0;
+ }
+
+ if (!IS_GEN3(dev))
+ I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
+
+ I915_WRITE(HTOTAL(pipe),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(pipe),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(pipe),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
- intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+ I915_WRITE(VTOTAL(pipe),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(pipe),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(pipe),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
@@ -4850,6 +4476,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -5040,8 +4668,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
val |= PIPE_12BPC;
break;
default:
- /* Case prevented by intel_choose_pipe_bpp_dither. */
- BUG();
+ val |= PIPE_8BPC;
+ break;
}
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
@@ -5058,31 +4686,6 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
POSTING_READ(PIPECONF(pipe));
}
-static void haswell_set_pipeconf(struct drm_crtc *crtc,
- struct drm_display_mode *adjusted_mode,
- bool dither)
-{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- uint32_t val;
-
- val = I915_READ(PIPECONF(cpu_transcoder));
-
- val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
- if (dither)
- val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
-
- val &= ~PIPECONF_INTERLACE_MASK_HSW;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACED_ILK;
- else
- val |= PIPECONF_PROGRESSIVE;
-
- I915_WRITE(PIPECONF(cpu_transcoder), val);
- POSTING_READ(PIPECONF(cpu_transcoder));
-}
-
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
@@ -5146,115 +4749,74 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
return true;
}
-static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t temp;
-
- temp = I915_READ(SOUTH_CHICKEN1);
- if (temp & FDI_BC_BIFURCATION_SELECT)
- return;
-
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
- temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("enabling fdi C rx\n");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- POSTING_READ(SOUTH_CHICKEN1);
-}
-
-static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *pipe_B_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
-
- DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
- intel_crtc->pipe, intel_crtc->fdi_lanes);
- if (intel_crtc->fdi_lanes > 4) {
- DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
- intel_crtc->pipe, intel_crtc->fdi_lanes);
- /* Clamp lanes to avoid programming the hw with bogus values. */
- intel_crtc->fdi_lanes = 4;
-
- return false;
- }
-
- if (dev_priv->num_pipe == 2)
- return true;
-
- switch (intel_crtc->pipe) {
- case PIPE_A:
- return true;
- case PIPE_B:
- if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
- intel_crtc->fdi_lanes > 2) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
- intel_crtc->pipe, intel_crtc->fdi_lanes);
- /* Clamp lanes to avoid programming the hw with bogus values. */
- intel_crtc->fdi_lanes = 2;
-
- return false;
- }
-
- if (intel_crtc->fdi_lanes > 2)
- WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
- else
- cpt_enable_fdi_bc_bifurcation(dev);
-
- return true;
- case PIPE_C:
- if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
- if (intel_crtc->fdi_lanes > 2) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
- intel_crtc->pipe, intel_crtc->fdi_lanes);
- /* Clamp lanes to avoid programming the hw with bogus values. */
- intel_crtc->fdi_lanes = 2;
-
- return false;
- }
- } else {
- DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
- return false;
- }
-
- cpt_enable_fdi_bc_bifurcation(dev);
-
- return true;
- default:
- BUG();
- }
-}
-
-static void ironlake_set_m_n(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
- struct intel_encoder *intel_encoder, *edp_encoder = NULL;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false, is_sdvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ struct intel_encoder *encoder, *edp_encoder = NULL;
+ int ret;
struct fdi_m_n m_n = {0};
- int target_clock, pixel_multiplier, lane, link_bw;
- bool is_dp = false, is_cpu_edp = false;
+ u32 temp;
+ int target_clock, pixel_multiplier, lane, link_bw, factor;
+ unsigned int pipe_bpp;
+ bool dither;
+ bool is_cpu_edp = false, is_pch_edp = false;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- switch (intel_encoder->type) {
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_dp = true;
- if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ is_pch_edp = true;
+ else
is_cpu_edp = true;
- edp_encoder = intel_encoder;
+ edp_encoder = encoder;
break;
}
+
+ num_connectors++;
+ }
+
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
}
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
@@ -5281,6 +4843,20 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
else
target_clock = adjusted_mode->clock;
+ /* determine panel color depth */
+ dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
+ adjusted_mode);
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+ pipe_bpp != 36) {
+ WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
+ pipe_bpp);
+ pipe_bpp = 24;
+ }
+ intel_crtc->bpp = pipe_bpp;
+
if (!lane) {
/*
* Account for spread spectrum to avoid
@@ -5298,51 +4874,10 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
- I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
-}
-
-static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
- struct drm_display_mode *adjusted_mode,
- intel_clock_t *clock, u32 fp)
-{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder;
- uint32_t dpll;
- int factor, pixel_multiplier, num_connectors = 0;
- bool is_lvds = false, is_sdvo = false, is_tv = false;
- bool is_dp = false, is_cpu_edp = false;
-
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_SDVO:
- case INTEL_OUTPUT_HDMI:
- is_sdvo = true;
- if (intel_encoder->needs_tv_clock)
- is_tv = true;
- break;
- case INTEL_OUTPUT_TVOUT:
- is_tv = true;
- break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
- case INTEL_OUTPUT_EDP:
- is_dp = true;
- if (!intel_encoder_is_pch_edp(&intel_encoder->base))
- is_cpu_edp = true;
- break;
- }
-
- num_connectors++;
- }
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
@@ -5354,7 +4889,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
} else if (is_sdvo && is_tv)
factor = 20;
- if (clock->m < factor * clock->n)
+ if (clock.m < factor * clock.n)
fp |= FP_CB_TUNE;
dpll = 0;
@@ -5364,119 +4899,55 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
- pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
if (pixel_multiplier > 1) {
dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
- dpll |= DPLL_DVO_HIGH_SPEED;
- }
- if (is_dp && !is_cpu_edp)
- dpll |= DPLL_DVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- /* also FPA1 */
- dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
-
- switch (clock->p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
-
- if (is_sdvo && is_tv)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (is_tv)
- /* XXX: just matching BIOS for now */
- /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
- dpll |= 3;
- else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
- else
- dpll |= PLL_REF_INPUT_DREFCLK;
-
- return dpll;
-}
-
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false, is_dp = false, is_cpu_edp = false;
- struct intel_encoder *encoder;
- u32 temp;
- int ret;
- bool dither, fdi_config_ok;
-
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
- case INTEL_OUTPUT_EDP:
- is_dp = true;
- if (!intel_encoder_is_pch_edp(&encoder->base))
- is_cpu_edp = true;
- break;
- }
-
- num_connectors++;
+ dpll |= DPLL_DVO_HIGH_SPEED;
}
+ if (is_dp && !is_cpu_edp)
+ dpll |= DPLL_DVO_HIGH_SPEED;
- WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
- "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
- &has_reduced_clock, &reduced_clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
+ switch (clock.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
}
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
- /* determine panel color depth */
- dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
- adjusted_mode);
- if (is_lvds && dev_priv->lvds_dither)
- dither = true;
-
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
-
- dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+ if (is_sdvo && is_tv)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (is_tv)
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (!is_cpu_edp) {
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
+ * pre-Haswell/LPT generation */
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
+ pipe);
+ } else if (!is_cpu_edp) {
struct intel_pch_pll *pll;
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5562,13 +5033,47 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ adjusted_mode->crtc_vtotal -= 1;
+ adjusted_mode->crtc_vblank_end -= 1;
+ I915_WRITE(VSYNCSHIFT(pipe),
+ adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal/2);
+ } else {
+ I915_WRITE(VSYNCSHIFT(pipe), 0);
+ }
+
+ I915_WRITE(HTOTAL(pipe),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(HBLANK(pipe),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(pipe),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(pipe),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(pipe),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(pipe),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
- /* Note, this also computes intel_crtc->fdi_lanes which is used below in
- * ironlake_check_fdi_lanes. */
- ironlake_set_m_n(crtc, mode, adjusted_mode);
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
+ I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+ I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
@@ -5587,217 +5092,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
- return fdi_config_ok ? ret : -EINVAL;
-}
-
-static int haswell_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- int num_connectors = 0;
- intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false, is_dp = false, is_cpu_edp = false;
- struct intel_encoder *encoder;
- u32 temp;
- int ret;
- bool dither;
-
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_DISPLAYPORT:
- is_dp = true;
- break;
- case INTEL_OUTPUT_EDP:
- is_dp = true;
- if (!intel_encoder_is_pch_edp(&encoder->base))
- is_cpu_edp = true;
- break;
- }
-
- num_connectors++;
- }
-
- if (is_cpu_edp)
- intel_crtc->cpu_transcoder = TRANSCODER_EDP;
- else
- intel_crtc->cpu_transcoder = pipe;
-
- /* We are not sure yet this won't happen. */
- WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
- INTEL_PCH_TYPE(dev));
-
- WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
- num_connectors, pipe_name(pipe));
-
- WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
- (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
-
- WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
-
- if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
- return -EINVAL;
-
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
- &has_reduced_clock,
- &reduced_clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
- }
- }
-
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
- /* determine panel color depth */
- dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
- adjusted_mode);
- if (is_lvds && dev_priv->lvds_dither)
- dither = true;
-
- DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
- drm_mode_debug_printmodeline(mode);
-
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
-
- dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
- fp);
-
- /* CPU eDP is the only output that doesn't need a PCH PLL of its
- * own on pre-Haswell/LPT generation */
- if (!is_cpu_edp) {
- struct intel_pch_pll *pll;
-
- pll = intel_get_pch_pll(intel_crtc, dpll, fp);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
- pipe);
- return -EINVAL;
- }
- } else
- intel_put_pch_pll(intel_crtc);
-
- /* The LVDS pin pair needs to be on before the DPLLs are
- * enabled. This is an exception to the general rule that
- * mode_set doesn't turn things on.
- */
- if (is_lvds) {
- temp = I915_READ(PCH_LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
- temp &= ~PORT_TRANS_SEL_MASK;
- temp |= PORT_TRANS_SEL_CPT(pipe);
- } else {
- if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
- }
-
- /* set the corresponsding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether
- * we're going to set the DPLLs for dual-channel mode or
- * not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP |
- LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode
- * (LVDS_A3_POWER_UP) appropriately here, but we need to
- * look more thoroughly into how panels behave in the
- * two modes.
- */
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- temp |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(PCH_LVDS, temp);
- }
- }
-
- if (is_dp && !is_cpu_edp) {
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
- } else {
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- /* For non-DP output, clear any trans DP clock recovery
- * setting.*/
- I915_WRITE(TRANSDATA_M1(pipe), 0);
- I915_WRITE(TRANSDATA_N1(pipe), 0);
- I915_WRITE(TRANSDPLINK_M1(pipe), 0);
- I915_WRITE(TRANSDPLINK_N1(pipe), 0);
- }
- }
-
- intel_crtc->lowfreq_avail = false;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- if (intel_crtc->pch_pll) {
- I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(intel_crtc->pch_pll->pll_reg);
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
- }
-
- if (intel_crtc->pch_pll) {
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
- intel_crtc->lowfreq_avail = true;
- } else {
- I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
- }
- }
- }
-
- intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
-
- if (!is_dp || is_cpu_edp)
- ironlake_set_m_n(crtc, mode, adjusted_mode);
-
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- if (is_cpu_edp)
- ironlake_set_pll_edp(crtc, adjusted_mode->clock);
-
- haswell_set_pipeconf(crtc, adjusted_mode, dither);
-
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- ret = intel_pipe_set_base(crtc, x, y, fb);
-
- intel_update_watermarks(dev);
-
- intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
-
return ret;
}
@@ -5809,8 +5103,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder_helper_funcs *encoder_funcs;
- struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int ret;
@@ -5821,19 +5113,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
x, y, fb);
drm_vblank_post_modeset(dev, pipe);
- if (ret != 0)
- return ret;
-
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
- encoder->base.base.id,
- drm_get_encoder_name(&encoder->base),
- mode->base.id, mode->name);
- encoder_funcs = encoder->base.helper_private;
- encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
- }
-
- return 0;
+ return ret;
}
static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -6469,7 +5749,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_mode_fb_cmd2 mode_cmd;
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
@@ -6599,19 +5879,24 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- return false;
+ goto fail;
}
if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
- return false;
+ goto fail;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
+
return true;
+fail:
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6736,12 +6021,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ int pipe = intel_crtc->pipe;
struct drm_display_mode *mode;
- int htot = I915_READ(HTOTAL(cpu_transcoder));
- int hsync = I915_READ(HSYNC(cpu_transcoder));
- int vtot = I915_READ(VTOTAL(cpu_transcoder));
- int vsync = I915_READ(VSYNC(cpu_transcoder));
+ int htot = I915_READ(HTOTAL(pipe));
+ int hsync = I915_READ(HSYNC(pipe));
+ int vtot = I915_READ(VTOTAL(pipe));
+ int vsync = I915_READ(VSYNC(pipe));
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -6898,19 +6183,14 @@ static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
- struct drm_device *dev = work->crtc->dev;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&work->dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
- intel_update_fbc(dev);
- mutex_unlock(&dev->struct_mutex);
-
- BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
- atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
-
+ intel_update_fbc(work->dev);
+ mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -6921,6 +6201,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
+ struct drm_pending_vblank_event *e;
+ struct timeval tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
@@ -6936,8 +6218,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
intel_crtc->unpin_work = NULL;
- if (work->event)
- drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
+ if (work->event) {
+ e = work->event;
+ e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+ e->event.tv_sec = tvbl.tv_sec;
+ e->event.tv_usec = tvbl.tv_usec;
+
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
drm_vblank_put(dev, intel_crtc->pipe);
@@ -6947,9 +6238,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
- wake_up(&dev_priv->pending_flip_queue);
- queue_work(dev_priv->wq, &work->work);
+ wake_up(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
@@ -7250,7 +6541,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -ENOMEM;
work->event = event;
- work->crtc = crtc;
+ work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -7275,9 +6566,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
- flush_workqueue(dev_priv->wq);
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
@@ -7296,7 +6584,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
- atomic_inc(&intel_crtc->unpin_work_count);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
@@ -7311,7 +6598,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
- atomic_dec(&intel_crtc->unpin_work_count);
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
@@ -7607,7 +6893,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
dev->mode_config.dpms_property;
connector->dpms = DRM_MODE_DPMS_ON;
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
dpms_property,
DRM_MODE_DPMS_ON);
@@ -7729,6 +7015,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_encoder *encoder;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
bool ret = true;
@@ -7773,9 +7061,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
* update the the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
- if (dev_priv->display.modeset_global_resources)
- dev_priv->display.modeset_global_resources(dev);
-
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
@@ -7785,6 +7070,18 @@ bool intel_set_mode(struct drm_crtc *crtc,
x, y, fb);
if (!ret)
goto done;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != &intel_crtc->base)
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -8123,12 +7420,6 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
-static void intel_cpu_pll_init(struct drm_device *dev)
-{
- if (IS_HASWELL(dev))
- intel_ddi_pll_init(dev);
-}
-
static void intel_pch_pll_init(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -8168,7 +7459,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
- intel_crtc->cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
@@ -8261,9 +7551,17 @@ static void intel_setup_outputs(struct drm_device *dev)
I915_WRITE(PFIT_CONTROL, 0);
}
- if (!(IS_HASWELL(dev) &&
- (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
- intel_crt_init(dev);
+ if (HAS_PCH_SPLIT(dev)) {
+ dpd_is_edp = intel_dpd_is_edp(dev);
+
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A, PORT_A);
+
+ if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
+ }
+
+ intel_crt_init(dev);
if (IS_HASWELL(dev)) {
int found;
@@ -8286,10 +7584,6 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
- dpd_is_edp = intel_dpd_is_edp(dev);
-
- if (has_edp_a(dev))
- intel_dp_init(dev, DP_A, PORT_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
@@ -8309,15 +7603,11 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C, PORT_C);
- if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
- /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
- if (I915_READ(DP_C) & DP_DETECTED)
- intel_dp_init(dev, DP_C, PORT_C);
-
if (I915_READ(SDVOB) & PORT_DETECTED) {
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
@@ -8330,6 +7620,9 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, SDVOC, PORT_C);
+ /* Shares lanes with HDMI on SDVOC */
+ if (I915_READ(DP_C) & DP_DETECTED)
+ intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -8385,8 +7678,6 @@ static void intel_setup_outputs(struct drm_device *dev)
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
-
- drm_helper_move_panel_connectors_to_head(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -8427,51 +7718,27 @@ int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- /* FIXME <= Gen4 stride limits are bit unclear */
- if (mode_cmd->pitches[0] > 32768)
- return -EINVAL;
-
- if (obj->tiling_mode != I915_TILING_NONE &&
- mode_cmd->pitches[0] != obj->stride)
- return -EINVAL;
-
- /* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- if (INTEL_INFO(dev)->gen > 3)
- return -EINVAL;
- break;
case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- if (INTEL_INFO(dev)->gen < 4)
- return -EINVAL;
+ /* RGB formats are common across chipsets */
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_INFO(dev)->gen < 6)
- return -EINVAL;
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+ DRM_DEBUG_KMS("unsupported pixel format %u\n",
+ mode_cmd->pixel_format);
return -EINVAL;
}
- /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
- if (mode_cmd->offsets[0] != 0)
- return -EINVAL;
-
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -8509,13 +7776,7 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_HASWELL(dev)) {
- dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
- dev_priv->display.crtc_enable = haswell_crtc_enable;
- dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = haswell_crtc_off;
- dev_priv->display.update_plane = ironlake_update_plane;
- } else if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -8566,8 +7827,6 @@ static void intel_init_display(struct drm_device *dev)
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
- dev_priv->display.modeset_global_resources =
- ivb_modeset_global_resources;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
@@ -8799,7 +8058,6 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
- intel_cpu_pll_init(dev);
intel_pch_pll_init(dev);
/* Just disable it once at startup */
@@ -8869,7 +8127,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
- reg = PIPECONF(crtc->cpu_transcoder);
+ reg = PIPECONF(crtc->pipe);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* We need to sanitize the plane -> pipe mapping first because this will
@@ -8997,35 +8255,10 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
struct intel_encoder *encoder;
struct intel_connector *connector;
- if (IS_HASWELL(dev)) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
-
- if (tmp & TRANS_DDI_FUNC_ENABLE) {
- switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
- case TRANS_DDI_EDP_INPUT_A_ON:
- case TRANS_DDI_EDP_INPUT_A_ONOFF:
- pipe = PIPE_A;
- break;
- case TRANS_DDI_EDP_INPUT_B_ONOFF:
- pipe = PIPE_B;
- break;
- case TRANS_DDI_EDP_INPUT_C_ONOFF:
- pipe = PIPE_C;
- break;
- }
-
- crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- crtc->cpu_transcoder = TRANSCODER_EDP;
-
- DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
- pipe_name(pipe));
- }
- }
-
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
+ tmp = I915_READ(PIPECONF(pipe));
if (tmp & PIPECONF_ENABLE)
crtc->active = true;
else
@@ -9038,9 +8271,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
- if (IS_HASWELL(dev))
- intel_ddi_setup_hw_pll_state(dev);
-
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
pipe = 0;
@@ -9090,8 +8320,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
intel_modeset_update_staged_output_state(dev);
intel_modeset_check_state(dev);
-
- drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -9219,7 +8447,6 @@ intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
- enum transcoder cpu_transcoder;
int i;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
@@ -9227,8 +8454,6 @@ intel_display_capture_error_state(struct drm_device *dev)
return NULL;
for_each_pipe(i) {
- cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
-
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
@@ -9243,14 +8468,14 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+ error->pipe[i].conf = I915_READ(PIPECONF(i));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ error->pipe[i].htotal = I915_READ(HTOTAL(i));
+ error->pipe[i].hblank = I915_READ(HBLANK(i));
+ error->pipe[i].hsync = I915_READ(HSYNC(i));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+ error->pipe[i].vblank = I915_READ(VBLANK(i));
+ error->pipe[i].vsync = I915_READ(VSYNC(i));
}
return error;
diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c
index 1b63d55318a0..368ed8ef1600 100644
--- a/trunk/drivers/gpu/drm/i915/intel_dp.c
+++ b/trunk/drivers/gpu/drm/i915/intel_dp.c
@@ -36,6 +36,8 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
/**
@@ -47,9 +49,7 @@
*/
static bool is_edp(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
- return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+ return intel_dp->base.type == INTEL_OUTPUT_EDP;
}
/**
@@ -76,16 +76,15 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
-static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
+static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
- return intel_dig_port->base.base.dev;
+ return container_of(encoder, struct intel_dp, base.base);
}
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
- return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dp, base);
}
/**
@@ -107,31 +106,48 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
return is_pch_edp(intel_dp);
}
+static void intel_dp_start_link_train(struct intel_dp *intel_dp);
+static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
*lane_num = intel_dp->lane_count;
- *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+ if (intel_dp->link_bw == DP_LINK_BW_1_62)
+ *link_bw = 162000;
+ else if (intel_dp->link_bw == DP_LINK_BW_2_7)
+ *link_bw = 270000;
}
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
- if (intel_connector->panel.fixed_mode)
- return intel_connector->panel.fixed_mode->clock;
+ if (intel_dp->panel_fixed_mode)
+ return intel_dp->panel_fixed_mode->clock;
else
return mode->clock;
}
+static int
+intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+ int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ return max_lane_count;
+}
+
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
@@ -192,7 +208,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+ int max_lanes = intel_dp_max_lane_count(intel_dp);
int max_rate, mode_rate;
mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -218,14 +234,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- if (is_edp(intel_dp) && fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
+ if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
@@ -271,10 +285,6 @@ intel_hrawclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
- /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
- if (IS_VALLEYVIEW(dev))
- return 200;
-
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
@@ -300,7 +310,7 @@ intel_hrawclk(struct drm_device *dev)
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -308,7 +318,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -317,7 +327,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!is_edp(intel_dp))
@@ -336,8 +346,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -347,29 +356,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
uint32_t aux_clock_divider;
int try, precharge;
- if (IS_HASWELL(dev)) {
- switch (intel_dig_port->port) {
- case PORT_A:
- ch_ctl = DPA_AUX_CH_CTL;
- ch_data = DPA_AUX_CH_DATA1;
- break;
- case PORT_B:
- ch_ctl = PCH_DPB_AUX_CH_CTL;
- ch_data = PCH_DPB_AUX_CH_DATA1;
- break;
- case PORT_C:
- ch_ctl = PCH_DPC_AUX_CH_CTL;
- ch_data = PCH_DPC_AUX_CH_DATA1;
- break;
- case PORT_D:
- ch_ctl = PCH_DPD_AUX_CH_CTL;
- ch_data = PCH_DPD_AUX_CH_DATA1;
- break;
- default:
- BUG();
- }
- }
-
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
@@ -379,16 +365,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_HASWELL(dev))
- aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
- else if (IS_VALLEYVIEW(dev))
- aux_clock_divider = 100;
- else if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
- aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -660,6 +642,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
return -EREMOTEIO;
}
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
@@ -685,25 +670,22 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
return ret;
}
-bool
+static bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
- int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
- intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
- adjusted_mode);
- intel_pch_panel_fitting(dev,
- intel_connector->panel.fitting_mode,
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+ intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
}
@@ -780,23 +762,21 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *intel_encoder;
- struct intel_dp *intel_dp;
+ struct intel_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
/*
* Find the lane count in the intel_encoder private
*/
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_dp->base.type == INTEL_OUTPUT_EDP)
{
lane_count = intel_dp->lane_count;
break;
@@ -811,46 +791,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (IS_HASWELL(dev)) {
- I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
- TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
- } else if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(TRANSDATA_M1(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
- } else if (IS_VALLEYVIEW(dev)) {
- I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
- I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
- I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
- I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
} else {
I915_WRITE(PIPE_GMCH_DATA_M(pipe),
- TU_SIZE(m_n.tu) | m_n.gmch_m);
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
-void intel_dp_init_link_config(struct intel_dp *intel_dp)
-{
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
- intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
- /*
- * Check for DPCD version > 1.1 and enhanced framing support
- */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- }
-}
-
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -858,7 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/*
@@ -903,12 +860,21 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
-
- intel_dp_init_link_config(intel_dp);
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
/* Split out the IBX/CPU vs CPT settings */
- if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -965,7 +931,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -1012,9 +978,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
return control;
}
-void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1053,7 +1019,7 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1075,14 +1041,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
-void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
@@ -1105,9 +1071,9 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
-void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1147,9 +1113,9 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
@@ -1172,12 +1138,10 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
ironlake_wait_panel_off(intel_dp);
}
-void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
if (!is_edp(intel_dp))
@@ -1195,21 +1159,17 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
-
- intel_panel_enable_backlight(dev, pipe);
}
-void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
if (!is_edp(intel_dp))
return;
- intel_panel_disable_backlight(dev);
-
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
@@ -1220,9 +1180,8 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
@@ -1246,9 +1205,8 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
@@ -1270,7 +1228,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
}
/* If the sink supports it, try to set the power state appropriately */
-void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
int ret, i;
@@ -1340,11 +1298,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
return true;
}
}
-
- DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
- intel_dp->output_reg);
}
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+
return true;
}
@@ -1439,6 +1396,38 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE);
}
+static uint8_t
+intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static uint8_t
+intel_get_adjust_request_voltage(uint8_t adjust_request[2],
+ int lane)
+{
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = adjust_request[lane>>1];
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
+ int lane)
+{
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = adjust_request[lane>>1];
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
#if 0
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@@ -1459,7 +1448,7 @@ static char *link_train_names[] = {
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1472,21 +1461,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
- if (IS_HASWELL(dev)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_9_5;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1517,12 +1494,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
uint8_t v = 0;
uint8_t p = 0;
int lane;
+ uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
uint8_t voltage_max;
uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
- uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+ uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
if (this_v > v)
v = this_v;
@@ -1639,38 +1617,52 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
}
}
-/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
-static uint32_t
-intel_dp_signal_levels_hsw(uint8_t train_set)
+static uint8_t
+intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_400MV_0DB_HSW;
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_400MV_3_5DB_HSW;
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_400MV_6DB_HSW;
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
- return DDI_BUF_EMP_400MV_9_5DB_HSW;
+ int s = (lane & 1) * 4;
+ uint8_t l = link_status[lane>>1];
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_600MV_0DB_HSW;
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_600MV_3_5DB_HSW;
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_600MV_6DB_HSW;
+ return (l >> s) & 0xf;
+}
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_800MV_0DB_HSW;
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_800MV_3_5DB_HSW;
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return DDI_BUF_EMP_400MV_0DB_HSW;
+/* Check for clock recovery is done on all channels */
+static bool
+intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel eq is done on all channels */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = intel_dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
}
+ return true;
}
static bool
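
The helpers added above (intel_dp_link_status, intel_get_adjust_request_voltage, intel_get_lane_status, intel_clock_recovery_ok, intel_channel_eq_ok) open-code the DPCD link-status parsing that the removed drm_dp_* helpers used to provide: each status byte carries two lanes, four bits per lane, with odd lanes in the high nibble. A minimal standalone sketch of that unpacking follows; the constants mirror drm_dp_helper.h, but the function names are placeholders, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_LINK_STATUS_SIZE	6
#define DP_LANE_CR_DONE		(1 << 0)

/* Two lanes share each byte; odd lanes sit in the high nibble. */
static uint8_t lane_status(const uint8_t link_status[DP_LINK_STATUS_SIZE], int lane)
{
	int shift = (lane & 1) * 4;

	return (link_status[lane >> 1] >> shift) & 0xf;
}

/* Clock recovery succeeds only if every active lane reports CR_DONE. */
static bool clock_recovery_ok(const uint8_t link_status[DP_LINK_STATUS_SIZE],
			      int lane_count)
{
	int lane;

	for (lane = 0; lane < lane_count; lane++)
		if (!(lane_status(link_status, lane) & DP_LANE_CR_DONE))
			return false;
	return true;
}

int main(void)
{
	/* Lanes 0-3 all report CR_DONE (low bit of each nibble set). */
	uint8_t status[DP_LINK_STATUS_SIZE] = { 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 };

	printf("clock recovery ok: %d\n", clock_recovery_ok(status, 4));
	return 0;
}
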
@@ -1678,49 +1670,11 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum port port = intel_dig_port->port;
int ret;
- uint32_t temp;
-
- if (IS_HASWELL(dev)) {
- temp = I915_READ(DP_TP_CTL(port));
-
- if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
- temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
- else
- temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
-
- temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
- case DP_TRAINING_PATTERN_DISABLE:
- temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), temp);
-
- if (wait_for((I915_READ(DP_TP_STATUS(port)) &
- DP_TP_STATUS_IDLE_DONE), 1))
- DRM_ERROR("Timed out waiting for DP idle patterns\n");
- temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
- temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
-
- break;
- case DP_TRAINING_PATTERN_1:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- break;
- case DP_TRAINING_PATTERN_2:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
- break;
- case DP_TRAINING_PATTERN_3:
- temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
- break;
- }
- I915_WRITE(DP_TP_CTL(port), temp);
-
- } else if (HAS_PCH_CPT(dev) &&
- (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1780,20 +1734,16 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
}
/* Enable corresponding port and start training pattern 1 */
-void
+static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
- if (IS_HASWELL(dev))
- intel_ddi_prepare_link_retrain(encoder);
-
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1811,11 +1761,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_HASWELL(dev)) {
- signal_levels = intel_dp_signal_levels_hsw(
- intel_dp->train_set[0]);
- DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1823,24 +1770,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
- signal_levels);
- /* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
+ /* Set training pattern 1 */
- drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+ udelay(100);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
- if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
@@ -1879,10 +1825,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}
-void
+static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
@@ -1902,10 +1848,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_HASWELL(dev)) {
- signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
- DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
- } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1922,18 +1865,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_LINK_SCRAMBLING_DISABLE))
break;
- drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+ udelay(400);
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
/* Make sure clock is still ok */
- if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
- if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ if (intel_channel_eq_ok(intel_dp, link_status)) {
channel_eq = true;
break;
}
@@ -1952,38 +1895,16 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (channel_eq)
- DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
-
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
- /*
- * DDI code has a strict mode set sequence and we should try to respect
- * it, otherwise we might hang the machine in many different ways. So we
- * really should be disabling the port only on a complete crtc_disable
- * sequence. This function is just called under two conditions on DDI
- * code:
- * - Link train failed while doing crtc_enable, and on this case we
- * really should respect the mode set sequence and wait for a
- * crtc_disable.
- * - Someone turned the monitor off and intel_dp_check_link_status
- * called us. We don't need to disable the whole port on this case, so
- * when someone turns the monitor on again,
- * intel_ddi_prepare_link_retrain will take care of redoing the link
- * train.
- */
- if (IS_HASWELL(dev))
- return;
-
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
@@ -2002,7 +1923,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
@@ -2103,7 +2024,7 @@ static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
- intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
+ intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
}
/*
@@ -2115,17 +2036,16 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 4. Check link status on receipt of hot-plug interrupt
*/
-void
+static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_encoder->connectors_active)
+ if (!intel_dp->base.connectors_active)
return;
- if (WARN_ON(!intel_encoder->base.crtc))
+ if (WARN_ON(!intel_dp->base.base.crtc))
return;
/* Try to read receiver status if the link appears to be up */
@@ -2154,9 +2074,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ if (!intel_channel_eq_ok(intel_dp, link_status)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- drm_get_encoder_name(&intel_encoder->base));
+ drm_get_encoder_name(&intel_dp->base.base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
@@ -2205,12 +2125,11 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
- status = intel_panel_detect(dev);
+ status = intel_panel_detect(intel_dp->base.base.dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
@@ -2222,7 +2141,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@@ -2249,45 +2168,44 @@ g4x_dp_detect(struct intel_dp *intel_dp)
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- /* use cached edid if we have one */
- if (intel_connector->edid) {
- struct edid *edid;
- int size;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct edid *edid;
+ int size;
- /* invalid edid */
- if (IS_ERR(intel_connector->edid))
+ if (is_edp(intel_dp)) {
+ if (!intel_dp->edid)
return NULL;
- size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
+ size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
edid = kmalloc(size, GFP_KERNEL);
if (!edid)
return NULL;
- memcpy(edid, intel_connector->edid, size);
+ memcpy(edid, intel_dp->edid, size);
return edid;
}
- return drm_get_edid(connector, adapter);
+ edid = drm_get_edid(connector, adapter);
+ return edid;
}
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- /* use cached edid if we have one */
- if (intel_connector->edid) {
- /* invalid edid */
- if (IS_ERR(intel_connector->edid))
- return 0;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ int ret;
- return intel_connector_update_modes(connector,
- intel_connector->edid);
+ if (is_edp(intel_dp)) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_dp->edid);
+ ret = drm_add_edid_modes(connector, intel_dp->edid);
+ drm_edid_to_eld(connector,
+ intel_dp->edid);
+ return intel_dp->edid_mode_count;
}
- return intel_ddc_get_modes(connector, adapter);
+ ret = intel_ddc_get_modes(connector, adapter);
+ return ret;
}
@@ -2301,12 +2219,9 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = connector->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
enum drm_connector_status status;
struct edid *edid = NULL;
- char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false;
@@ -2315,9 +2230,10 @@ intel_dp_detect(struct drm_connector *connector, bool force)
else
status = g4x_dp_detect(intel_dp);
- hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
- 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
- DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+ DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
+ intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
+ intel_dp->dpcd[6], intel_dp->dpcd[7]);
if (status != connector_status_connected)
return status;
@@ -2334,31 +2250,49 @@ intel_dp_detect(struct drm_connector *connector, bool force)
}
}
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_connected;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_device *dev = connector->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
- if (ret)
+ if (ret) {
+ if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
+ struct drm_display_mode *newmode;
+ list_for_each_entry(newmode, &connector->probed_modes,
+ head) {
+ if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, newmode);
+ break;
+ }
+ }
+ }
return ret;
+ }
- /* if eDP has no EDID, fall back to fixed mode */
- if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
- struct drm_display_mode *mode;
- mode = drm_mode_duplicate(dev,
- intel_connector->panel.fixed_mode);
- if (mode) {
+ /* if eDP has no EDID, try to use fixed panel mode from VBT */
+ if (is_edp(intel_dp)) {
+ /* initialize panel mode from VBT if available for eDP */
+ if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
+ intel_dp->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (intel_dp->panel_fixed_mode) {
+ intel_dp->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ }
+ }
+ if (intel_dp->panel_fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
return 1;
}
@@ -2388,12 +2322,10 @@ intel_dp_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
- ret = drm_object_property_set_value(&connector->base, property, val);
+ ret = drm_connector_property_set_value(connector, property, val);
if (ret)
return ret;
@@ -2426,27 +2358,11 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
- if (is_edp(intel_dp) &&
- property == connector->dev->mode_config.scaling_mode_property) {
- if (val == DRM_MODE_SCALE_NONE) {
- DRM_DEBUG_KMS("no scaling not supported\n");
- return -EINVAL;
- }
-
- if (intel_connector->panel.fitting_mode == val) {
- /* the eDP scaling property is not changed */
- return 0;
- }
- intel_connector->panel.fitting_mode = val;
-
- goto done;
- }
-
return -EINVAL;
done:
- if (intel_encoder->base.crtc) {
- struct drm_crtc *crtc = intel_encoder->base.crtc;
+ if (intel_dp->base.base.crtc) {
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@@ -2459,33 +2375,27 @@ intel_dp_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
- if (is_edp(intel_dp)) {
+ if (is_edp(intel_dp))
intel_panel_destroy_backlight(dev);
- intel_panel_fini(&intel_connector->panel);
- }
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
-void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
+ kfree(intel_dp->edid);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
- kfree(intel_dig_port);
+ kfree(intel_dp);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -2515,7 +2425,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
intel_dp_check_link_status(intel_dp);
}
@@ -2525,14 +2435,13 @@ int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *intel_encoder;
- struct intel_dp *intel_dp;
+ struct intel_encoder *encoder;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_dp->base.type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
@@ -2562,191 +2471,78 @@ bool intel_dpd_is_edp(struct drm_device *dev)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
-
- if (is_edp(intel_dp)) {
- drm_mode_create_scaling_mode_property(connector->dev);
- drm_object_attach_property(
- &connector->base,
- connector->dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_ASPECT);
- intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
- }
-}
-
-static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq cur, vbt, spec, final;
- u32 pp_on, pp_off, pp_div, pp;
-
- /* Workaround: Need to write PP_CONTROL with the unlock key as
- * the very first thing. */
- pp = ironlake_get_pp_control(dev_priv);
- I915_WRITE(PCH_PP_CONTROL, pp);
-
- pp_on = I915_READ(PCH_PP_ON_DELAYS);
- pp_off = I915_READ(PCH_PP_OFF_DELAYS);
- pp_div = I915_READ(PCH_PP_DIVISOR);
-
- /* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
-
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
-
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
-
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
-
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
- vbt = dev_priv->edp.pps;
-
- /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
- * our hw here, which are all in 100usec. */
- spec.t1_t3 = 210 * 10;
- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec.t10 = 500 * 10;
- /* This one is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- spec.t11_t12 = (510 + 100) * 10;
-
- DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
-
- /* Use the max of the register settings and vbt. If both are
- * unset, fall back to the spec limits. */
-#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
- spec.field : \
- max(cur.field, vbt.field))
- assign_final(t1_t3);
- assign_final(t8);
- assign_final(t9);
- assign_final(t10);
- assign_final(t11_t12);
-#undef assign_final
-
-#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
- intel_dp->panel_power_up_delay = get_delay(t1_t3);
- intel_dp->backlight_on_delay = get_delay(t8);
- intel_dp->backlight_off_delay = get_delay(t9);
- intel_dp->panel_power_down_delay = get_delay(t10);
- intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
-
- /* And finally store the new values in the power sequencer. */
- pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
- (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
- /* Compute the divisor for the pp clock, simply match the Bspec
- * formula. */
- pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
- << PP_REFERENCE_DIVIDER_SHIFT;
- pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
- << PANEL_POWER_CYCLE_DELAY_SHIFT);
-
- /* Haswell doesn't have any port selection bits for the panel
- * power sequencer any more. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
- if (is_cpu_edp(intel_dp))
- pp_on |= PANEL_POWER_PORT_DP_A;
- else
- pp_on |= PANEL_POWER_PORT_DP_D;
- }
-
- I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
- I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
- I915_WRITE(PCH_PP_DIVISOR, pp_div);
-
-
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
- DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- I915_READ(PCH_PP_ON_DELAYS),
- I915_READ(PCH_PP_OFF_DELAYS),
- I915_READ(PCH_PP_DIVISOR));
}
void
-intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector)
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
- struct drm_connector *connector = &intel_connector->base;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *fixed_mode = NULL;
- enum port port = intel_dig_port->port;
+ struct drm_connector *connector;
+ struct intel_dp *intel_dp;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
const char *name = NULL;
int type;
+ intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
+ if (!intel_dp)
+ return;
+
+ intel_dp->output_reg = output_reg;
+ intel_dp->port = port;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
- intel_dp->attached_connector = intel_connector;
- if (HAS_PCH_SPLIT(dev) && port == PORT_D)
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dp);
+ return;
+ }
+ intel_encoder = &intel_dp->base;
+
+ if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
- /*
- * FIXME : We need to initialize built-in panels before external panels.
- * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
- */
- if (IS_VALLEYVIEW(dev) && port == PORT_C) {
- type = DRM_MODE_CONNECTOR_eDP;
- intel_encoder->type = INTEL_OUTPUT_EDP;
- } else if (port == PORT_A || is_pch_edp(intel_dp)) {
+ if (output_reg == DP_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
- /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
- * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
- * rewrite it.
- */
type = DRM_MODE_CONNECTOR_DisplayPort;
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
- connector->interlace_allowed = true;
- connector->doublescan_allowed = 0;
+
+ intel_encoder->cloneable = false;
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
ironlake_panel_vdd_work);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
- if (IS_HASWELL(dev))
- intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
- else
- intel_connector->get_hw_state = intel_connector_get_hw_state;
-
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
/* Set up the DDC bus. */
switch (port) {
@@ -2770,15 +2566,66 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
break;
}
- if (is_edp(intel_dp))
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ /* Cache some DPCD data in the eDP case */
+ if (is_edp(intel_dp)) {
+ struct edp_power_seq cur, vbt;
+ u32 pp_on, pp_off, pp_div;
+
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ if (!pp_on || !pp_off || !pp_div) {
+ DRM_INFO("bad panel power sequencing delays, disabling panel\n");
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
+ return;
+ }
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->edp.pps;
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
+
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+ }
intel_dp_i2c_init(intel_dp, intel_connector, name);
- /* Cache DPCD and EDID for edp. */
if (is_edp(intel_dp)) {
bool ret;
- struct drm_display_mode *scan;
struct edid *edid;
ironlake_edp_panel_vdd_on(intel_dp);
@@ -2793,47 +2640,29 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- intel_dp_encoder_destroy(&intel_encoder->base);
- intel_dp_destroy(connector);
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
return;
}
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
- if (drm_add_edid_modes(connector, edid)) {
- drm_mode_connector_update_edid_property(connector, edid);
- drm_edid_to_eld(connector, edid);
- } else {
- kfree(edid);
- edid = ERR_PTR(-EINVAL);
- }
- } else {
- edid = ERR_PTR(-ENOENT);
- }
- intel_connector->edid = edid;
-
- /* prefer fixed mode from EDID if available */
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- break;
- }
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ intel_dp->edid_mode_count =
+ drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ intel_dp->edid = edid;
}
-
- /* fallback to VBT if available for eDP */
- if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
- fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (fixed_mode)
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- }
-
ironlake_edp_panel_vdd_off(intel_dp, false);
}
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
if (is_edp(intel_dp)) {
- intel_panel_init(&intel_connector->panel, fixed_mode);
- intel_panel_setup_backlight(connector);
+ dev_priv->int_edp_connector = connector;
+ intel_panel_setup_backlight(dev);
}
intel_dp_add_properties(intel_dp, connector);
@@ -2847,45 +2676,3 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
-
-void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
-{
- struct intel_digital_port *intel_dig_port;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
- struct intel_connector *intel_connector;
-
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
- if (!intel_dig_port)
- return;
-
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_dig_port);
- return;
- }
-
- intel_encoder = &intel_dig_port->base;
- encoder = &intel_encoder->base;
-
- drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
-
- intel_encoder->enable = intel_enable_dp;
- intel_encoder->pre_enable = intel_pre_enable_dp;
- intel_encoder->disable = intel_disable_dp;
- intel_encoder->post_disable = intel_post_disable_dp;
- intel_encoder->get_hw_state = intel_dp_get_hw_state;
-
- intel_dig_port->port = port;
- intel_dig_port->dp.output_reg = output_reg;
-
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
- intel_encoder->hot_plug = intel_dp_hot_plug;
-
- intel_dp_init_connector(intel_dig_port, intel_connector);
-}
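
One detail worth calling out from the eDP initialization hunk above: the get_delay() macro takes the larger of the register-programmed and VBT-provided value for each panel delay and converts it from the hardware's 100 microsecond units to milliseconds, rounding up. A small standalone illustration of that conversion, with an invented function name and sample values:

#include <stdio.h>

/* Panel power-sequencing delays are stored in units of 100 us; the driver
 * wants milliseconds, rounded up, and trusts whichever of the register
 * value or the VBT value is larger. */
static int panel_delay_ms(int reg_100us, int vbt_100us)
{
	int raw = reg_100us > vbt_100us ? reg_100us : vbt_100us;

	return (raw + 9) / 10;
}

int main(void)
{
	/* A T1+T3 of 2100 (210 ms) in the registers and 0 in the VBT. */
	printf("%d ms\n", panel_delay_ms(2100, 0));	/* prints 210 */
	return 0;
}
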
diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h
index 522061ca0685..fe7142502f43 100644
--- a/trunk/drivers/gpu/drm/i915/intel_drv.h
+++ b/trunk/drivers/gpu/drm/i915/intel_drv.h
@@ -94,7 +94,6 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_UNKNOWN 9
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -164,11 +163,6 @@ struct intel_encoder {
int crtc_mask;
};
-struct intel_panel {
- struct drm_display_mode *fixed_mode;
- int fitting_mode;
-};
-
struct intel_connector {
struct drm_connector base;
/*
@@ -185,19 +179,12 @@ struct intel_connector {
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
-
- /* Panel info for eDP and LVDS */
- struct intel_panel panel;
-
- /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
- struct edid *edid;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
- enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256];
/*
* Whether the crtc and the connected output pipeline is active. Implies
@@ -211,8 +198,6 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
- atomic_t unpin_work_count;
-
/* Display surface base address adjustement for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
@@ -227,14 +212,12 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
- uint32_t ddi_pll_sel;
};
struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
- bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
@@ -334,8 +317,10 @@ struct dip_infoframe {
} __attribute__((packed));
struct intel_hdmi {
+ struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
+ int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
@@ -346,15 +331,18 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
+#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
+ struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
+ enum port port;
uint32_t color_range;
uint8_t link_bw;
uint8_t lane_count;
@@ -369,16 +357,11 @@ struct intel_dp {
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
+ struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
- struct intel_connector *attached_connector;
-};
-
-struct intel_digital_port {
- struct intel_encoder base;
- enum port port;
- struct intel_dp dp;
- struct intel_hdmi hdmi;
+ struct edid *edid; /* cached EDID for eDP */
+ int edid_mode_count;
};
static inline struct drm_crtc *
@@ -397,7 +380,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
struct intel_unpin_work {
struct work_struct work;
- struct drm_crtc *crtc;
+ struct drm_device *dev;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
@@ -412,8 +395,6 @@ struct intel_fbc_work {
int interval;
};
-int intel_pch_rawclk(struct drm_device *dev);
-
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -424,12 +405,7 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
-extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
@@ -442,27 +418,10 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
-extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
-extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
-extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern int intel_edp_target_clock(struct intel_encoder *,
struct drm_display_mode *mode);
@@ -472,10 +431,6 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
/* intel_panel.c */
-extern int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode);
-extern void intel_panel_fini(struct intel_panel *panel);
-
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -484,7 +439,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_connector *connector);
+extern int intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -518,31 +473,6 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
return to_intel_connector(connector)->encoder;
}
-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
-{
- struct intel_digital_port *intel_dig_port =
- container_of(encoder, struct intel_digital_port, base.base);
- return &intel_dig_port->dp;
-}
-
-static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct intel_digital_port, base.base);
-}
-
-static inline struct intel_digital_port *
-dp_to_dig_port(struct intel_dp *intel_dp)
-{
- return container_of(intel_dp, struct intel_digital_port, dp);
-}
-
-static inline struct intel_digital_port *
-hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
-{
- return container_of(intel_hdmi, struct intel_digital_port, hdmi);
-}
-
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -551,9 +481,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern enum transcoder
-intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
@@ -623,10 +550,6 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
-extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
- unsigned int bpp,
- unsigned int pitch);
-
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -650,22 +573,12 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
+extern void intel_enable_ddi(struct intel_encoder *encoder);
+extern void intel_disable_ddi(struct intel_encoder *encoder);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
-extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
-extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
-extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder);
-extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
-extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
-extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
-extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
-extern bool
-intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+extern void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
#endif /* __INTEL_DRV_H__ */
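
The intel_drv.h changes above fold struct intel_digital_port away: struct intel_dp and struct intel_hdmi embed struct intel_encoder directly, so the enc_to_*() and *_attached_*() helpers (see the intel_hdmi.c hunks below) reduce to container_of() upcasts from the embedded base. A generic standalone sketch of that pattern, using invented type names rather than the driver's own:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of: recover the outer struct from a pointer
 * to one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder {
	int type;
};

struct dp_output {
	struct encoder base;	/* embedded, so &out->base round-trips */
	unsigned int output_reg;
};

static struct dp_output *enc_to_dp(struct encoder *enc)
{
	return container_of(enc, struct dp_output, base);
}

int main(void)
{
	struct dp_output out = { .base = { .type = 7 }, .output_reg = 0x64000 };
	struct encoder *enc = &out.base;	/* what core code hands back */

	printf("%#x\n", enc_to_dp(enc)->output_reg);	/* prints 0x64000 */
	return 0;
}
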
diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c
index 5c279b48df97..9ba0aaed7ee8 100644
--- a/trunk/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c
@@ -36,15 +36,10 @@
 #include <drm/i915_drm.h>
#include "i915_drv.h"
-static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
-{
- return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
-}
-
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+ struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
@@ -56,14 +51,13 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port =
- container_of(encoder, struct intel_digital_port, base.base);
- return &intel_dig_port->hdmi;
+ return container_of(encoder, struct intel_hdmi, base.base);
}
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
{
- return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+ return container_of(intel_attached_encoder(connector),
+ struct intel_hdmi, base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -760,16 +754,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+ struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@@ -792,9 +786,6 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
- struct intel_digital_port *intel_dig_port =
- hdmi_to_dig_port(intel_hdmi);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
@@ -823,7 +814,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
- intel_encoder->type = INTEL_OUTPUT_HDMI;
}
return status;
@@ -869,12 +859,10 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
- struct intel_digital_port *intel_dig_port =
- hdmi_to_dig_port(intel_hdmi);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
- ret = drm_object_property_set_value(&connector->base, property, val);
+ ret = drm_connector_property_set_value(connector, property, val);
if (ret)
return ret;
@@ -910,8 +898,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
return -EINVAL;
done:
- if (intel_dig_port->base.base.crtc) {
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ if (intel_hdmi->base.base.crtc) {
+ struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@@ -926,6 +914,12 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
+ .mode_fixup = intel_hdmi_mode_fixup,
+ .mode_set = intel_ddi_mode_set,
+ .disable = intel_encoder_noop,
+};
+
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
@@ -957,24 +951,43 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector)
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
- struct drm_connector *connector = &intel_connector->base;
- struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum port port = intel_dig_port->port;
+ struct drm_connector *connector;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ struct intel_hdmi *intel_hdmi;
+ intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
+ if (!intel_hdmi)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_hdmi);
+ return;
+ }
+
+ intel_encoder = &intel_hdmi->base;
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ intel_encoder->cloneable = false;
+ intel_hdmi->ddi_port = port;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -994,6 +1007,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
BUG();
}
+ intel_hdmi->sdvox_reg = sdvox_reg;
+
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1011,10 +1026,21 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
- if (IS_HASWELL(dev))
- intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
- else
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ if (IS_HASWELL(dev)) {
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_hdmi_helper_funcs_hsw);
+ } else {
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_hdmi_helper_funcs);
+ }
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
intel_hdmi_add_properties(intel_hdmi, connector);
@@ -1030,42 +1056,3 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
-
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
-{
- struct intel_digital_port *intel_dig_port;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
- struct intel_connector *intel_connector;
-
- intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
- if (!intel_dig_port)
- return;
-
- intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
- if (!intel_connector) {
- kfree(intel_dig_port);
- return;
- }
-
- intel_encoder = &intel_dig_port->base;
- encoder = &intel_encoder->base;
-
- drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
-
- intel_encoder->enable = intel_enable_hdmi;
- intel_encoder->disable = intel_disable_hdmi;
- intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
-
- intel_encoder->type = INTEL_OUTPUT_HDMI;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- intel_encoder->cloneable = false;
-
- intel_dig_port->port = port;
- intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
- intel_dig_port->dp.output_reg = 0;
-
- intel_hdmi_init_connector(intel_dig_port, intel_connector);
-}
diff --git a/trunk/drivers/gpu/drm/i915/intel_i2c.c b/trunk/drivers/gpu/drm/i915/intel_i2c.c
index 3ef5af15b812..c2c6dbc0971c 100644
--- a/trunk/drivers/gpu/drm/i915/intel_i2c.c
+++ b/trunk/drivers/gpu/drm/i915/intel_i2c.c
@@ -432,7 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- bus->force_bit = 1;
+ bus->force_bit = true;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
- bus->force_bit = 1;
+ bus->force_bit = true;
intel_gpio_setup(bus, port);
@@ -532,10 +532,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- bus->force_bit += force_bit ? 1 : -1;
- DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
- force_bit ? "en" : "dis", adapter->name,
- bus->force_bit);
+ bus->force_bit = force_bit;
}
void intel_teardown_gmbus(struct drm_device *dev)
diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c
index 81502e8be26b..edba93b3474b 100644
--- a/trunk/drivers/gpu/drm/i915/intel_lvds.c
+++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c
@@ -40,30 +40,28 @@
#include
/* Private structure for the integrated LVDS support */
-struct intel_lvds_connector {
- struct intel_connector base;
-
- struct notifier_block lid_notifier;
-};
-
-struct intel_lvds_encoder {
+struct intel_lvds {
struct intel_encoder base;
+ struct edid *edid;
+
+ int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool pfit_dirty;
- struct intel_lvds_connector *attached_connector;
+ struct drm_display_mode *fixed_mode;
};
-static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_lvds_encoder, base.base);
+ return container_of(encoder, struct intel_lvds, base.base);
}
-static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
{
- return container_of(connector, struct intel_lvds_connector, base.base);
+ return container_of(intel_attached_encoder(connector),
+ struct intel_lvds, base);
}
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@@ -98,7 +96,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -115,7 +113,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (lvds_encoder->pfit_dirty) {
+ if (intel_lvds->pfit_dirty) {
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
@@ -123,12 +121,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- lvds_encoder->pfit_control,
- lvds_encoder->pfit_pgm_ratios);
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
- lvds_encoder->pfit_dirty = false;
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ intel_lvds->pfit_dirty = false;
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -142,7 +140,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -162,9 +160,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
- if (lvds_encoder->pfit_control) {
+ if (intel_lvds->pfit_control) {
I915_WRITE(PFIT_CONTROL, 0);
- lvds_encoder->pfit_dirty = true;
+ intel_lvds->pfit_dirty = true;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -174,8 +172,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+ struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -251,10 +249,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct intel_connector *intel_connector =
- &lvds_encoder->attached_connector->base;
- struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+ struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -264,7 +260,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
- if (intel_encoder_check_is_cloned(&lvds_encoder->base))
+ if (intel_encoder_check_is_cloned(&intel_lvds->base))
return false;
/*
@@ -273,12 +269,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
- adjusted_mode);
+ intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
- intel_pch_panel_fitting(dev,
- intel_connector->panel.fitting_mode,
+ intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
mode, adjusted_mode);
return true;
}
@@ -304,7 +298,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
- switch (intel_connector->panel.fitting_mode) {
+ switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@@ -402,11 +396,11 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- if (pfit_control != lvds_encoder->pfit_control ||
- pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
- lvds_encoder->pfit_control = pfit_control;
- lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
- lvds_encoder->pfit_dirty = true;
+ if (pfit_control != intel_lvds->pfit_control ||
+ pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
+ intel_lvds->pfit_control = pfit_control;
+ intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ intel_lvds->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@@ -455,15 +449,14 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
- struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- /* use cached edid if we have one */
- if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
- return drm_add_edid_modes(connector, lvds_connector->base.edid);
+ if (intel_lvds->edid)
+ return drm_add_edid_modes(connector, intel_lvds->edid);
- mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+ mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
if (mode == NULL)
return 0;
@@ -503,11 +496,10 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
- struct intel_lvds_connector *lvds_connector =
- container_of(nb, struct intel_lvds_connector, lid_notifier);
- struct drm_connector *connector = &lvds_connector->base.base;
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, lid_notifier);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector = dev_priv->int_lvds_connector;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
@@ -516,7 +508,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* check and update the status of LVDS connector after receiving
 * the LID notification event.
*/
- connector->status = connector->funcs->detect(connector, false);
+ if (connector)
+ connector->status = connector->funcs->detect(connector,
+ false);
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
@@ -547,18 +541,13 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
- struct intel_lvds_connector *lvds_connector =
- to_lvds_connector(connector);
-
- if (lvds_connector->lid_notifier.notifier_call)
- acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
-
- if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
- kfree(lvds_connector->base.edid);
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- intel_panel_destroy_backlight(connector->dev);
- intel_panel_fini(&lvds_connector->base.panel);
+ intel_panel_destroy_backlight(dev);
+ if (dev_priv->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -568,24 +557,22 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.scaling_mode_property) {
- struct drm_crtc *crtc;
+ struct drm_crtc *crtc = intel_lvds->base.base.crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
- if (intel_connector->panel.fitting_mode == value) {
+ if (intel_lvds->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- intel_connector->panel.fitting_mode = value;
-
- crtc = intel_attached_encoder(connector)->base.crtc;
+ intel_lvds->fitting_mode = value;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@@ -925,15 +912,12 @@ static bool intel_lvds_supported(struct drm_device *dev)
bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds_encoder *lvds_encoder;
+ struct intel_lvds *intel_lvds;
struct intel_encoder *intel_encoder;
- struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
- struct drm_display_mode *fixed_mode = NULL;
- struct edid *edid;
struct drm_crtc *crtc;
u32 lvds;
int pipe;
@@ -961,25 +945,23 @@ bool intel_lvds_init(struct drm_device *dev)
}
}
- lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
- if (!lvds_encoder)
+ intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
+ if (!intel_lvds) {
return false;
+ }
- lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
- if (!lvds_connector) {
- kfree(lvds_encoder);
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_lvds);
return false;
}
- lvds_encoder->attached_connector = lvds_connector;
-
if (!HAS_PCH_SPLIT(dev)) {
- lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
+ intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
}
- intel_encoder = &lvds_encoder->base;
+ intel_encoder = &intel_lvds->base;
encoder = &intel_encoder->base;
- intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -1011,10 +993,14 @@ bool intel_lvds_init(struct drm_device *dev)
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
- drm_object_attach_property(&connector->base,
+ /*
+ * the initial panel fitting mode will be FULL_SCREEN.
+ */
+
+ drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1029,21 +1015,20 @@ bool intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
- if (edid) {
- if (drm_add_edid_modes(connector, edid)) {
+ intel_lvds->edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ pin));
+ if (intel_lvds->edid) {
+ if (drm_add_edid_modes(connector,
+ intel_lvds->edid)) {
drm_mode_connector_update_edid_property(connector,
- edid);
+ intel_lvds->edid);
} else {
- kfree(edid);
- edid = ERR_PTR(-EINVAL);
+ kfree(intel_lvds->edid);
+ intel_lvds->edid = NULL;
}
- } else {
- edid = ERR_PTR(-ENOENT);
}
- lvds_connector->base.edid = edid;
-
- if (IS_ERR_OR_NULL(edid)) {
+ if (!intel_lvds->edid) {
/* Didn't get an EDID, so
* Set wide sync ranges so we get all modes
* handed to valid_mode for checking
@@ -1056,26 +1041,22 @@ bool intel_lvds_init(struct drm_device *dev)
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- DRM_DEBUG_KMS("using preferred mode from EDID: ");
- drm_mode_debug_printmodeline(scan);
-
- fixed_mode = drm_mode_duplicate(dev, scan);
- if (fixed_mode) {
- intel_find_lvds_downclock(dev, fixed_mode,
- connector);
- goto out;
- }
+ intel_lvds->fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ intel_find_lvds_downclock(dev,
+ intel_lvds->fixed_mode,
+ connector);
+ goto out;
}
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
- DRM_DEBUG_KMS("using mode from VBT: ");
- drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
-
- fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (fixed_mode) {
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ intel_lvds->fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
@@ -1095,17 +1076,16 @@ bool intel_lvds_init(struct drm_device *dev)
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
- fixed_mode = intel_crtc_mode_get(dev, crtc);
- if (fixed_mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
- drm_mode_debug_printmodeline(fixed_mode);
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (intel_lvds->fixed_mode) {
+ intel_lvds->fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
- if (!fixed_mode)
+ if (!intel_lvds->fixed_mode)
goto failed;
out:
@@ -1120,15 +1100,16 @@ bool intel_lvds_init(struct drm_device *dev)
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
- lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
- if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
+ dev_priv->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
- lvds_connector->lid_notifier.notifier_call = NULL;
+ dev_priv->lid_notifier.notifier_call = NULL;
}
+ /* keep the LVDS connector */
+ dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- intel_panel_init(&intel_connector->panel, fixed_mode);
- intel_panel_setup_backlight(connector);
+ intel_panel_setup_backlight(dev);
return true;
@@ -1136,9 +1117,7 @@ bool intel_lvds_init(struct drm_device *dev)
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- if (fixed_mode)
- drm_mode_destroy(dev, fixed_mode);
- kfree(lvds_encoder);
- kfree(lvds_connector);
+ kfree(intel_lvds);
+ kfree(intel_connector);
return false;
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_modes.c b/trunk/drivers/gpu/drm/i915/intel_modes.c
index b00f1c83adce..cabd84bf66eb 100644
--- a/trunk/drivers/gpu/drm/i915/intel_modes.c
+++ b/trunk/drivers/gpu/drm/i915/intel_modes.c
@@ -45,6 +45,7 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
+ kfree(edid);
return ret;
}
@@ -60,16 +61,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
- int ret;
edid = drm_get_edid(connector, adapter);
if (!edid)
return 0;
- ret = intel_connector_update_modes(connector, edid);
- kfree(edid);
-
- return ret;
+ return intel_connector_update_modes(connector, edid);
}
static const struct drm_prop_enum_list force_audio_names[] = {
@@ -97,7 +94,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
dev_priv->force_audio_property = prop;
}
- drm_object_attach_property(&connector->base, prop, 0);
+ drm_connector_attach_property(connector, prop, 0);
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -124,5 +121,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
dev_priv->broadcast_rgb_property = prop;
}
- drm_object_attach_property(&connector->base, prop, 0);
+ drm_connector_attach_property(connector, prop, 0);
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_opregion.c b/trunk/drivers/gpu/drm/i915/intel_opregion.c
index 7741c22c934c..5530413213d8 100644
--- a/trunk/drivers/gpu/drm/i915/intel_opregion.c
+++ b/trunk/drivers/gpu/drm/i915/intel_opregion.c
@@ -154,8 +154,6 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
- DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
-
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
diff --git a/trunk/drivers/gpu/drm/i915/intel_panel.c b/trunk/drivers/gpu/drm/i915/intel_panel.c
index c758ad277473..e2aacd329545 100644
--- a/trunk/drivers/gpu/drm/i915/intel_panel.c
+++ b/trunk/drivers/gpu/drm/i915/intel_panel.c
@@ -138,24 +138,24 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv->dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2);
- if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
- dev_priv->regfile.saveBLC_PWM_CTL2 = val;
+ if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+ dev_priv->saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
I915_WRITE(BLC_PWM_PCH_CTL2,
- dev_priv->regfile.saveBLC_PWM_CTL2);
- val = dev_priv->regfile.saveBLC_PWM_CTL2;
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL2;
}
} else {
val = I915_READ(BLC_PWM_CTL);
- if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
- dev_priv->regfile.saveBLC_PWM_CTL = val;
- dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (dev_priv->saveBLC_PWM_CTL == 0) {
+ dev_priv->saveBLC_PWM_CTL = val;
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
} else if (val == 0) {
I915_WRITE(BLC_PWM_CTL,
- dev_priv->regfile.saveBLC_PWM_CTL);
+ dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_CTL2,
- dev_priv->regfile.saveBLC_PWM_CTL2);
- val = dev_priv->regfile.saveBLC_PWM_CTL;
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL;
}
}
@@ -275,7 +275,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,23 +374,26 @@ static void intel_panel_init_backlight(struct drm_device *dev)
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
+#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
+#endif
+ if (i915_panel_ignore_lid)
+ return i915_panel_ignore_lid > 0 ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ /* opregion lid state on HP 2540p is wrong at boot up,
+ * appears to be either the BIOS or Linux ACPI fault */
+#if 0
/* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
+ if (dev_priv->opregion.lid_state)
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
- }
+#endif
- switch (i915_panel_ignore_lid) {
- case -2:
- return connector_status_connected;
- case -1:
- return connector_status_disconnected;
- default:
- return connector_status_unknown;
- }
+ return connector_status_unknown;
}
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -413,14 +416,21 @@ static const struct backlight_ops intel_panel_bl_ops = {
.get_brightness = intel_panel_get_brightness,
};
-int intel_panel_setup_backlight(struct drm_connector *connector)
+int intel_panel_setup_backlight(struct drm_device *dev)
{
- struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
+ struct drm_connector *connector;
intel_panel_init_backlight(dev);
+ if (dev_priv->int_lvds_connector)
+ connector = dev_priv->int_lvds_connector;
+ else if (dev_priv->int_edp_connector)
+ connector = dev_priv->int_edp_connector;
+ else
+ return -ENODEV;
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -450,9 +460,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
backlight_device_unregister(dev_priv->backlight);
}
#else
-int intel_panel_setup_backlight(struct drm_connector *connector)
+int intel_panel_setup_backlight(struct drm_device *dev)
{
- intel_panel_init_backlight(connector->dev);
+ intel_panel_init_backlight(dev);
return 0;
}
@@ -461,20 +471,3 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
return;
}
#endif
-
-int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode)
-{
- panel->fixed_mode = fixed_mode;
-
- return 0;
-}
-
-void intel_panel_fini(struct intel_panel *panel)
-{
- struct intel_connector *intel_connector =
- container_of(panel, struct intel_connector, panel);
-
- if (panel->fixed_mode)
- drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
-}
diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c
index 58c2f210154a..442968f8b201 100644
--- a/trunk/drivers/gpu/drm/i915/intel_pm.c
+++ b/trunk/drivers/gpu/drm/i915/intel_pm.c
@@ -1468,12 +1468,9 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (crtc->enabled && crtc->fb) {
- int cpp = crtc->fb->bits_per_pixel / 8;
- if (IS_GEN2(dev))
- cpp = 4;
-
planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size, cpp,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
latency_ns);
enabled = crtc;
} else
@@ -1482,12 +1479,9 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (crtc->enabled && crtc->fb) {
- int cpp = crtc->fb->bits_per_pixel / 8;
- if (IS_GEN2(dev))
- cpp = 4;
-
planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size, cpp,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
latency_ns);
if (enabled == NULL)
enabled = crtc;
@@ -1577,7 +1571,8 @@ static void i830_update_wm(struct drm_device *dev)
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
- 4, latency_ns);
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -2328,7 +2323,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits = gen6_rps_limits(dev_priv, &val);
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
@@ -2378,15 +2373,9 @@ int intel_enable_rc6(const struct drm_device *dev)
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
- if (INTEL_INFO(dev)->gen == 5) {
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable rc6 on ilk if VT-d is on. */
- if (intel_iommu_gfx_mapped)
- return false;
-#endif
- DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
- return INTEL_RC6_ENABLE;
- }
+ /* Disable RC6 on Ironlake */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
if (IS_HASWELL(dev)) {
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
@@ -2409,12 +2398,12 @@ static void gen6_enable_rps(struct drm_device *dev)
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
- u32 rc6vids, pcu_mbox, rc6_mask = 0;
+ u32 pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
- int i, ret;
+ int i;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
@@ -2508,16 +2497,30 @@ static void gen6_enable_rps(struct drm_device *dev)
GEN6_RP_UP_BUSY_AVG |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
- if (!ret) {
- pcu_mbox = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
- if (ret && pcu_mbox & (1<<31)) { /* OC supported */
- dev_priv->rps.max_delay = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
- }
- } else {
- DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ /* Check for overclock support */
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+ pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+ if (pcu_mbox & (1<<31)) { /* OC supported */
+ dev_priv->rps.max_delay = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
@@ -2531,20 +2534,6 @@ static void gen6_enable_rps(struct drm_device *dev)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
- rc6vids = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- if (IS_GEN6(dev) && ret) {
- DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
- DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
- GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
- rc6vids &= 0xffff00;
- rc6vids |= GEN6_ENCODE_RC6_VID(450);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
- if (ret)
- DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
- }
-
gen6_gt_force_wake_put(dev_priv);
}
@@ -2552,11 +2541,10 @@ static void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
- int gpu_freq;
- unsigned int ia_freq, max_ia_freq;
+ int gpu_freq, ia_freq, max_ia_freq;
int scaling_factor = 180;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
max_ia_freq = cpufreq_quick_get_max(0);
/*
@@ -2587,11 +2575,17 @@ static void gen6_update_ring_freq(struct drm_device *dev)
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
- ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
- sandybridge_pcode_write(dev_priv,
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
- ia_freq | gpu_freq);
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
}
}
@@ -2599,16 +2593,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->ips.renderctx) {
- i915_gem_object_unpin(dev_priv->ips.renderctx);
- drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
- dev_priv->ips.renderctx = NULL;
+ if (dev_priv->renderctx) {
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
}
- if (dev_priv->ips.pwrctx) {
- i915_gem_object_unpin(dev_priv->ips.pwrctx);
- drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
- dev_priv->ips.pwrctx = NULL;
+ if (dev_priv->pwrctx) {
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
}
}
@@ -2634,14 +2628,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->ips.renderctx == NULL)
- dev_priv->ips.renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.renderctx)
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
return -ENOMEM;
- if (dev_priv->ips.pwrctx == NULL)
- dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->ips.pwrctx) {
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
@@ -2679,7 +2673,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+ intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -2701,7 +2695,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
- I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
@@ -3310,72 +3304,37 @@ static void intel_init_emon(struct drm_device *dev)
void intel_disable_gt_powersave(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
- cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
- mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
- mutex_unlock(&dev_priv->rps.hw_lock);
}
}
-static void intel_gen6_powersave_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- rps.delayed_resume_work.work);
- struct drm_device *dev = dev_priv->dev;
-
- mutex_lock(&dev_priv->rps.hw_lock);
- gen6_enable_rps(dev);
- gen6_update_ring_freq(dev);
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
void intel_enable_gt_powersave(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
- /*
- * PCU communication is slow and this doesn't need to be
- * done at any specific time, so do this out of our fast path
- * to make resume and init faster.
- */
- schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
- round_jiffies_up_relative(HZ));
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
}
}
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
/* Required for FBC */
- dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
- ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
- ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
+ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+ DPFCRUNIT_CLOCK_GATE_DISABLE |
+ DPFDUNIT_CLOCK_GATE_DISABLE;
+ /* Required for CxSR */
+ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3383,6 +3342,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
@@ -3393,7 +3354,9 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
@@ -3415,51 +3378,28 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPFC_DIS1 |
+ ILK_DPFC_DIS2 |
+ ILK_CLK_FBC);
}
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
-
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
-
- /* WaDisableRenderCachePipelinedFlush */
- I915_WRITE(CACHE_MODE_0,
- _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
-
- ibx_init_clock_gating(dev);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* WADP0ClockGatingDisable */
- for_each_pipe(pipe) {
- I915_WRITE(TRANS_CHICKEN1(pipe),
- TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
- }
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
@@ -3514,12 +3454,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE_D,
- I915_READ(ILK_DSPCLK_GATE_D) |
- ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
- ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
- /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3534,8 +3473,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
-
- cpt_init_clock_gating(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3550,24 +3487,13 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
-static void lpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * TODO: this bit should only be enabled when really needed, then
- * disabled when not needed anymore in order to save power.
- */
- if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
- I915_WRITE(SOUTH_DSPCLK_GATE_D,
- I915_READ(SOUTH_DSPCLK_GATE_D) |
- PCH_LP_PARTITION_LEVEL_DISABLE);
-}
-
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@@ -3578,6 +3504,12 @@ static void haswell_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3606,10 +3538,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
- /* WaMbcDriverBootEnable */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
@@ -3619,38 +3547,27 @@ static void haswell_init_clock_gating(struct drm_device *dev)
WM_DBG_DISALLOW_SPRITE |
WM_DBG_DISALLOW_MAXFIFO);
- lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t snpcr;
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableEarlyCull */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
- /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
- /* WaDisablePSDDualDispatchEnable */
- if (IS_IVB_GT1(dev))
- I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
- else
- I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
- _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
-
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3659,18 +3576,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
- if (IS_IVB_GT1(dev))
- I915_WRITE(GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
- else
- I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-
-
- /* WaForceL3Serialization */
- I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
- ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+ GEN7_WA_L3_CHICKEN_MODE);
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
@@ -3701,7 +3607,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
intel_flush_display_plane(dev_priv, pipe);
}
- /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3715,59 +3620,39 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
-
- cpt_init_clock_gating(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableEarlyCull */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
- /* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
- I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
-
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
+ I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
- /* WaForceL3Serialization */
- I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
- ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
- /* WaDisableDopClockGating */
- I915_WRITE(GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-
- /* WaForceL3Serialization */
- I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
- ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- /* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@@ -3819,13 +3704,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
PLANEA_FLIPDONE_INT_EN);
-
- /*
- * WaDisableVLVClockGating_VBIIssue
- * Disable clock gating on th GCFG unit to prevent a delay
- * in the reporting of vblank events.
- */
- I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3844,10 +3722,6 @@ static void g4x_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-
- /* WaDisableRenderCachePipelinedFlush */
- I915_WRITE(CACHE_MODE_0,
- _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
}
static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3903,11 +3777,44 @@ static void i830_init_clock_gating(struct drm_device *dev)
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->display.init_clock_gating(dev);
+
+ if (dev_priv->display.init_pch_clock_gating)
+ dev_priv->display.init_pch_clock_gating(dev);
}
/* Starting with Haswell, we have different power wells for
@@ -3933,7 +3840,7 @@ void intel_init_power_wells(struct drm_device *dev)
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
- if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
+ if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
@@ -3971,6 +3878,11 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_IBX(dev))
+ dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
+ else if (HAS_PCH_CPT(dev))
+ dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
+
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
@@ -4081,12 +3993,6 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
DRM_ERROR("GT thread status wait timed out\n");
}
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-}
-
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@@ -4100,7 +4006,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4110,12 +4016,6 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
__gen6_gt_wait_for_thread_c0(dev_priv);
}
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-}
-
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@@ -4129,7 +4029,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4173,7 +4073,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@@ -4211,18 +4111,13 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
-}
-
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4233,89 +4128,49 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
-void intel_gt_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- __gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
- }
-}
-
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->gt_lock);
- intel_gt_reset(dev);
-
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
- } else if (IS_GEN6(dev)) {
+ } else if (INTEL_INFO(dev)->gen >= 6) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
- }
- INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
- intel_gen6_powersave_work);
-}
-
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
-{
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
- DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
- return -EAGAIN;
- }
-
- I915_WRITE(GEN6_PCODE_DATA, *val);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500)) {
- DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
- return -ETIMEDOUT;
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ u32 ecobus;
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->gt.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
}
-
- *val = I915_READ(GEN6_PCODE_DATA);
- I915_WRITE(GEN6_PCODE_DATA, 0);
-
- return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
-{
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
- if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
- DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
- return -EAGAIN;
- }
-
- I915_WRITE(GEN6_PCODE_DATA, val);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500)) {
- DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
- return -ETIMEDOUT;
- }
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
-
- return 0;
-}
diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
index 987eb5fdaf39..ecbc5c5dbbbc 100644
--- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
/*
* TLB invalidate requires a post-sync write.
*/
- flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ flags |= PIPE_CONTROL_QW_WRITE;
}
ret = intel_ring_begin(ring, 4);
@@ -558,9 +558,12 @@ update_mboxes(struct intel_ring_buffer *ring,
u32 seqno,
u32 mmio_offset)
{
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_REGISTER |
+ MI_SEMAPHORE_UPDATE);
intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, mmio_offset);
}
/**
@@ -961,9 +964,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
}
static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 length,
- unsigned flags)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
int ret;
@@ -974,7 +975,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ MI_BATCH_NON_SECURE_I965);
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -983,8 +984,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
- unsigned flags)
+ u32 offset, u32 len)
{
int ret;
@@ -993,7 +993,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -1003,8 +1003,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
- unsigned flags)
+ u32 offset, u32 len)
{
int ret;
@@ -1013,7 +1012,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_advance(ring);
return 0;
@@ -1076,29 +1075,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
return ret;
}
-static int init_phys_hws_pga(struct intel_ring_buffer *ring)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- u32 addr;
-
- if (!dev_priv->status_page_dmah) {
- dev_priv->status_page_dmah =
- drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
- if (!dev_priv->status_page_dmah)
- return -ENOMEM;
- }
-
- addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(ring->dev)->gen >= 4)
- addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
- I915_WRITE(HWS_PGA, addr);
-
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
-
- return 0;
-}
-
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
@@ -1117,11 +1093,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ret = init_status_page(ring);
if (ret)
return ret;
- } else {
- BUG_ON(ring->id != RCS);
- ret = init_phys_hws_pga(ring);
- if (ret)
- return ret;
}
obj = i915_gem_alloc_object(dev, ring->size);
@@ -1420,48 +1391,19 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
- /*
- * Bspec vol 1c.5 - video engine command streamer:
- * "If ENABLED, all TLBs will be invalidated once the flush
- * operation is complete. This bit is only valid when the
- * Post-Sync Operation field is a value of 1h or 3h."
- */
if (invalidate & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
- MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
-static int
-hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
- unsigned flags)
-{
- int ret;
-
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
- /* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
-
- return 0;
-}
-
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
- u32 offset, u32 len,
- unsigned flags)
+ u32 offset, u32 len)
{
int ret;
@@ -1469,9 +1411,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
if (ret)
return ret;
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@@ -1492,17 +1432,10 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
- /*
- * Bspec vol 1c.3 - blitter engine command streamer:
- * "If ENABLED, all TLBs will be invalidated once the flush
- * operation is complete. This bit is only valid when the
- * Post-Sync Operation field is a value of 1h or 3h."
- */
if (invalidate & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
- MI_FLUSH_DW_OP_STOREDW;
+ cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -1557,9 +1490,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
- if (IS_HASWELL(dev))
- ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1570,6 +1501,12 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ }
+
return intel_init_ring_buffer(dev, ring);
}
@@ -1577,7 +1514,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- int ret;
ring->name = "render ring";
ring->id = RCS;
@@ -1615,13 +1551,16 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
+ if (!I915_NEED_GFX_HWS(dev))
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
ring->effective_size = ring->size;
- if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ if (IS_I830(ring->dev))
ring->effective_size -= 128;
ring->virtual_start = ioremap_wc(start, size);
@@ -1631,12 +1570,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
return -ENOMEM;
}
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = init_phys_hws_pga(ring);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -1685,6 +1618,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->init = init_ring_common;
+
return intel_init_ring_buffer(dev, ring);
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5af65b89765f..2ea7a311a1f0 100644
--- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -81,9 +81,7 @@ struct intel_ring_buffer {
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
- u32 offset, u32 length,
- unsigned flags);
-#define I915_DISPATCH_SECURE 0x1
+ u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
@@ -183,8 +181,6 @@ intel_read_status_page(struct intel_ring_buffer *ring,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
-#define I915_GEM_HWS_SCRATCH_INDEX 0x30
-#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c
index a4bee83df745..a6ac0b416964 100644
--- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1228,30 +1228,6 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
- /* HW workaround for IBX, we need to move the port to
- * transcoder A before disabling it. */
- if (HAS_PCH_IBX(encoder->base.dev)) {
- struct drm_crtc *crtc = encoder->base.crtc;
- int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
-
- if (temp & SDVO_PIPE_B_SELECT) {
- temp &= ~SDVO_PIPE_B_SELECT;
- I915_WRITE(intel_sdvo->sdvo_reg, temp);
- POSTING_READ(intel_sdvo->sdvo_reg);
-
- /* Again we need to write this twice. */
- I915_WRITE(intel_sdvo->sdvo_reg, temp);
- POSTING_READ(intel_sdvo->sdvo_reg);
-
- /* Transcoder selection bits only update
- * effectively on vblank. */
- if (crtc)
- intel_wait_for_vblank(encoder->base.dev, pipe);
- else
- msleep(50);
- }
- }
-
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
@@ -1268,20 +1244,8 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
u8 status;
temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0) {
- /* HW workaround for IBX, we need to move the port
- * to transcoder A before disabling it. */
- if (HAS_PCH_IBX(dev)) {
- struct drm_crtc *crtc = encoder->base.crtc;
- int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
-
- /* Restore the transcoder select bit. */
- if (pipe == PIPE_B)
- temp |= SDVO_PIPE_B_SELECT;
- }
-
+ if ((temp & SDVO_ENABLE) == 0)
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
- }
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1832,7 +1796,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_sdvo_connector);
+ kfree(connector);
}
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1864,7 +1828,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
- ret = drm_object_property_set_value(&connector->base, property, val);
+ ret = drm_connector_property_set_value(connector, property, val);
if (ret)
return ret;
@@ -1919,7 +1883,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
if (intel_sdvo_connector->left == property) {
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1931,7 +1895,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->right == property) {
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1943,7 +1907,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->top == property) {
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1955,7 +1919,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (intel_sdvo_connector->bottom == property) {
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2108,24 +2072,17 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
else
mapping = &dev_priv->sdvo_mappings[1];
- if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
+ pin = GMBUS_PORT_DPB;
+ if (mapping->initialized)
pin = mapping->i2c_pin;
- else
- pin = GMBUS_PORT_DPB;
-
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
- /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
- * our code totally fails once we start using gmbus. Hence fall back to
- * bit banging for now. */
- intel_gmbus_force_bit(sdvo->i2c, true);
-}
-
-/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
-static void
-intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
-{
- intel_gmbus_force_bit(sdvo->i2c, false);
+ if (intel_gmbus_is_port_valid(pin)) {
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+ intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
+ intel_gmbus_force_bit(sdvo->i2c, true);
+ } else {
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ }
}
static bool
@@ -2244,7 +2201,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
- intel_sdvo->base.cloneable = true;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
@@ -2275,7 +2231,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.cloneable = false;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
@@ -2318,8 +2273,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.cloneable = true;
-
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
return true;
@@ -2350,9 +2303,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
- intel_sdvo->base.cloneable = false;
-
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
@@ -2425,6 +2375,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
return true;
}
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ intel_sdvo_destroy(connector);
+ }
+}
+
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
@@ -2465,7 +2427,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
- drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+ drm_connector_attach_property(&intel_sdvo_connector->base.base,
intel_sdvo_connector->tv_format, 0);
return true;
@@ -2481,7 +2443,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
- drm_object_attach_property(&connector->base, \
+ drm_connector_attach_property(connector, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2518,7 +2480,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->left)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
@@ -2527,7 +2489,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->right)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2555,7 +2517,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->top)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
@@ -2565,7 +2527,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->bottom)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2597,7 +2559,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->dot_crawl)
return false;
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2701,8 +2663,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
- if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
- goto err_i2c_bus;
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
+ kfree(intel_sdvo);
+ return false;
+ }
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
@@ -2746,9 +2710,20 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
- goto err;
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
}
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = false;
+
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
@@ -2759,12 +2734,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err;
+ goto err_output;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
- goto err;
+ goto err_output;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
@@ -2784,11 +2759,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);
-err_i2c_bus:
- intel_sdvo_unselect_i2c_bus(intel_sdvo);
kfree(intel_sdvo);
return false;
diff --git a/trunk/drivers/gpu/drm/i915/intel_sprite.c b/trunk/drivers/gpu/drm/i915/intel_sprite.c
index 827dcd4edf1c..82f5e5c7009d 100644
--- a/trunk/drivers/gpu/drm/i915/intel_sprite.c
+++ b/trunk/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,8 +48,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
- unsigned long sprsurf_offset, linear_offset;
- int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ int pixel_size;
sprctl = I915_READ(SPRCTL(pipe));
@@ -62,24 +61,33 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
+ pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ pixel_size = 2;
break;
default:
- BUG();
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -119,28 +127,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
- linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- sprsurf_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
- linear_offset -= sprsurf_offset;
-
- /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
- * register */
- if (IS_HASWELL(dev))
- I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
- else if (obj->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE) {
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
- else
- I915_WRITE(SPRLINOFF(pipe), linear_offset);
+ } else {
+ unsigned long offset;
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(SPRLINOFF(pipe), offset);
+ }
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- if (intel_plane->can_scale)
- I915_WRITE(SPRSCALE(pipe), sprscale);
+ I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -154,8 +152,7 @@ ivb_disable_plane(struct drm_plane *plane)
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
- if (intel_plane->can_scale)
- I915_WRITE(SPRSCALE(pipe), 0);
+ I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
@@ -228,10 +225,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe;
- unsigned long dvssurf_offset, linear_offset;
+ int pipe = intel_plane->pipe, pixel_size;
u32 dvscntr, dvsscale;
- int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -244,24 +239,33 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+ pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ pixel_size = 2;
break;
default:
- BUG();
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
}
if (obj->tiling_mode != I915_TILING_NONE)
@@ -285,23 +289,18 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
- linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
- dvssurf_offset =
- intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
- linear_offset -= dvssurf_offset;
-
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE) {
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
- else
- I915_WRITE(DVSLINOFF(pipe), linear_offset);
+ } else {
+ unsigned long offset;
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(DVSLINOFF(pipe), offset);
+ }
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
POSTING_READ(DVSSURF(pipe));
}
@@ -423,8 +422,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -439,7 +436,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_h = src_h >> 16;
/* Pipe must be running... */
- if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
+ if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -449,15 +446,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL;
- /* Sprite planes can be linear or x-tiled surfaces */
- switch (obj->tiling_mode) {
- case I915_TILING_NONE:
- case I915_TILING_X:
- break;
- default:
- return -EINVAL;
- }
-
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
@@ -484,12 +472,6 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (!crtc_w || !crtc_h) /* Again, nothing to display */
goto out;
- /*
- * We may not have a scaler, eg. HSW does not have it any more
- */
- if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
- return -EINVAL;
-
/*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
@@ -683,7 +665,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
- intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
@@ -700,10 +681,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
break;
case 7:
- if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
- intel_plane->can_scale = false;
- else
- intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c
index ea93520c1278..62bb048c135e 100644
--- a/trunk/drivers/gpu/drm/i915/intel_tv.c
+++ b/trunk/drivers/gpu/drm/i915/intel_tv.c
@@ -1088,11 +1088,13 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
+ int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev_priv, intel_crtc->plane);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
/* Wait for vblank for the disable to take effect */
if (IS_GEN2(dev))
@@ -1121,7 +1123,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
- intel_flush_display_plane(dev_priv, intel_crtc->plane);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
}
j = 0;
@@ -1289,7 +1292,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
}
intel_tv->tv_format = tv_mode->name;
- drm_object_property_set_value(&connector->base,
+ drm_connector_property_set_value(connector,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1443,7 +1446,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
int ret = 0;
bool changed = false;
- ret = drm_object_property_set_value(&connector->base, property, val);
+ ret = drm_connector_property_set_value(connector, property, val);
if (ret < 0)
goto out;
@@ -1655,18 +1658,18 @@ intel_tv_init(struct drm_device *dev)
ARRAY_SIZE(tv_modes),
tv_format_names);
- drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
+ drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
initial_mode);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.tv_left_margin_property,
intel_tv->margin[TV_MARGIN_LEFT]);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.tv_top_margin_property,
intel_tv->margin[TV_MARGIN_TOP]);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.tv_right_margin_property,
intel_tv->margin[TV_MARGIN_RIGHT]);
- drm_object_attach_property(&connector->base,
+ drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
drm_sysfs_connector_add(connector);
diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_main.c b/trunk/drivers/gpu/drm/mgag200/mgag200_main.c
index 70dd3c5529d4..d6a1aae33701 100644
--- a/trunk/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/trunk/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -133,8 +133,6 @@ static int mga_vram_init(struct mga_device *mdev)
{
void __iomem *mem;
struct apertures_struct *aper = alloc_apertures(1);
- if (!aper)
- return -ENOMEM;
/* BAR 0 is VRAM */
mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
@@ -142,9 +140,9 @@ static int mga_vram_init(struct mga_device *mdev)
aper->ranges[0].base = mdev->mc.vram_base;
aper->ranges[0].size = mdev->mc.vram_window;
+ aper->count = 1;
remove_conflicting_framebuffers(aper, "mgafb", true);
- kfree(aper);
if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
"mgadrmfb_vram")) {
diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c b/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 49d60a620122..1504699666c4 100644
--- a/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/trunk/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
ttm_bo_type_device, &mgabo->placement,
- align >> PAGE_SHIFT, false, NULL, acc_size,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
NULL, mgag200_bo_ttm_destroy);
if (ret)
return ret;
diff --git a/trunk/drivers/gpu/drm/nouveau/Makefile b/trunk/drivers/gpu/drm/nouveau/Makefile
index ab25752a0b1e..a990df4d6c04 100644
--- a/trunk/drivers/gpu/drm/nouveau/Makefile
+++ b/trunk/drivers/gpu/drm/nouveau/Makefile
@@ -11,7 +11,6 @@ nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
-nouveau-y += core/core/falcon.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
nouveau-y += core/core/mm.o
@@ -30,7 +29,6 @@ nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
nouveau-y += core/subdev/bios/conn.o
nouveau-y += core/subdev/bios/dcb.o
-nouveau-y += core/subdev/bios/disp.o
nouveau-y += core/subdev/bios/dp.o
nouveau-y += core/subdev/bios/extdev.o
nouveau-y += core/subdev/bios/gpio.o
@@ -66,19 +64,9 @@ nouveau-y += core/subdev/devinit/nv50.o
nouveau-y += core/subdev/fb/base.o
nouveau-y += core/subdev/fb/nv04.o
nouveau-y += core/subdev/fb/nv10.o
-nouveau-y += core/subdev/fb/nv1a.o
nouveau-y += core/subdev/fb/nv20.o
-nouveau-y += core/subdev/fb/nv25.o
nouveau-y += core/subdev/fb/nv30.o
-nouveau-y += core/subdev/fb/nv35.o
-nouveau-y += core/subdev/fb/nv36.o
nouveau-y += core/subdev/fb/nv40.o
-nouveau-y += core/subdev/fb/nv41.o
-nouveau-y += core/subdev/fb/nv44.o
-nouveau-y += core/subdev/fb/nv46.o
-nouveau-y += core/subdev/fb/nv47.o
-nouveau-y += core/subdev/fb/nv49.o
-nouveau-y += core/subdev/fb/nv4e.o
nouveau-y += core/subdev/fb/nv50.o
nouveau-y += core/subdev/fb/nvc0.o
nouveau-y += core/subdev/gpio/base.o
@@ -123,10 +111,7 @@ nouveau-y += core/engine/dmaobj/base.o
nouveau-y += core/engine/dmaobj/nv04.o
nouveau-y += core/engine/dmaobj/nv50.o
nouveau-y += core/engine/dmaobj/nvc0.o
-nouveau-y += core/engine/dmaobj/nvd0.o
nouveau-y += core/engine/bsp/nv84.o
-nouveau-y += core/engine/bsp/nvc0.o
-nouveau-y += core/engine/bsp/nve0.o
nouveau-y += core/engine/copy/nva3.o
nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
@@ -134,21 +119,7 @@ nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
-nouveau-y += core/engine/disp/nv84.o
-nouveau-y += core/engine/disp/nv94.o
-nouveau-y += core/engine/disp/nva0.o
-nouveau-y += core/engine/disp/nva3.o
nouveau-y += core/engine/disp/nvd0.o
-nouveau-y += core/engine/disp/nve0.o
-nouveau-y += core/engine/disp/dacnv50.o
-nouveau-y += core/engine/disp/hdanva3.o
-nouveau-y += core/engine/disp/hdanvd0.o
-nouveau-y += core/engine/disp/hdminv84.o
-nouveau-y += core/engine/disp/hdminva3.o
-nouveau-y += core/engine/disp/hdminvd0.o
-nouveau-y += core/engine/disp/sornv50.o
-nouveau-y += core/engine/disp/sornv94.o
-nouveau-y += core/engine/disp/sornvd0.o
nouveau-y += core/engine/disp/vga.o
nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
@@ -180,14 +151,11 @@ nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv50.o
nouveau-y += core/engine/mpeg/nv84.o
nouveau-y += core/engine/ppp/nv98.o
-nouveau-y += core/engine/ppp/nvc0.o
nouveau-y += core/engine/software/nv04.o
nouveau-y += core/engine/software/nv10.o
nouveau-y += core/engine/software/nv50.o
nouveau-y += core/engine/software/nvc0.o
nouveau-y += core/engine/vp/nv84.o
-nouveau-y += core/engine/vp/nvc0.o
-nouveau-y += core/engine/vp/nve0.o
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -198,7 +166,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms
nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_dp.o
+nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
# drm/kms/nv04:nv50
@@ -207,7 +175,9 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
# drm/kms/nv50-
-nouveau-y += nv50_display.o
+nouveau-y += nv50_display.o nvd0_display.o
+nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
+nouveau-y += nv50_evo.o
# drm/pm
nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c b/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c
index 84c71fad2b6c..e41b10d5eb59 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -189,21 +189,6 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
return nouveau_gpuobj_fini(&engctx->base, suspend);
}
-int
-_nouveau_engctx_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_engctx *engctx;
- int ret;
-
- ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
- NVOBJ_FLAG_ZERO_ALLOC, &engctx);
- *pobject = nv_object(engctx);
- return ret;
-}
-
void
_nouveau_engctx_dtor(struct nouveau_object *object)
{
diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c b/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c
deleted file mode 100644
index 6b0843c33877..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/core/falcon.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include
-
-#include
-
-u32
-_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
-{
- struct nouveau_falcon *falcon = (void *)object;
- return nv_rd32(falcon, falcon->addr + addr);
-}
-
-void
-_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- struct nouveau_falcon *falcon = (void *)object;
- nv_wr32(falcon, falcon->addr + addr, data);
-}
-
-int
-_nouveau_falcon_init(struct nouveau_object *object)
-{
- struct nouveau_device *device = nv_device(object);
- struct nouveau_falcon *falcon = (void *)object;
- const struct firmware *fw;
- char name[32] = "internal";
- int ret, i;
- u32 caps;
-
- /* enable engine, and determine its capabilities */
- ret = nouveau_engine_init(&falcon->base);
- if (ret)
- return ret;
-
- if (device->chipset < 0xa3 ||
- device->chipset == 0xaa || device->chipset == 0xac) {
- falcon->version = 0;
- falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
- } else {
- caps = nv_ro32(falcon, 0x12c);
- falcon->version = (caps & 0x0000000f);
- falcon->secret = (caps & 0x00000030) >> 4;
- }
-
- caps = nv_ro32(falcon, 0x108);
- falcon->code.limit = (caps & 0x000001ff) << 8;
- falcon->data.limit = (caps & 0x0003fe00) >> 1;
-
- nv_debug(falcon, "falcon version: %d\n", falcon->version);
- nv_debug(falcon, "secret level: %d\n", falcon->secret);
- nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
- nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
-
- /* wait for 'uc halted' to be signalled before continuing */
- if (falcon->secret) {
- nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
- nv_wo32(falcon, 0x004, 0x00000010);
- }
-
- /* disable all interrupts */
- nv_wo32(falcon, 0x014, 0xffffffff);
-
- /* no default ucode provided by the engine implementation, try and
- * locate a "self-bootstrapping" firmware image for the engine
- */
- if (!falcon->code.data) {
- snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
- device->chipset, falcon->addr >> 12);
-
- ret = request_firmware(&fw, name, &device->pdev->dev);
- if (ret == 0) {
- falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
- falcon->code.size = fw->size;
- falcon->data.data = NULL;
- falcon->data.size = 0;
- release_firmware(fw);
- }
-
- falcon->external = true;
- }
-
- /* next step is to try and load "static code/data segment" firmware
- * images for the engine
- */
- if (!falcon->code.data) {
- snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
- device->chipset, falcon->addr >> 12);
-
- ret = request_firmware(&fw, name, &device->pdev->dev);
- if (ret) {
- nv_error(falcon, "unable to load firmware data\n");
- return ret;
- }
-
- falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
- falcon->data.size = fw->size;
- release_firmware(fw);
- if (!falcon->data.data)
- return -ENOMEM;
-
- snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
- device->chipset, falcon->addr >> 12);
-
- ret = request_firmware(&fw, name, &device->pdev->dev);
- if (ret) {
- nv_error(falcon, "unable to load firmware code\n");
- return ret;
- }
-
- falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
- falcon->code.size = fw->size;
- release_firmware(fw);
- if (!falcon->code.data)
- return -ENOMEM;
- }
-
- nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
- "static code/data segments" : "self-bootstrapping");
-
- /* ensure any "self-bootstrapping" firmware image is in vram */
- if (!falcon->data.data && !falcon->core) {
- ret = nouveau_gpuobj_new(object->parent, NULL,
- falcon->code.size, 256, 0,
- &falcon->core);
- if (ret) {
- nv_error(falcon, "core allocation failed, %d\n", ret);
- return ret;
- }
-
- for (i = 0; i < falcon->code.size; i += 4)
- nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
- }
-
- /* upload firmware bootloader (or the full code segments) */
- if (falcon->core) {
- if (device->card_type < NV_C0)
- nv_wo32(falcon, 0x618, 0x04000000);
- else
- nv_wo32(falcon, 0x618, 0x00000114);
- nv_wo32(falcon, 0x11c, 0);
- nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
- nv_wo32(falcon, 0x114, 0);
- nv_wo32(falcon, 0x118, 0x00006610);
- } else {
- if (falcon->code.size > falcon->code.limit ||
- falcon->data.size > falcon->data.limit) {
- nv_error(falcon, "ucode exceeds falcon limit(s)\n");
- return -EINVAL;
- }
-
- if (falcon->version < 3) {
- nv_wo32(falcon, 0xff8, 0x00100000);
- for (i = 0; i < falcon->code.size / 4; i++)
- nv_wo32(falcon, 0xff4, falcon->code.data[i]);
- } else {
- nv_wo32(falcon, 0x180, 0x01000000);
- for (i = 0; i < falcon->code.size / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wo32(falcon, 0x188, i >> 6);
- nv_wo32(falcon, 0x184, falcon->code.data[i]);
- }
- }
- }
-
- /* upload data segment (if necessary), zeroing the remainder */
- if (falcon->version < 3) {
- nv_wo32(falcon, 0xff8, 0x00000000);
- for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
- nv_wo32(falcon, 0xff4, falcon->data.data[i]);
- for (; i < falcon->data.limit; i += 4)
- nv_wo32(falcon, 0xff4, 0x00000000);
- } else {
- nv_wo32(falcon, 0x1c0, 0x01000000);
- for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
- nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
- for (; i < falcon->data.limit / 4; i++)
- nv_wo32(falcon, 0x1c4, 0x00000000);
- }
-
- /* start it running */
- nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
- nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
- nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
- nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
- return 0;
-}
-
-int
-_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
-{
- struct nouveau_falcon *falcon = (void *)object;
-
- if (!suspend) {
- nouveau_gpuobj_ref(NULL, &falcon->core);
- if (falcon->external) {
- kfree(falcon->data.data);
- kfree(falcon->code.data);
- falcon->code.data = NULL;
- }
- }
-
- nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
- nv_wo32(falcon, 0x014, 0xffffffff);
-
- return nouveau_engine_fini(&falcon->base, suspend);
-}
-
-int
-nouveau_falcon_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 addr, bool enable,
- const char *iname, const char *fname,
- int length, void **pobject)
-{
- struct nouveau_falcon *falcon;
- int ret;
-
- ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
- fname, length, pobject);
- falcon = *pobject;
- if (ret)
- return ret;
-
- falcon->addr = addr;
- return 0;
-}
diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 560b2214cf1c..70586fde69cf 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
}
u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
}
void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
diff --git a/trunk/drivers/gpu/drm/nouveau/core/core/mm.c b/trunk/drivers/gpu/drm/nouveau/core/core/mm.c
index 0261a11b2ae0..a6d3cd6490f7 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -234,18 +234,15 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
int
nouveau_mm_fini(struct nouveau_mm *mm)
{
- if (nouveau_mm_initialised(mm)) {
- struct nouveau_mm_node *node, *heap =
- list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
- int nodes = 0;
-
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (WARN_ON(nodes++ == mm->heap_nodes))
- return -EBUSY;
- }
+ struct nouveau_mm_node *node, *heap =
+ list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
+ int nodes = 0;
- kfree(heap);
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (WARN_ON(nodes++ == mm->heap_nodes))
+ return -EBUSY;
}
+ kfree(heap);
return 0;
}
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 1d9f614cb97d..66f7dfd907ee 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -22,13 +22,18 @@
* Authors: Ben Skeggs
*/
-#include
+#include
#include
+#include
#include
struct nv84_bsp_priv {
- struct nouveau_engine base;
+ struct nouveau_bsp base;
+};
+
+struct nv84_bsp_chan {
+ struct nouveau_bsp_chan base;
};
/*******************************************************************************
@@ -44,16 +49,61 @@ nv84_bsp_sclass[] = {
* BSP context
******************************************************************************/
+static int
+nv84_bsp_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_bsp_chan *priv;
+ int ret;
+
+ ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
+ 0, 0, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+nv84_bsp_context_dtor(struct nouveau_object *object)
+{
+ struct nv84_bsp_chan *priv = (void *)object;
+ nouveau_bsp_context_destroy(&priv->base);
+}
+
+static int
+nv84_bsp_context_init(struct nouveau_object *object)
+{
+ struct nv84_bsp_chan *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bsp_context_init(&priv->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv84_bsp_chan *priv = (void *)object;
+ return nouveau_bsp_context_fini(&priv->base, suspend);
+}
+
static struct nouveau_oclass
nv84_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = nv84_bsp_context_ctor,
+ .dtor = nv84_bsp_context_dtor,
+ .init = nv84_bsp_context_init,
+ .fini = nv84_bsp_context_fini,
+ .rd32 = _nouveau_bsp_context_rd32,
+ .wr32 = _nouveau_bsp_context_wr32,
},
};
@@ -61,6 +111,11 @@ nv84_bsp_cclass = {
* BSP engine/subdev functions
******************************************************************************/
+static void
+nv84_bsp_intr(struct nouveau_subdev *subdev)
+{
+}
+
static int
nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -69,25 +124,52 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_bsp_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
- "PBSP", "bsp", &priv);
+ ret = nouveau_bsp_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x04008000;
+ nv_subdev(priv)->intr = nv84_bsp_intr;
nv_engine(priv)->cclass = &nv84_bsp_cclass;
nv_engine(priv)->sclass = nv84_bsp_sclass;
return 0;
}
+static void
+nv84_bsp_dtor(struct nouveau_object *object)
+{
+ struct nv84_bsp_priv *priv = (void *)object;
+ nouveau_bsp_destroy(&priv->base);
+}
+
+static int
+nv84_bsp_init(struct nouveau_object *object)
+{
+ struct nv84_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bsp_init(&priv->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+nv84_bsp_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv84_bsp_priv *priv = (void *)object;
+ return nouveau_bsp_fini(&priv->base, suspend);
+}
+
struct nouveau_oclass
nv84_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_bsp_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = nv84_bsp_dtor,
+ .init = nv84_bsp_init,
+ .fini = nv84_bsp_fini,
},
};
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
deleted file mode 100644
index 0a5aa6bb0870..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2012 Maarten Lankhorst
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Maarten Lankhorst
- */
-
-#include
-
-#include
-
-struct nvc0_bsp_priv {
- struct nouveau_falcon base;
-};
-
-/*******************************************************************************
- * BSP object classes
- ******************************************************************************/
-
-static struct nouveau_oclass
-nvc0_bsp_sclass[] = {
- { 0x90b1, &nouveau_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PBSP context
- ******************************************************************************/
-
-static struct nouveau_oclass
-nvc0_bsp_cclass = {
- .handle = NV_ENGCTX(BSP, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PBSP engine/subdev functions
- ******************************************************************************/
-
-static int
-nvc0_bsp_init(struct nouveau_object *object)
-{
- struct nvc0_bsp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x084010, 0x0000fff2);
- nv_wr32(priv, 0x08401c, 0x0000fff2);
- return 0;
-}
-
-static int
-nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_bsp_priv *priv;
- int ret;
-
- ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
- "PBSP", "bsp", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00008000;
- nv_engine(priv)->cclass = &nvc0_bsp_cclass;
- nv_engine(priv)->sclass = nvc0_bsp_sclass;
- return 0;
-}
-
-struct nouveau_oclass
-nvc0_bsp_oclass = {
- .handle = NV_ENGINE(BSP, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_bsp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nvc0_bsp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
- },
-};
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
deleted file mode 100644
index d4f23bbd75b4..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include
-
-#include
-
-struct nve0_bsp_priv {
- struct nouveau_falcon base;
-};
-
-/*******************************************************************************
- * BSP object classes
- ******************************************************************************/
-
-static struct nouveau_oclass
-nve0_bsp_sclass[] = {
- { 0x95b1, &nouveau_object_ofuncs },
- {},
-};
-
-/*******************************************************************************
- * PBSP context
- ******************************************************************************/
-
-static struct nouveau_oclass
-nve0_bsp_cclass = {
- .handle = NV_ENGCTX(BSP, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
- },
-};
-
-/*******************************************************************************
- * PBSP engine/subdev functions
- ******************************************************************************/
-
-static int
-nve0_bsp_init(struct nouveau_object *object)
-{
- struct nve0_bsp_priv *priv = (void *)object;
- int ret;
-
- ret = nouveau_falcon_init(&priv->base);
- if (ret)
- return ret;
-
- nv_wr32(priv, 0x084010, 0x0000fff2);
- nv_wr32(priv, 0x08401c, 0x0000fff2);
- return 0;
-}
-
-static int
-nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nve0_bsp_priv *priv;
- int ret;
-
- ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
- "PBSP", "bsp", &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- nv_subdev(priv)->unit = 0x00008000;
- nv_engine(priv)->cclass = &nve0_bsp_cclass;
- nv_engine(priv)->sclass = nve0_bsp_sclass;
- return 0;
-}
-
-struct nouveau_oclass
-nve0_bsp_oclass = {
- .handle = NV_ENGINE(BSP, 0xe0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nve0_bsp_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = nve0_bsp_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
- },
-};
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 283248c7b050..4df6da0af740 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,9 +22,10 @@
* Authors: Ben Skeggs
*/
-#include
-#include
+#include
#include
+#include
+#include
#include
#include
@@ -35,7 +36,11 @@
#include "fuc/nva3.fuc.h"
struct nva3_copy_priv {
- struct nouveau_falcon base;
+ struct nouveau_copy base;
+};
+
+struct nva3_copy_chan {
+ struct nouveau_copy_chan base;
};
/*******************************************************************************
@@ -52,16 +57,34 @@ nva3_copy_sclass[] = {
* PCOPY context
******************************************************************************/
+static int
+nva3_copy_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nva3_copy_chan *priv;
+ int ret;
+
+ ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static struct nouveau_oclass
nva3_copy_cclass = {
.handle = NV_ENGCTX(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+ .ctor = nva3_copy_context_ctor,
+ .dtor = _nouveau_copy_context_dtor,
+ .init = _nouveau_copy_context_init,
+ .fini = _nouveau_copy_context_fini,
+ .rd32 = _nouveau_copy_context_rd32,
+ .wr32 = _nouveau_copy_context_wr32,
},
};
@@ -77,40 +100,41 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
{}
};
-void
+static void
nva3_copy_intr(struct nouveau_subdev *subdev)
{
struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
struct nouveau_engine *engine = nv_engine(subdev);
- struct nouveau_falcon *falcon = (void *)subdev;
struct nouveau_object *engctx;
- u32 dispatch = nv_ro32(falcon, 0x01c);
- u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
- u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
- u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
- u32 addr = nv_ro32(falcon, 0x040) >> 16;
+ struct nva3_copy_priv *priv = (void *)subdev;
+ u32 dispatch = nv_rd32(priv, 0x10401c);
+ u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
+ u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
+ u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x104040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_ro32(falcon, 0x044);
+ u32 data = nv_rd32(priv, 0x104044);
int chid;
engctx = nouveau_engctx_get(engine, inst);
chid = pfifo->chid(pfifo, engctx);
if (stat & 0x00000040) {
- nv_error(falcon, "DISPATCH_ERROR [");
+ nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
chid, inst << 12, subc, mthd, data);
- nv_wo32(falcon, 0x004, 0x00000040);
+ nv_wr32(priv, 0x104004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
- nv_error(falcon, "unhandled intr 0x%08x\n", stat);
- nv_wo32(falcon, 0x004, stat);
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104004, stat);
}
+ nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -130,8 +154,7 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_copy_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
- "PCE0", "copy0", &priv);
+ ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -141,22 +164,59 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nva3_copy_cclass;
nv_engine(priv)->sclass = nva3_copy_sclass;
nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
- nv_falcon(priv)->code.data = nva3_pcopy_code;
- nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
- nv_falcon(priv)->data.data = nva3_pcopy_data;
- nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
return 0;
}
+static int
+nva3_copy_init(struct nouveau_object *object)
+{
+ struct nva3_copy_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_copy_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x104014, 0xffffffff);
+
+ /* upload ucode */
+ nv_wr32(priv, 0x1041c0, 0x01000000);
+ for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
+ nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
+
+ nv_wr32(priv, 0x104180, 0x01000000);
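+ /* code is streamed through 0x104184 in 64-word blocks; the block index (i >> 6) is written to 0x104188 before each block */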
+ for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(priv, 0x104188, i >> 6);
+ nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
+ }
+
+ /* start it running */
+ nv_wr32(priv, 0x10410c, 0x00000000);
+ nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
+ return 0;
+}
+
+static int
+nva3_copy_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nva3_copy_priv *priv = (void *)object;
+
+ nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
+ nv_wr32(priv, 0x104014, 0xffffffff);
+
+ return nouveau_copy_fini(&priv->base, suspend);
+}
+
struct nouveau_oclass
nva3_copy_oclass = {
.handle = NV_ENGINE(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva3_copy_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = _nouveau_falcon_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+ .dtor = _nouveau_copy_dtor,
+ .init = nva3_copy_init,
+ .fini = nva3_copy_fini,
},
};
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index b3ed2737e21f..06d4a8791055 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -22,9 +22,10 @@
* Authors: Ben Skeggs
*/
-#include
-#include
+#include
#include
+#include
+#include
#include
#include
@@ -32,7 +33,11 @@
#include "fuc/nvc0.fuc.h"
struct nvc0_copy_priv {
- struct nouveau_falcon base;
+ struct nouveau_copy base;
+};
+
+struct nvc0_copy_chan {
+ struct nouveau_copy_chan base;
};
/*******************************************************************************
@@ -55,14 +60,32 @@ nvc0_copy1_sclass[] = {
* PCOPY context
******************************************************************************/
+static int
+nvc0_copy_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_copy_chan *priv;
+ int ret;
+
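+ /* 256-byte engine context, 256-byte aligned, zero-filled on allocation */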
+ ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
+ 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static struct nouveau_ofuncs
nvc0_copy_context_ofuncs = {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+ .ctor = nvc0_copy_context_ctor,
+ .dtor = _nouveau_copy_context_dtor,
+ .init = _nouveau_copy_context_init,
+ .fini = _nouveau_copy_context_fini,
+ .rd32 = _nouveau_copy_context_rd32,
+ .wr32 = _nouveau_copy_context_wr32,
};
static struct nouveau_oclass
@@ -81,18 +104,50 @@ nvc0_copy1_cclass = {
* PCOPY engine/subdev functions
******************************************************************************/
-static int
-nvc0_copy_init(struct nouveau_object *object)
+static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
+ { 0x0001, "ILLEGAL_MTHD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "INVALID_BITFIELD" },
+ {}
+};
+
+static void
+nvc0_copy_intr(struct nouveau_subdev *subdev)
{
- struct nvc0_copy_priv *priv = (void *)object;
- int ret;
+ struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_object *engctx;
+ int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
+ struct nvc0_copy_priv *priv = (void *)subdev;
+ u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
+ u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
+ u32 stat = intr & disp & ~(disp >> 16);
+ u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
+ u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
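+ /* upper half of 0x104040 holds the trapped method: bits 10:0 are the method dword index, bits 13:11 the subchannel */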
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
+ int chid;
- ret = nouveau_falcon_init(&priv->base);
- if (ret)
- return ret;
+ engctx = nouveau_engctx_get(engine, inst);
+ chid = pfifo->chid(pfifo, engctx);
- nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
- return 0;
+ if (stat & 0x00000040) {
+ nv_error(priv, "DISPATCH_ERROR [");
+ nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
+ printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, (u64)inst << 12, subc, mthd, data);
+ nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
+ }
+
+ nouveau_engctx_put(engctx);
}
static int
@@ -106,20 +161,15 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
- "PCE0", "copy0", &priv);
+ ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000040;
- nv_subdev(priv)->intr = nva3_copy_intr;
+ nv_subdev(priv)->intr = nvc0_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy0_cclass;
nv_engine(priv)->sclass = nvc0_copy0_sclass;
- nv_falcon(priv)->code.data = nvc0_pcopy_code;
- nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
- nv_falcon(priv)->data.data = nvc0_pcopy_data;
- nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
@@ -134,33 +184,72 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
- "PCE1", "copy1", &priv);
+ ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000080;
- nv_subdev(priv)->intr = nva3_copy_intr;
+ nv_subdev(priv)->intr = nvc0_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy1_cclass;
nv_engine(priv)->sclass = nvc0_copy1_sclass;
- nv_falcon(priv)->code.data = nvc0_pcopy_code;
- nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
- nv_falcon(priv)->data.data = nvc0_pcopy_data;
- nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
return 0;
}
+static int
+nvc0_copy_init(struct nouveau_object *object)
+{
+ int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
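+ /* per-engine register window: PCE0 registers at 0x104000, PCE1 at 0x105000 (stride 0x1000) */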
+ struct nvc0_copy_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_copy_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+ /* upload ucode */
+ nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
+ for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
+ nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
+
+ nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
+ for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
+ nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
+ }
+
+ /* start it running */
+ nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
+ nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
+ return 0;
+}
+
+static int
+nvc0_copy_fini(struct nouveau_object *object, bool suspend)
+{
+ int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+ struct nvc0_copy_priv *priv = (void *)object;
+
+ nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
+ nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+ return nouveau_copy_fini(&priv->base, suspend);
+}
+
struct nouveau_oclass
nvc0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy0_ctor,
- .dtor = _nouveau_falcon_dtor,
+ .dtor = _nouveau_copy_dtor,
.init = nvc0_copy_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+ .fini = nvc0_copy_fini,
},
};
@@ -169,10 +258,8 @@ nvc0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_copy1_ctor,
- .dtor = _nouveau_falcon_dtor,
+ .dtor = _nouveau_copy_dtor,
.init = nvc0_copy_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+ .fini = nvc0_copy_fini,
},
};
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index dbbe9e8998fe..2017c1579ac5 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -30,7 +30,11 @@
#include
struct nve0_copy_priv {
- struct nouveau_engine base;
+ struct nouveau_copy base;
+};
+
+struct nve0_copy_chan {
+ struct nouveau_copy_chan base;
};
/*******************************************************************************
@@ -47,14 +51,32 @@ nve0_copy_sclass[] = {
* PCOPY context
******************************************************************************/
+static int
+nve0_copy_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_copy_chan *priv;
+ int ret;
+
+ ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
+ 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static struct nouveau_ofuncs
nve0_copy_context_ofuncs = {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = nve0_copy_context_ctor,
+ .dtor = _nouveau_copy_context_dtor,
+ .init = _nouveau_copy_context_init,
+ .fini = _nouveau_copy_context_fini,
+ .rd32 = _nouveau_copy_context_rd32,
+ .wr32 = _nouveau_copy_context_wr32,
};
static struct nouveau_oclass
@@ -78,8 +100,7 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
- ret = nouveau_engine_create(parent, engine, oclass, true,
- "PCE0", "copy0", &priv);
+ ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -101,8 +122,7 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (nv_rd32(parent, 0x022500) & 0x00000200)
return -ENODEV;
- ret = nouveau_engine_create(parent, engine, oclass, true,
- "PCE1", "copy1", &priv);
+ ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -118,9 +138,9 @@ nve0_copy0_oclass = {
.handle = NV_ENGINE(COPY0, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy0_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = _nouveau_copy_dtor,
+ .init = _nouveau_copy_init,
+ .fini = _nouveau_copy_fini,
},
};
@@ -129,8 +149,8 @@ nve0_copy1_oclass = {
.handle = NV_ENGINE(COPY1, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_copy1_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = _nouveau_copy_dtor,
+ .init = _nouveau_copy_init,
+ .fini = _nouveau_copy_fini,
},
};
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index b97490512723..1d85e5b66ca0 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -34,7 +34,11 @@
#include
struct nv84_crypt_priv {
- struct nouveau_engine base;
+ struct nouveau_crypt base;
+};
+
+struct nv84_crypt_chan {
+ struct nouveau_crypt_chan base;
};
/*******************************************************************************
@@ -83,16 +87,34 @@ nv84_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
+static int
+nv84_crypt_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_crypt_chan *priv;
+ int ret;
+
+ ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+ 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static struct nouveau_oclass
nv84_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = nv84_crypt_context_ctor,
+ .dtor = _nouveau_crypt_context_dtor,
+ .init = _nouveau_crypt_context_init,
+ .fini = _nouveau_crypt_context_fini,
+ .rd32 = _nouveau_crypt_context_rd32,
+ .wr32 = _nouveau_crypt_context_wr32,
},
};
@@ -135,6 +157,7 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x102130, stat);
nv_wr32(priv, 0x10200c, 0x10);
+ nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -153,8 +176,7 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv84_crypt_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
- "PCRYPT", "crypt", &priv);
+ ret = nouveau_crypt_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -173,7 +195,7 @@ nv84_crypt_init(struct nouveau_object *object)
struct nv84_crypt_priv *priv = (void *)object;
int ret;
- ret = nouveau_engine_init(&priv->base);
+ ret = nouveau_crypt_init(&priv->base);
if (ret)
return ret;
@@ -188,8 +210,8 @@ nv84_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_crypt_ctor,
- .dtor = _nouveau_engine_dtor,
+ .dtor = _nouveau_crypt_dtor,
.init = nv84_crypt_init,
- .fini = _nouveau_engine_fini,
+ .fini = _nouveau_crypt_fini,
},
};
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 21986f3bf0c8..9e3876c89b96 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,7 +26,6 @@
#include
#include
#include
-#include
#include
#include
@@ -37,7 +36,11 @@
#include "fuc/nv98.fuc.h"
struct nv98_crypt_priv {
- struct nouveau_falcon base;
+ struct nouveau_crypt base;
+};
+
+struct nv98_crypt_chan {
+ struct nouveau_crypt_chan base;
};
/*******************************************************************************
@@ -54,16 +57,34 @@ nv98_crypt_sclass[] = {
* PCRYPT context
******************************************************************************/
+static int
+nv98_crypt_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv98_crypt_chan *priv;
+ int ret;
+
+ ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+ 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static struct nouveau_oclass
nv98_crypt_cclass = {
.handle = NV_ENGCTX(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_falcon_context_ctor,
- .dtor = _nouveau_falcon_context_dtor,
- .init = _nouveau_falcon_context_init,
- .fini = _nouveau_falcon_context_fini,
- .rd32 = _nouveau_falcon_context_rd32,
- .wr32 = _nouveau_falcon_context_wr32,
+ .ctor = nv98_crypt_context_ctor,
+ .dtor = _nouveau_crypt_context_dtor,
+ .init = _nouveau_crypt_context_init,
+ .fini = _nouveau_crypt_context_fini,
+ .rd32 = _nouveau_crypt_context_rd32,
+ .wr32 = _nouveau_crypt_context_wr32,
},
};
@@ -113,6 +134,7 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
nv_wr32(priv, 0x087004, stat);
}
+ nv50_fb_trap(nouveau_fb(priv), 1);
nouveau_engctx_put(engctx);
}
@@ -131,8 +153,7 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_crypt_priv *priv;
int ret;
- ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
- "PCRYPT", "crypt", &priv);
+ ret = nouveau_crypt_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -142,10 +163,36 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->cclass = &nv98_crypt_cclass;
nv_engine(priv)->sclass = nv98_crypt_sclass;
nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
- nv_falcon(priv)->code.data = nv98_pcrypt_code;
- nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
- nv_falcon(priv)->data.data = nv98_pcrypt_data;
- nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
+ return 0;
+}
+
+static int
+nv98_crypt_init(struct nouveau_object *object)
+{
+ struct nv98_crypt_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_crypt_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* wait for exit interrupt to signal */
+ nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x087004, 0x00000010);
+
+ /* upload microcode code and data segments */
+ nv_wr32(priv, 0x087ff8, 0x00100000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+ nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
+
+ nv_wr32(priv, 0x087ff8, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+ nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
+
+ /* start it running */
+ nv_wr32(priv, 0x08710c, 0x00000000);
+ nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
return 0;
}
@@ -154,10 +201,8 @@ nv98_crypt_oclass = {
.handle = NV_ENGINE(CRYPT, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_crypt_ctor,
- .dtor = _nouveau_falcon_dtor,
- .init = _nouveau_falcon_init,
- .fini = _nouveau_falcon_fini,
- .rd32 = _nouveau_falcon_rd32,
- .wr32 = _nouveau_falcon_wr32,
+ .dtor = _nouveau_crypt_dtor,
+ .init = nv98_crypt_init,
+ .fini = _nouveau_crypt_fini,
},
};
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
deleted file mode 100644
index d0817d94454c..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include
-#include
-
-#include
-#include
-#include
-
-#include "nv50.h"
-
-int
-nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
-{
- const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
- (data & NV50_DISP_DAC_PWR_VSYNC) |
- (data & NV50_DISP_DAC_PWR_DATA) |
- (data & NV50_DISP_DAC_PWR_STATE);
- const u32 doff = (or * 0x800);
- nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
- nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
- nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
- return 0;
-}
-
-int
-nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
-{
- const u32 doff = (or * 0x800);
- int load = -EINVAL;
- nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
- udelay(9500);
- nv_wr32(priv, 0x61a00c + doff, 0x80000000);
- load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
- nv_wr32(priv, 0x61a00c + doff, 0x00000000);
- return load;
-}
-
-int
-nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
- u32 *data = args;
- int ret;
-
- if (size < sizeof(u32))
- return -EINVAL;
-
- switch (mthd & ~0x3f) {
- case NV50_DISP_DAC_PWR:
- ret = priv->dac.power(priv, or, data[0]);
- break;
- case NV50_DISP_DAC_LOAD:
- ret = priv->dac.sense(priv, or, data[0]);
- if (ret >= 0) {
- data[0] = ret;
- ret = 0;
- }
- break;
- default:
- BUG_ON(1);
- }
-
- return ret;
-}
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
deleted file mode 100644
index 373dbcc523b2..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include
-#include
-
-#include "nv50.h"
-
-int
-nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
-{
- const u32 soff = (or * 0x800);
- int i;
-
- if (data && data[0]) {
- for (i = 0; i < size; i++)
- nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
- nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
- } else
- if (data) {
- nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
- } else {
- nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
- }
-
- return 0;
-}
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
deleted file mode 100644
index dc57e24fc1df..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-
-#include "nv50.h"
-
-int
-nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
-{
- const u32 soff = (or * 0x030);
- int i;
-
- if (data && data[0]) {
- for (i = 0; i < size; i++)
- nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
- nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
- } else
- if (data) {
- nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
- } else {
- nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
- }
-
- return 0;
-}
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
deleted file mode 100644
index 0d36bdc51417..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include
-#include
-
-#include "nv50.h"
-
-int
-nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
-{
- const u32 hoff = (head * 0x800);
-
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
- nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
- nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
- nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
- return 0;
- }
-
- /* AVI InfoFrame */
- nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
- nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
- nv_wr32(priv, 0x616530 + hoff, 0x00000000);
- nv_wr32(priv, 0x616534 + hoff, 0x00000000);
- nv_wr32(priv, 0x616538 + hoff, 0x00000000);
- nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
-
- /* Audio InfoFrame */
- nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
- nv_wr32(priv, 0x61650c + hoff, 0x00000071);
- nv_wr32(priv, 0x616510 + hoff, 0x00000000);
- nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
-
- /* ??? */
- nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
- /* HDMI_CTRL */
- nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
- return 0;
-}
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
deleted file mode 100644
index f065fc248adf..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include
-#include
-
-#include "nv50.h"
-
-int
-nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
-{
- const u32 soff = (or * 0x800);
-
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
- nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
- nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
- nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
- return 0;
- }
-
- /* AVI InfoFrame */
- nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
- nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
- nv_wr32(priv, 0x61c530 + soff, 0x00000000);
- nv_wr32(priv, 0x61c534 + soff, 0x00000000);
- nv_wr32(priv, 0x61c538 + soff, 0x00000000);
- nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
-
- /* Audio InfoFrame */
- nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
- nv_wr32(priv, 0x61c50c + soff, 0x00000071);
- nv_wr32(priv, 0x61c510 + soff, 0x00000000);
- nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
-
- /* ??? */
- nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
- nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
- nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
- /* HDMI_CTRL */
- nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
- return 0;
-}
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
deleted file mode 100644
index 5151bb261832..000000000000
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include
-#include
-
-#include "nv50.h"
-
-int
-nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
-{
- const u32 hoff = (head * 0x800);
-
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
- nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
- nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
- nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
- return 0;
- }
-
- /* AVI InfoFrame */
- nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
- nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
- nv_wr32(priv, 0x616724 + hoff, 0x00000000);
- nv_wr32(priv, 0x616728 + hoff, 0x00000000);
- nv_wr32(priv, 0x61672c + hoff, 0x00000000);
- nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
-
- /* ??? InfoFrame? */
- nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
- nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
- nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
-
- /* HDMI_CTRL */
- nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
-
- /* NFI, audio doesn't work without it though.. */
- nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
- return 0;
-}
diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index c6f80055e988..15b182c84ce8 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,743 +22,24 @@
* Authors: Ben Skeggs
*/
-#include
-#include