diff --git a/[refs] b/[refs]
index 48b5b403fe02..aa57757afd55 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 2cb85a7bd2ca6db3ab3d632d0a1b6ca3770ddcf4
+refs/heads/master: a93178a13dbd35850ec8a86b023e1f8953e80dae
diff --git a/trunk/Documentation/DMA-attributes.txt b/trunk/Documentation/DMA-attributes.txt
index f50309081ac7..e59480db9ee0 100644
--- a/trunk/Documentation/DMA-attributes.txt
+++ b/trunk/Documentation/DMA-attributes.txt
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
dma_unmap_{single,page,sg} functions family to force buffer to stay in
device domain after releasing a mapping for it. Use this attribute with
care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default the DMA-mapping subsystem is allowed to assemble the buffer
+allocated by the dma_alloc_attrs() function from individual pages if it
+can be mapped as a contiguous chunk into the device's DMA address space.
+By specifying this attribute the allocated buffer is forced to be
+contiguous in physical memory as well.
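As a minimal sketch (not part of the patch above), a driver could request a
physically contiguous buffer through the dma_alloc_attrs() interface described
here; the device pointer, buffer size and error handling are assumed to come
from the caller, and the struct dma_attrs API of this kernel generation is
assumed:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	static void *alloc_contiguous_buffer(struct device *dev, size_t size,
					     dma_addr_t *dma_handle)
	{
		DEFINE_DMA_ATTRS(attrs);

		/* Force the buffer to be contiguous in physical memory. */
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

		return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
	}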
diff --git a/trunk/Documentation/DocBook/drm.tmpl b/trunk/Documentation/DocBook/drm.tmpl
index b0300529ab13..4ee2304f82f9 100644
--- a/trunk/Documentation/DocBook/drm.tmpl
+++ b/trunk/Documentation/DocBook/drm.tmpl
@@ -1141,23 +1141,13 @@ int max_width, max_height;
the page_flip operation will be called with a
non-NULL event argument pointing to a
drm_pending_vblank_event instance. Upon page
- flip completion the driver must fill the
- event::event
- sequence, tv_sec
- and tv_usec fields with the associated
- vertical blanking count and timestamp, add the event to the
- drm_file list of events to be signaled, and wake
- up any waiting process. This can be performed with
+ flip completion the driver must call drm_send_vblank_event
+ to fill in the event and send it, waking up any waiting processes.
+ This can be performed with
event.sequence = drm_vblank_count_and_time(..., &now);
- event->event.tv_sec = now.tv_sec;
- event->event.tv_usec = now.tv_usec;
-
spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&event->base.link, &event->base.file_priv->event_list);
- wake_up_interruptible(&event->base.file_priv->event_wait);
+ ...
+ drm_send_vblank_event(dev, pipe, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
]]>
@@ -1621,10 +1611,10 @@ void intel_crt_init(struct drm_device *dev)
-
+
- Mid-layer Helper Functions
+ Mode Setting Helper Functions
The CRTC, encoder and connector functions provided by the drivers
implement the DRM API. They're called by the DRM core and ioctl handlers
@@ -2106,6 +2096,21 @@ void intel_crt_init(struct drm_device *dev)
+
+ Modeset Helper Functions Reference
+!Edrivers/gpu/drm/drm_crtc_helper.c
+
+
+ fbdev Helper Functions Reference
+!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
+!Edrivers/gpu/drm/drm_fb_helper.c
+
+
+ Display Port Helper Functions Reference
+!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
+!Iinclude/drm/drm_dp_helper.h
+!Edrivers/gpu/drm/drm_dp_helper.c
+
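As a rough illustration of the page-flip completion sequence described in the
updated drm.tmpl text above (a sketch only; the helper names come from the
documentation, everything else is assumed):

	#include <drm/drmP.h>

	static void complete_page_flip(struct drm_device *dev, int pipe,
				       struct drm_pending_vblank_event *event)
	{
		struct timeval now;
		unsigned long flags;

		event->event.sequence = drm_vblank_count_and_time(dev, pipe, &now);

		spin_lock_irqsave(&dev->event_lock, flags);
		/* Fills in the event timestamp, queues it and wakes up waiters. */
		drm_send_vblank_event(dev, pipe, event);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}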
diff --git a/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
new file mode 100644
index 000000000000..b4fa934ae3a2
--- /dev/null
+++ b/trunk/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -0,0 +1,191 @@
+NVIDIA Tegra host1x
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-host1x"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt outputs from the controller.
+- #address-cells: The number of cells used to represent physical base addresses
+ in the host1x address space. Should be 1.
+- #size-cells: The number of cells used to represent the size of an address
+ range in the host1x address space. Should be 1.
+- ranges: The mapping of the host1x address space to the CPU address space.
+
+The host1x top-level node defines a number of children, each representing one
+of the following host1x client modules:
+
+- mpe: video encoder
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-mpe"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- vi: video input
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-vi"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- epp: encoder pre-processor
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-epp"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- isp: image signal processor
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-isp"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- gr2d: 2D graphics engine
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-gr2d"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- gr3d: 3D graphics engine
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-gr3d"
+ - reg: Physical base address and length of the controller's registers.
+
+- dc: display controller
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-dc"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+ Each display controller node has a child node, named "rgb", that represents
+ the RGB output associated with the controller. It can take the following
+ optional properties:
+ - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+ - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+ - nvidia,edid: supplies a binary EDID blob
+
+- hdmi: High Definition Multimedia Interface
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-hdmi"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+ - vdd-supply: regulator for supply voltage
+ - pll-supply: regulator for PLL
+
+ Optional properties:
+ - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+ - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+ - nvidia,edid: supplies a binary EDID blob
+
+- tvo: TV encoder output
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-tvo"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+
+- dsi: display serial interface
+
+ Required properties:
+ - compatible: "nvidia,tegra<chip>-dsi"
+ - reg: Physical base address and length of the controller's registers.
+
+Example:
+
+/ {
+ ...
+
+ host1x {
+ compatible = "nvidia,tegra20-host1x", "simple-bus";
+ reg = <0x50000000 0x00024000>;
+ interrupts = <0 65 0x04 /* mpcore syncpt */
+ 0 67 0x04>; /* mpcore general */
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x54000000 0x54000000 0x04000000>;
+
+ mpe {
+ compatible = "nvidia,tegra20-mpe";
+ reg = <0x54040000 0x00040000>;
+ interrupts = <0 68 0x04>;
+ };
+
+ vi {
+ compatible = "nvidia,tegra20-vi";
+ reg = <0x54080000 0x00040000>;
+ interrupts = <0 69 0x04>;
+ };
+
+ epp {
+ compatible = "nvidia,tegra20-epp";
+ reg = <0x540c0000 0x00040000>;
+ interrupts = <0 70 0x04>;
+ };
+
+ isp {
+ compatible = "nvidia,tegra20-isp";
+ reg = <0x54100000 0x00040000>;
+ interrupts = <0 71 0x04>;
+ };
+
+ gr2d {
+ compatible = "nvidia,tegra20-gr2d";
+ reg = <0x54140000 0x00040000>;
+ interrupts = <0 72 0x04>;
+ };
+
+ gr3d {
+ compatible = "nvidia,tegra20-gr3d";
+ reg = <0x54180000 0x00040000>;
+ };
+
+ dc@54200000 {
+ compatible = "nvidia,tegra20-dc";
+ reg = <0x54200000 0x00040000>;
+ interrupts = <0 73 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ dc@54240000 {
+ compatible = "nvidia,tegra20-dc";
+ reg = <0x54240000 0x00040000>;
+ interrupts = <0 74 0x04>;
+
+ rgb {
+ status = "disabled";
+ };
+ };
+
+ hdmi {
+ compatible = "nvidia,tegra20-hdmi";
+ reg = <0x54280000 0x00040000>;
+ interrupts = <0 75 0x04>;
+ status = "disabled";
+ };
+
+ tvo {
+ compatible = "nvidia,tegra20-tvo";
+ reg = <0x542c0000 0x00040000>;
+ interrupts = <0 76 0x04>;
+ status = "disabled";
+ };
+
+ dsi {
+ compatible = "nvidia,tegra20-dsi";
+ reg = <0x54300000 0x00040000>;
+ status = "disabled";
+ };
+ };
+
+ ...
+};
diff --git a/trunk/Documentation/devicetree/bindings/mfd/stmpe.txt b/trunk/Documentation/devicetree/bindings/mfd/stmpe.txt
new file mode 100644
index 000000000000..56edb5520685
--- /dev/null
+++ b/trunk/Documentation/devicetree/bindings/mfd/stmpe.txt
@@ -0,0 +1,28 @@
+* ST Microelectronics STMPE Multi-Functional Device
+
+STMPE is an MFD device which may expose the following inbuilt devices: gpio,
+keypad, touchscreen, adc, pwm, rotator.
+
+Required properties:
+ - compatible : "st,stmpe[610|801|811|1601|2401|2403]"
+ - reg : I2C/SPI address of the device
+
+Optional properties:
+ - interrupts : The interrupt outputs from the controller
+ - interrupt-controller : Marks the device node as an interrupt controller
+ - interrupt-parent : Specifies which IRQ controller we're connected to
+ - wakeup-source : Marks the input device as wakeup capable
+ - st,autosleep-timeout : Valid entries (ms): 4, 16, 32, 64, 128, 256, 512 and 1024
+
+Example:
+
+ stmpe1601: stmpe1601@40 {
+ compatible = "st,stmpe1601";
+ reg = <0x40>;
+ interrupts = <26 0x4>;
+ interrupt-parent = <&gpio6>;
+ interrupt-controller;
+
+ wakeup-source;
+ st,autosleep-timeout = <1024>;
+ };
diff --git a/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt b/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt
index d316fb895daf..4f05d208c95c 100644
--- a/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt
+++ b/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt
@@ -11,6 +11,9 @@ Required properties:
using the standard binding for regulators found at
Documentation/devicetree/bindings/regulator/regulator.txt.
+Optional properties:
+- ti,pmic-shutdown-controller: Tells the PMIC to shut down on PWR_EN toggle.
+
The valid names for regulators are:
tps65217: dcdc1, dcdc2, dcdc3, ldo1, ldo2, ldo3 and ldo4
@@ -20,6 +23,7 @@ Example:
tps: tps@24 {
compatible = "ti,tps65217";
+ ti,pmic-shutdown-controller;
regulators {
dcdc1_reg: dcdc1 {
diff --git a/trunk/Documentation/filesystems/ext4.txt b/trunk/Documentation/filesystems/ext4.txt
index 104322bf378c..34ea4f1fa6ea 100644
--- a/trunk/Documentation/filesystems/ext4.txt
+++ b/trunk/Documentation/filesystems/ext4.txt
@@ -200,12 +200,9 @@ inode_readahead_blks=n This tuning parameter controls the maximum
table readahead algorithm will pre-read into
the buffer cache. The default value is 32 blocks.
-nouser_xattr Disables Extended User Attributes. If you have extended
- attribute support enabled in the kernel configuration
- (CONFIG_EXT4_FS_XATTR), extended attribute support
- is enabled by default on mount. See the attr(5) manual
- page and http://acl.bestbits.at/ for more information
- about extended attributes.
+nouser_xattr Disables Extended User Attributes. See the
+ attr(5) manual page and http://acl.bestbits.at/
+ for more information about extended attributes.
noacl This option disables POSIX Access Control List
support. If ACL support is enabled in the kernel
diff --git a/trunk/Documentation/i2c/smbus-protocol b/trunk/Documentation/i2c/smbus-protocol
index 49f5b680809d..d1f22618e14b 100644
--- a/trunk/Documentation/i2c/smbus-protocol
+++ b/trunk/Documentation/i2c/smbus-protocol
@@ -23,6 +23,12 @@ don't match these function names. For some of the operations which pass a
single data byte, the functions using SMBus protocol operation names execute
a different protocol operation entirely.
+Each transaction type corresponds to a functionality flag. Before calling a
+transaction function, a device driver should always check (just once) for
+the corresponding functionality flag to ensure that the underlying I2C
+adapter supports the transaction in question. See
+Documentation/i2c/functionality for the details.
+
Key to symbols
==============
@@ -49,6 +55,8 @@ This sends a single bit to the device, at the place of the Rd/Wr bit.
A Addr Rd/Wr [A] P
+Functionality flag: I2C_FUNC_SMBUS_QUICK
+
SMBus Receive Byte: i2c_smbus_read_byte()
==========================================
@@ -60,6 +68,8 @@ the previous SMBus command.
S Addr Rd [A] [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_BYTE
+
SMBus Send Byte: i2c_smbus_write_byte()
========================================
@@ -69,6 +79,8 @@ to a device. See Receive Byte for more information.
S Addr Wr [A] Data [A] P
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BYTE
+
SMBus Read Byte: i2c_smbus_read_byte_data()
============================================
@@ -78,6 +90,8 @@ The register is specified through the Comm byte.
S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
+
SMBus Read Word: i2c_smbus_read_word_data()
============================================
@@ -88,6 +102,8 @@ byte. But this time, the data is a complete word (16 bits).
S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
+
Note the convenience function i2c_smbus_read_word_swapped is
available for reads where the two data bytes are the other way
around (not SMBus compliant, but very popular.)
@@ -102,6 +118,8 @@ the Read Byte operation.
S Addr Wr [A] Comm [A] Data [A] P
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BYTE_DATA
+
SMBus Write Word: i2c_smbus_write_word_data()
==============================================
@@ -112,6 +130,8 @@ specified through the Comm byte.
S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P
+Functionality flag: I2C_FUNC_SMBUS_WRITE_WORD_DATA
+
Note the convenience function i2c_smbus_write_word_swapped is
available for writes where the two data bytes are the other way
around (not SMBus compliant, but very popular.)
@@ -126,6 +146,8 @@ This command selects a device register (through the Comm byte), sends
S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
S Addr Rd [A] [DataLow] A [DataHigh] NA P
+Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
+
SMBus Block Read: i2c_smbus_read_block_data()
==============================================
@@ -137,6 +159,8 @@ of data is specified by the device in the Count byte.
S Addr Wr [A] Comm [A]
S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
+
SMBus Block Write: i2c_smbus_write_block_data()
================================================
@@ -147,6 +171,8 @@ Comm byte. The amount of data is specified in the Count byte.
S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA
+
SMBus Block Write - Block Read Process Call
===========================================
@@ -160,6 +186,8 @@ This command selects a device register (through the Comm byte), sends
S Addr Wr [A] Comm [A] Count [A] Data [A] ...
S Addr Rd [A] [Count] A [Data] ... A P
+Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
+
SMBus Host Notify
=================
@@ -229,15 +257,7 @@ designated register that is specified through the Comm byte.
S Addr Wr [A] Comm [A]
S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
-
-I2C Block Read (2 Comm bytes)
-=============================
-
-This command reads a block of bytes from a device, from a
-designated register that is specified through the two Comm bytes.
-
-S Addr Wr [A] Comm1 [A] Comm2 [A]
- S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
I2C Block Write: i2c_smbus_write_i2c_block_data()
@@ -249,3 +269,5 @@ Comm byte. Note that command lengths of 0, 2, or more bytes are
supported as they are indistinguishable from data.
S Addr Wr [A] Comm [A] Data [A] Data [A] ... [A] Data [A] P
+
+Functionality flag: I2C_FUNC_SMBUS_WRITE_I2C_BLOCK
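To illustrate the functionality-flag check recommended above, a client driver
can verify support once at probe time before using the corresponding
transaction; the probe function below is a hypothetical sketch:

	#include <linux/i2c.h>

	static int example_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		/* Check once that the adapter supports SMBus Read Byte. */
		if (!i2c_check_functionality(client->adapter,
					     I2C_FUNC_SMBUS_READ_BYTE_DATA))
			return -ENODEV;

		/* i2c_smbus_read_byte_data() may now be used safely. */
		return 0;
	}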
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index 20e248cc03a9..ea8e5b485576 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -2032,6 +2032,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nr_uarts= [SERIAL] maximum number of UARTs to be registered.
+ numa_balancing= [KNL,X86] Enable or disable automatic NUMA balancing.
+ Allowed values are enable and disable
+
numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
one of ['zone', 'node', 'default'] can be specified
This can be set from sysctl after boot.
diff --git a/trunk/Documentation/kref.txt b/trunk/Documentation/kref.txt
index 48ba715d5a63..ddf85a5dde0c 100644
--- a/trunk/Documentation/kref.txt
+++ b/trunk/Documentation/kref.txt
@@ -213,3 +213,91 @@ presentation on krefs, which can be found at:
and:
http://www.kroah.com/linux/talks/ols_2004_kref_talk/
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+ struct my_data *entry = NULL;
+ mutex_lock(&mutex);
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ mutex_unlock(&mutex);
+ return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del(&entry->link);
+ mutex_unlock(&mutex);
+ kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+ kref_put(&entry->refcount, release_entry);
+}
+
+This is useful for removing the mutex lock around kref_put() in put_entry(),
+but it is important that kref_get_unless_zero is enclosed in the same
+critical section that finds the entry in the lookup table; otherwise
+kref_get_unless_zero may reference already-freed memory.
+Note that it is illegal to use kref_get_unless_zero without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
+
+The function kref_get_unless_zero also makes it possible to use rcu
+locking for lookups in the above example:
+
+struct my_data
+{
+ struct rcu_head rhead;
+ .
+ struct kref refcount;
+ .
+ .
+};
+
+static struct my_data *get_entry_rcu()
+{
+ struct my_data *entry = NULL;
+ rcu_read_lock();
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ rcu_read_unlock();
+ return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del_rcu(&entry->link);
+ mutex_unlock(&mutex);
+ kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+ kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for an
+RCU grace period after release_entry_rcu has been called. That can be accomplished
+by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu()
+before using kfree, but note that synchronize_rcu() may sleep for a
+substantial amount of time.
+
+
+Thomas Hellstrom
diff --git a/trunk/Documentation/prctl/seccomp_filter.txt b/trunk/Documentation/prctl/seccomp_filter.txt
index 597c3c581375..1e469ef75778 100644
--- a/trunk/Documentation/prctl/seccomp_filter.txt
+++ b/trunk/Documentation/prctl/seccomp_filter.txt
@@ -95,12 +95,15 @@ SECCOMP_RET_KILL:
SECCOMP_RET_TRAP:
Results in the kernel sending a SIGSYS signal to the triggering
- task without executing the system call. The kernel will
- rollback the register state to just before the system call
- entry such that a signal handler in the task will be able to
- inspect the ucontext_t->uc_mcontext registers and emulate
- system call success or failure upon return from the signal
- handler.
+ task without executing the system call. siginfo->si_call_addr
+ will show the address of the system call instruction, and
+ siginfo->si_syscall and siginfo->si_arch will indicate which
+ syscall was attempted. The program counter will be as though
+ the syscall happened (i.e. it will not point to the syscall
+ instruction). The return value register will contain an arch-
+ dependent value -- if resuming execution, set it to something
+ sensible. (The architecture dependency is because replacing
+ it with -ENOSYS could overwrite some useful information.)
The SECCOMP_RET_DATA portion of the return value will be passed
as si_errno.
@@ -123,6 +126,18 @@ SECCOMP_RET_TRACE:
the BPF program return value will be available to the tracer
via PTRACE_GETEVENTMSG.
+ The tracer can skip the system call by changing the syscall number
+ to -1. Alternatively, the tracer can change the system call
+ requested by changing the system call to a valid syscall number. If
+ the tracer asks to skip the system call, then the system call will
+ appear to return the value that the tracer puts in the return value
+ register.
+
+ The seccomp check will not be run again after the tracer is
+ notified. (This means that seccomp-based sandboxes MUST NOT
+ allow use of ptrace, even of other sandboxed processes, without
+ extreme care; ptracers can use this mechanism to escape.)
+
SECCOMP_RET_ALLOW:
Results in the system call being executed.
@@ -161,3 +176,50 @@ architecture supports both ptrace_event and seccomp, it will be able to
support seccomp filter with minor fixup: SIGSYS support and seccomp return
value checking. Then it must just add CONFIG_HAVE_ARCH_SECCOMP_FILTER
to its arch-specific Kconfig.
+
+
+
+Caveats
+-------
+
+The vDSO can cause some system calls to run entirely in userspace,
+leading to surprises when you run programs on different machines that
+fall back to real syscalls. To minimize these surprises on x86, make
+sure you test with
+/sys/devices/system/clocksource/clocksource0/current_clocksource set to
+something like acpi_pm.
+
+On x86-64, vsyscall emulation is enabled by default (vsyscalls are legacy
+variants of vDSO calls). Currently, emulated vsyscalls will honor seccomp, with a few oddities:
+
+- A return value of SECCOMP_RET_TRAP will set a si_call_addr pointing to
+ the vsyscall entry for the given call and not the address after the
+ 'syscall' instruction. Any code which wants to restart the call
+ should be aware that (a) a ret instruction has been emulated and (b)
+ trying to resume the syscall will again trigger the standard vsyscall
+ emulation security checks, making resuming the syscall mostly
+ pointless.
+
+- A return value of SECCOMP_RET_TRACE will signal the tracer as usual,
+ but the syscall may not be changed to another system call using the
+ orig_rax register. It may only be changed to -1 in order to skip the
+ currently emulated call. Any other change MAY terminate the process.
+ The rip value seen by the tracer will be the syscall entry address;
+ this is different from normal behavior. The tracer MUST NOT modify
+ rip or rsp. (Do not rely on other changes terminating the process.
+ They might work. For example, on some kernels, choosing a syscall
+ that only exists in future kernels will be correctly emulated (by
+ returning -ENOSYS).)
+
+To detect this quirky behavior, check for (addr & ~0x0C00) ==
+0xFFFFFFFFFF600000. (For SECCOMP_RET_TRACE, use rip. For
+SECCOMP_RET_TRAP, use siginfo->si_call_addr.) Do not check any other
+condition: future kernels may improve vsyscall emulation and current
+kernels in vsyscall=native mode will behave differently, but the
+instructions at 0xF...F600{0,4,8,C}00 will not be system calls in these
+cases.
+
+Note that modern systems are unlikely to use vsyscalls at all -- they
+are a legacy feature and they are considerably slower than standard
+syscalls. New code will use the vDSO, and vDSO-issued system calls
+are indistinguishable from normal system calls.
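The address test suggested above needs explicit parentheses in C, since ==
binds more tightly than &; a minimal sketch of the check (the helper name is
hypothetical, and addr is taken from rip or siginfo->si_call_addr as
described):

	#include <stdbool.h>
	#include <stdint.h>

	/* Returns true if addr points into the legacy x86-64 vsyscall page. */
	static bool is_vsyscall_entry(uint64_t addr)
	{
		return (addr & ~0x0C00ULL) == 0xFFFFFFFFFF600000ULL;
	}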
diff --git a/trunk/Documentation/security/keys.txt b/trunk/Documentation/security/keys.txt
index 7d9ca92022d8..7b4145d00452 100644
--- a/trunk/Documentation/security/keys.txt
+++ b/trunk/Documentation/security/keys.txt
@@ -994,6 +994,23 @@ payload contents" for more information.
reference pointer if successful.
+(*) A keyring can be created by:
+
+ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+ const struct cred *cred,
+ key_perm_t perm,
+ unsigned long flags,
+ struct key *dest);
+
+ This creates a keyring with the given attributes and returns it. If dest
+ is not NULL, the new keyring will be linked into the keyring to which it
+ points. No permission checks are made upon the destination keyring.
+
+ Error EDQUOT can be returned if the keyring would overload the quota (pass
+ KEY_ALLOC_NOT_IN_QUOTA in flags if the keyring shouldn't be accounted
+ towards the user's quota). Error ENOMEM can also be returned.
+
+
(*) To check the validity of a key, this function can be called:
int validate_key(struct key *key);
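A sketch of calling keyring_alloc() as documented above (the description
string, permission mask and use of current_cred() are assumptions, following
the signature given in the text):

	#include <linux/key.h>
	#include <linux/cred.h>

	static struct key *make_example_keyring(void)
	{
		const struct cred *cred = current_cred();

		/* Unlinked keyring (dest == NULL), not charged to the quota. */
		return keyring_alloc("_example", cred->fsuid, cred->fsgid, cred,
				     KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
				     KEY_ALLOC_NOT_IN_QUOTA, NULL);
	}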
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index f71d2f901a69..6892b26025ba 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -2549,6 +2549,15 @@ S: Supported
F: drivers/gpu/drm/exynos
F: include/drm/exynos*
+DRM DRIVERS FOR NVIDIA TEGRA
+M: Thierry Reding
+L: dri-devel@lists.freedesktop.org
+L: linux-tegra@vger.kernel.org
+T: git git://gitorious.org/thierryreding/linux.git
+S: Maintained
+F: drivers/gpu/drm/tegra/
+F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+
DSCC4 DRIVER
M: Francois Romieu
L: netdev@vger.kernel.org
@@ -3712,7 +3721,7 @@ I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman"
L: linux-i2c@vger.kernel.org
S: Maintained
-F: drivers/i2c/busses/i2c-stub.c
+F: drivers/i2c/i2c-stub.c
I2C SUBSYSTEM
M: Wolfram Sang
diff --git a/trunk/arch/arm/boot/dts/Makefile b/trunk/arch/arm/boot/dts/Makefile
index b4015854d1ce..d077ef8426df 100644
--- a/trunk/arch/arm/boot/dts/Makefile
+++ b/trunk/arch/arm/boot/dts/Makefile
@@ -132,8 +132,8 @@ dtb-$(CONFIG_ARCH_SPEAR3XX)+= spear300-evb.dtb \
spear320-evb.dtb \
spear320-hmi.dtb
dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb
-dtb-$(CONFIG_ARCH_SUNXI) += sun4i-cubieboard.dtb \
- sun5i-olinuxino.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun4i-a10-cubieboard.dtb \
+ sun5i-a13-olinuxino.dtb
dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
tegra20-medcom-wide.dtb \
tegra20-paz00.dtb \
diff --git a/trunk/arch/arm/boot/dts/sun4i-cubieboard.dts b/trunk/arch/arm/boot/dts/sun4i-cubieboard.dts
index f4ca126ad994..5cab82540437 100644
--- a/trunk/arch/arm/boot/dts/sun4i-cubieboard.dts
+++ b/trunk/arch/arm/boot/dts/sun4i-cubieboard.dts
@@ -11,11 +11,11 @@
*/
/dts-v1/;
-/include/ "sun4i.dtsi"
+/include/ "sun4i-a10.dtsi"
/ {
model = "Cubietech Cubieboard";
- compatible = "cubietech,cubieboard", "allwinner,sun4i";
+ compatible = "cubietech,a10-cubieboard", "allwinner,sun4i-a10";
aliases {
serial0 = &uart0;
diff --git a/trunk/arch/arm/boot/dts/sun5i-olinuxino.dts b/trunk/arch/arm/boot/dts/sun5i-olinuxino.dts
index d6ff889a5d87..498a091a4ea2 100644
--- a/trunk/arch/arm/boot/dts/sun5i-olinuxino.dts
+++ b/trunk/arch/arm/boot/dts/sun5i-olinuxino.dts
@@ -12,11 +12,11 @@
*/
/dts-v1/;
-/include/ "sun5i.dtsi"
+/include/ "sun5i-a13.dtsi"
/ {
model = "Olimex A13-Olinuxino";
- compatible = "olimex,a13-olinuxino", "allwinner,sun5i";
+ compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
chosen {
bootargs = "earlyprintk console=ttyS0,115200";
diff --git a/trunk/arch/arm/mach-davinci/board-da850-evm.c b/trunk/arch/arm/mach-davinci/board-da850-evm.c
index 7211772edd9d..0299915575a8 100644
--- a/trunk/arch/arm/mach-davinci/board-da850-evm.c
+++ b/trunk/arch/arm/mach-davinci/board-da850-evm.c
@@ -41,6 +41,7 @@
#include
#include
#include
+#include
#include
#include
diff --git a/trunk/arch/arm/mach-exynos/common.h b/trunk/arch/arm/mach-exynos/common.h
index dac146df79ac..04744f9c120f 100644
--- a/trunk/arch/arm/mach-exynos/common.h
+++ b/trunk/arch/arm/mach-exynos/common.h
@@ -25,7 +25,7 @@ void exynos_init_late(void);
#ifdef CONFIG_PM_GENERIC_DOMAINS
int exynos_pm_late_initcall(void);
#else
-static int exynos_pm_late_initcall(void) { return 0; }
+static inline int exynos_pm_late_initcall(void) { return 0; }
#endif
#ifdef CONFIG_ARCH_EXYNOS4
diff --git a/trunk/arch/arm/mach-omap2/cclock44xx_data.c b/trunk/arch/arm/mach-omap2/cclock44xx_data.c
index aa56c3e5bb34..5789a5e25563 100644
--- a/trunk/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/trunk/arch/arm/mach-omap2/cclock44xx_data.c
@@ -40,6 +40,14 @@
#define OMAP4430_MODULEMODE_HWCTRL_SHIFT 0
#define OMAP4430_MODULEMODE_SWCTRL_SHIFT 1
+/*
+ * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
+ * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
+ * must be set to 196.608 MHz" and hence, the DPLL locked frequency is
+ * half of this value.
+ */
+#define OMAP4_DPLL_ABE_DEFFREQ 98304000
+
/* Root clocks */
DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -124,6 +132,8 @@ static struct dpll_data dpll_abe_dd = {
.enable_mask = OMAP4430_DPLL_EN_MASK,
.autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
.idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .m4xen_mask = OMAP4430_DPLL_REGM4XEN_MASK,
+ .lpmode_mask = OMAP4430_DPLL_LPMODE_EN_MASK,
.max_multiplier = 2047,
.max_divider = 128,
.min_divider = 1,
@@ -233,7 +243,7 @@ static struct dpll_data dpll_core_dd = {
static const char *dpll_core_ck_parents[] = {
- "sys_clkin_ck",
+ "sys_clkin_ck", "core_hsd_byp_clk_mux_ck"
};
static struct clk dpll_core_ck;
@@ -286,9 +296,9 @@ DEFINE_CLK_DIVIDER(div_core_ck, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck, 0x0,
OMAP4430_CM_CLKSEL_CORE, OMAP4430_CLKSEL_CORE_SHIFT,
OMAP4430_CLKSEL_CORE_WIDTH, 0x0, NULL);
-DEFINE_CLK_OMAP_HSDIVIDER(div_iva_hs_clk, "dpll_core_m5x2_ck",
- &dpll_core_m5x2_ck, 0x0, OMAP4430_CM_BYPCLK_DPLL_IVA,
- OMAP4430_CLKSEL_0_1_MASK);
+DEFINE_CLK_DIVIDER(div_iva_hs_clk, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck,
+ 0x0, OMAP4430_CM_BYPCLK_DPLL_IVA, OMAP4430_CLKSEL_0_1_SHIFT,
+ OMAP4430_CLKSEL_0_1_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
DEFINE_CLK_DIVIDER(div_mpu_hs_clk, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck,
0x0, OMAP4430_CM_BYPCLK_DPLL_MPU, OMAP4430_CLKSEL_0_1_SHIFT,
@@ -363,8 +373,21 @@ static struct dpll_data dpll_iva_dd = {
.min_divider = 1,
};
+static const char *dpll_iva_ck_parents[] = {
+ "sys_clkin_ck", "iva_hsd_byp_clk_mux_ck"
+};
+
static struct clk dpll_iva_ck;
+static const struct clk_ops dpll_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
static struct clk_hw_omap dpll_iva_ck_hw = {
.hw = {
.clk = &dpll_iva_ck,
@@ -373,7 +396,7 @@ static struct clk_hw_omap dpll_iva_ck_hw = {
.ops = &clkhwops_omap3_dpll,
};
-DEFINE_STRUCT_CLK(dpll_iva_ck, dpll_core_ck_parents, dpll_abe_ck_ops);
+DEFINE_STRUCT_CLK(dpll_iva_ck, dpll_iva_ck_parents, dpll_ck_ops);
static const char *dpll_iva_x2_ck_parents[] = {
"dpll_iva_ck",
@@ -416,6 +439,10 @@ static struct dpll_data dpll_mpu_dd = {
.min_divider = 1,
};
+static const char *dpll_mpu_ck_parents[] = {
+ "sys_clkin_ck", "div_mpu_hs_clk"
+};
+
static struct clk dpll_mpu_ck;
static struct clk_hw_omap dpll_mpu_ck_hw = {
@@ -426,7 +453,7 @@ static struct clk_hw_omap dpll_mpu_ck_hw = {
.ops = &clkhwops_omap3_dpll,
};
-DEFINE_STRUCT_CLK(dpll_mpu_ck, dpll_core_ck_parents, dpll_abe_ck_ops);
+DEFINE_STRUCT_CLK(dpll_mpu_ck, dpll_mpu_ck_parents, dpll_ck_ops);
DEFINE_CLK_FIXED_FACTOR(mpu_periphclk, "dpll_mpu_ck", &dpll_mpu_ck, 0x0, 1, 2);
@@ -464,6 +491,9 @@ static struct dpll_data dpll_per_dd = {
.min_divider = 1,
};
+static const char *dpll_per_ck_parents[] = {
+ "sys_clkin_ck", "per_hsd_byp_clk_mux_ck"
+};
static struct clk dpll_per_ck;
@@ -475,7 +505,7 @@ static struct clk_hw_omap dpll_per_ck_hw = {
.ops = &clkhwops_omap3_dpll,
};
-DEFINE_STRUCT_CLK(dpll_per_ck, dpll_core_ck_parents, dpll_abe_ck_ops);
+DEFINE_STRUCT_CLK(dpll_per_ck, dpll_per_ck_parents, dpll_ck_ops);
DEFINE_CLK_DIVIDER(dpll_per_m2_ck, "dpll_per_ck", &dpll_per_ck, 0x0,
OMAP4430_CM_DIV_M2_DPLL_PER, OMAP4430_DPLL_CLKOUT_DIV_SHIFT,
@@ -559,6 +589,10 @@ static struct dpll_data dpll_usb_dd = {
.min_divider = 1,
};
+static const char *dpll_usb_ck_parents[] = {
+ "sys_clkin_ck", "usb_hs_clk_div_ck"
+};
+
static struct clk dpll_usb_ck;
static struct clk_hw_omap dpll_usb_ck_hw = {
@@ -569,7 +603,7 @@ static struct clk_hw_omap dpll_usb_ck_hw = {
.ops = &clkhwops_omap3_dpll,
};
-DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_core_ck_parents, dpll_abe_ck_ops);
+DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_ck_ops);
static const char *dpll_usb_clkdcoldo_ck_parents[] = {
"dpll_usb_ck",
@@ -696,9 +730,13 @@ DEFINE_CLK_DIVIDER(syc_clk_div_ck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
OMAP4430_CM_ABE_DSS_SYS_CLKSEL, OMAP4430_CLKSEL_0_0_SHIFT,
OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
+static const char *dbgclk_mux_ck_parents[] = {
+ "sys_clkin_ck"
+};
+
static struct clk dbgclk_mux_ck;
DEFINE_STRUCT_CLK_HW_OMAP(dbgclk_mux_ck, NULL);
-DEFINE_STRUCT_CLK(dbgclk_mux_ck, dpll_core_ck_parents,
+DEFINE_STRUCT_CLK(dbgclk_mux_ck, dbgclk_mux_ck_parents,
dpll_usb_clkdcoldo_ck_ops);
/* Leaf clocks controlled by modules */
@@ -1935,10 +1973,10 @@ static struct omap_clk omap44xx_clks[] = {
CLK("4803e000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
CLK("48086000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
CLK("48088000.timer", "timer_sys_ck", &sys_clkin_ck, CK_443X),
- CLK("49038000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("4903a000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("4903c000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("4903e000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("40138000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("4013a000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("4013c000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("4013e000.timer", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
CLK(NULL, "cpufreq_ck", &dpll_mpu_ck, CK_443X),
};
@@ -1955,6 +1993,7 @@ int __init omap4xxx_clk_init(void)
{
u32 cpu_clkflg;
struct omap_clk *c;
+ int rc;
if (cpu_is_omap443x()) {
cpu_mask = RATE_IN_4430;
@@ -1983,5 +2022,18 @@ int __init omap4xxx_clk_init(void)
omap2_clk_enable_init_clocks(enable_init_clks,
ARRAY_SIZE(enable_init_clks));
+ /*
+ * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power
+ * state when turning on the ABE clock domain. Work around this by
+ * locking the ABE DPLL on boot.
+ */
+ if (cpu_is_omap446x()) {
+ rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
+ if (!rc)
+ rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+ }
+
return 0;
}
diff --git a/trunk/arch/arm/mach-omap2/clock.h b/trunk/arch/arm/mach-omap2/clock.h
index 9917f793c3b6..b40204837bd7 100644
--- a/trunk/arch/arm/mach-omap2/clock.h
+++ b/trunk/arch/arm/mach-omap2/clock.h
@@ -195,6 +195,10 @@ struct clksel {
* @enable_mask: mask of the DPLL mode bitfield in @control_reg
* @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
* @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_m4xen: cache of the last M4X result of
+ * omap4_dpll_regm4xen_round_rate()
+ * @last_rounded_lpmode: cache of the last lpmode result of
+ * omap4_dpll_lpmode_recalc()
* @max_multiplier: maximum valid non-bypass multiplier value (actual)
* @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
* @min_divider: minimum valid non-bypass divider value (actual)
@@ -205,6 +209,8 @@ struct clksel {
* @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
* @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
* @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
+ * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
+ * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
* @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
* @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
* @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
@@ -233,6 +239,8 @@ struct dpll_data {
u32 enable_mask;
unsigned long last_rounded_rate;
u16 last_rounded_m;
+ u8 last_rounded_m4xen;
+ u8 last_rounded_lpmode;
u16 max_multiplier;
u8 last_rounded_n;
u8 min_divider;
@@ -245,6 +253,8 @@ struct dpll_data {
u32 idlest_mask;
u32 dco_mask;
u32 sddiv_mask;
+ u32 lpmode_mask;
+ u32 m4xen_mask;
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
diff --git a/trunk/arch/arm/mach-omap2/clockdomain.c b/trunk/arch/arm/mach-omap2/clockdomain.c
index 384873580b23..7faf82d4e85c 100644
--- a/trunk/arch/arm/mach-omap2/clockdomain.c
+++ b/trunk/arch/arm/mach-omap2/clockdomain.c
@@ -998,7 +998,8 @@ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
spin_lock_irqsave(&clkdm->lock, flags);
/* corner case: disabling unused clocks */
- if (__clk_get_enable_count(clk) == 0)
+ if ((__clk_get_enable_count(clk) == 0) &&
+ (atomic_read(&clkdm->usecount) == 0))
goto ccd_exit;
if (atomic_read(&clkdm->usecount) == 0) {
diff --git a/trunk/arch/arm/mach-omap2/cpuidle34xx.c b/trunk/arch/arm/mach-omap2/cpuidle34xx.c
index bca7a8885703..22590dbe8f14 100644
--- a/trunk/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/trunk/arch/arm/mach-omap2/cpuidle34xx.c
@@ -40,6 +40,8 @@ struct omap3_idle_statedata {
u32 core_state;
};
+static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
+
static struct omap3_idle_statedata omap3_idle_data[] = {
{
.mpu_state = PWRDM_POWER_ON,
@@ -71,7 +73,7 @@ static struct omap3_idle_statedata omap3_idle_data[] = {
},
};
-static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
+/* Private functions */
static int __omap3_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
@@ -260,11 +262,11 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
return ret;
}
-DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
+static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
-struct cpuidle_driver omap3_idle_driver = {
- .name = "omap3_idle",
- .owner = THIS_MODULE,
+static struct cpuidle_driver omap3_idle_driver = {
+ .name = "omap3_idle",
+ .owner = THIS_MODULE,
.states = {
{
.enter = omap3_enter_idle_bm,
@@ -327,6 +329,8 @@ struct cpuidle_driver omap3_idle_driver = {
.safe_state_index = 0,
};
+/* Public functions */
+
/**
* omap3_idle_init - Init routine for OMAP3 idle
*
diff --git a/trunk/arch/arm/mach-omap2/cpuidle44xx.c b/trunk/arch/arm/mach-omap2/cpuidle44xx.c
index 288bee6cbb76..d639aef0deda 100644
--- a/trunk/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/trunk/arch/arm/mach-omap2/cpuidle44xx.c
@@ -54,6 +54,8 @@ static struct clockdomain *cpu_clkdm[NR_CPUS];
static atomic_t abort_barrier;
static bool cpu_done[NR_CPUS];
+/* Private functions */
+
/**
* omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
* @dev: cpuidle device
@@ -161,9 +163,19 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
return index;
}
-DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
+/*
+ * For each cpu, set up the broadcast timer because local timers
+ * stop for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+ int cpu = smp_processor_id();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
+static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
-struct cpuidle_driver omap4_idle_driver = {
+static struct cpuidle_driver omap4_idle_driver = {
.name = "omap4_idle",
.owner = THIS_MODULE,
.en_core_tk_irqen = 1,
@@ -178,7 +190,7 @@ struct cpuidle_driver omap4_idle_driver = {
.desc = "MPUSS ON"
},
{
- /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+ /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
.exit_latency = 328 + 440,
.target_residency = 960,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
@@ -200,15 +212,7 @@ struct cpuidle_driver omap4_idle_driver = {
.safe_state_index = 0,
};
-/*
- * For each cpu, setup the broadcast timer because local timers
- * stops for the states above C1.
- */
-static void omap_setup_broadcast_timer(void *arg)
-{
- int cpu = smp_processor_id();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
+/* Public functions */
/**
* omap4_idle_init - Init routine for OMAP4 idle
diff --git a/trunk/arch/arm/mach-omap2/dpll3xxx.c b/trunk/arch/arm/mach-omap2/dpll3xxx.c
index fafb28c0dcbc..2bb18838cba9 100644
--- a/trunk/arch/arm/mach-omap2/dpll3xxx.c
+++ b/trunk/arch/arm/mach-omap2/dpll3xxx.c
@@ -291,16 +291,13 @@ static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
/*
* _omap3_noncore_dpll_program - set non-core DPLL M,N values directly
- * @clk: struct clk * of DPLL to set
- * @m: DPLL multiplier to set
- * @n: DPLL divider to set
- * @freqsel: FREQSEL value to set
+ * @clk: struct clk * of DPLL to set
+ * @freqsel: FREQSEL value to set
*
- * Program the DPLL with the supplied M, N values, and wait for the DPLL to
- * lock.. Returns -EINVAL upon error, or 0 upon success.
+ * Program the DPLL with the last M, N values calculated, and wait for
+ * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
*/
-static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 m, u8 n,
- u16 freqsel)
+static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
struct dpll_data *dd = clk->dpll_data;
u8 dco, sd_div;
@@ -323,23 +320,45 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 m, u8 n,
/* Set DPLL multiplier, divider */
v = __raw_readl(dd->mult_div1_reg);
v &= ~(dd->mult_mask | dd->div1_mask);
- v |= m << __ffs(dd->mult_mask);
- v |= (n - 1) << __ffs(dd->div1_mask);
+ v |= dd->last_rounded_m << __ffs(dd->mult_mask);
+ v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
/* Configure dco and sd_div for dplls that have these fields */
if (dd->dco_mask) {
- _lookup_dco(clk, &dco, m, n);
+ _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
v &= ~(dd->dco_mask);
v |= dco << __ffs(dd->dco_mask);
}
if (dd->sddiv_mask) {
- _lookup_sddiv(clk, &sd_div, m, n);
+ _lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
+ dd->last_rounded_n);
v &= ~(dd->sddiv_mask);
v |= sd_div << __ffs(dd->sddiv_mask);
}
__raw_writel(v, dd->mult_div1_reg);
+ /* Set 4X multiplier and low-power mode */
+ if (dd->m4xen_mask || dd->lpmode_mask) {
+ v = __raw_readl(dd->control_reg);
+
+ if (dd->m4xen_mask) {
+ if (dd->last_rounded_m4xen)
+ v |= dd->m4xen_mask;
+ else
+ v &= ~dd->m4xen_mask;
+ }
+
+ if (dd->lpmode_mask) {
+ if (dd->last_rounded_lpmode)
+ v |= dd->lpmode_mask;
+ else
+ v &= ~dd->lpmode_mask;
+ }
+
+ __raw_writel(v, dd->control_reg);
+ }
+
/* We let the clock framework set the other output dividers later */
/* REVISIT: Set ramp-up delay? */
@@ -492,8 +511,7 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
pr_debug("%s: %s: set rate: locking rate to %lu.\n",
__func__, __clk_get_name(hw->clk), rate);
- ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
- dd->last_rounded_n, freqsel);
+ ret = omap3_noncore_dpll_program(clk, freqsel);
if (!ret)
new_parent = dd->clk_ref;
}
diff --git a/trunk/arch/arm/mach-omap2/dpll44xx.c b/trunk/arch/arm/mach-omap2/dpll44xx.c
index d3326c474fdc..d28b0f726715 100644
--- a/trunk/arch/arm/mach-omap2/dpll44xx.c
+++ b/trunk/arch/arm/mach-omap2/dpll44xx.c
@@ -20,6 +20,15 @@
#include "clock44xx.h"
#include "cm-regbits-44xx.h"
+/*
+ * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that
+ * can be supported when using the DPLL low-power mode. Frequencies are
+ * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control,
+ * Status, and Low-Power Operation Mode".
+ */
+#define OMAP4_DPLL_LP_FINT_MAX 1000000
+#define OMAP4_DPLL_LP_FOUT_MAX 100000000
+
/* Supported only on OMAP4 */
int omap4_dpllmx_gatectrl_read(struct clk_hw_omap *clk)
{
@@ -81,6 +90,31 @@ const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = {
.deny_idle = omap4_dpllmx_deny_gatectrl,
};
+/**
+ * omap4_dpll_lpmode_recalc - compute DPLL low-power setting
+ * @dd: pointer to the dpll data structure
+ *
+ * Calculates if low-power mode can be enabled based upon the last
+ * multiplier and divider values calculated. If low-power mode can be
+ * enabled, then the bit to enable low-power mode is stored in the
+ * last_rounded_lpmode variable. This implementation is based upon the
+ * criteria for enabling low-power mode as described in the OMAP4430/60
+ * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power
+ * Operation Mode".
+ */
+static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
+{
+ long fint, fout;
+
+ fint = __clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+ fout = fint * dd->last_rounded_m;
+
+ if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
+ dd->last_rounded_lpmode = 1;
+ else
+ dd->last_rounded_lpmode = 0;
+}
+
/**
* omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
* @clk: struct clk * of the DPLL to compute the rate for
@@ -130,7 +164,6 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
unsigned long *parent_rate)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- u32 v;
struct dpll_data *dd;
long r;
@@ -139,18 +172,31 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
dd = clk->dpll_data;
- /* regm4xen adds a multiplier of 4 to DPLL calculations */
- v = __raw_readl(dd->control_reg) & OMAP4430_DPLL_REGM4XEN_MASK;
-
- if (v)
- target_rate = target_rate / OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 0;
+ /*
+ * First try to compute the DPLL configuration for
+ * target rate without using the 4X multiplier.
+ */
r = omap2_dpll_round_rate(hw, target_rate, NULL);
+ if (r != ~0)
+ goto out;
+
+ /*
+ * If we did not find a valid DPLL configuration, try again, but
+ * this time see if using the 4X multiplier can help. Enabling the
+ * 4X multiplier is equivalent to dividing the target rate by 4.
+ */
+ r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
+ NULL);
if (r == ~0)
return r;
- if (v)
- clk->dpll_data->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 1;
+
+out:
+ omap4_dpll_lpmode_recalc(dd);
- return clk->dpll_data->last_rounded_rate;
+ return dd->last_rounded_rate;
}
diff --git a/trunk/arch/arm/mach-tegra/common.c b/trunk/arch/arm/mach-tegra/common.c
index 0816562725f6..d54cfc54b9fe 100644
--- a/trunk/arch/arm/mach-tegra/common.c
+++ b/trunk/arch/arm/mach-tegra/common.c
@@ -104,7 +104,7 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
static __initdata struct tegra_clk_init_table tegra30_clk_init_table[] = {
/* name parent rate enabled */
{ "clk_m", NULL, 0, true },
- { "pll_p", "clk_m", 408000000, true },
+ { "pll_p", "pll_ref", 408000000, true },
{ "pll_p_out1", "pll_p", 9600000, true },
{ "pll_p_out4", "pll_p", 102000000, true },
{ "sclk", "pll_p_out4", 102000000, true },
diff --git a/trunk/arch/arm/mach-tegra/tegra30_clocks.c b/trunk/arch/arm/mach-tegra/tegra30_clocks.c
index efc000e32e1c..d7147779f8ea 100644
--- a/trunk/arch/arm/mach-tegra/tegra30_clocks.c
+++ b/trunk/arch/arm/mach-tegra/tegra30_clocks.c
@@ -2045,9 +2045,7 @@ struct clk_ops tegra30_periph_clk_ops = {
static int tegra30_dsib_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct clk *d = clk_get_sys(NULL, "pll_d");
- /* The DSIB parent selection bit is in PLLD base
- register - can not do direct r-m-w, must be
- protected by PLLD lock */
+ /* The DSIB parent selection bit is in PLLD base register */
tegra_clk_cfg_ex(
d, TEGRA_CLK_PLLD_MIPI_MUX_SEL, index);
diff --git a/trunk/arch/arm/mach-u300/core.c b/trunk/arch/arm/mach-u300/core.c
index 12f3994c43db..0374b9863e9b 100644
--- a/trunk/arch/arm/mach-u300/core.c
+++ b/trunk/arch/arm/mach-u300/core.c
@@ -27,7 +27,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -1543,39 +1542,6 @@ static struct pinctrl_map __initdata u300_pinmux_map[] = {
pin_highz_conf),
};
-struct u300_mux_hog {
- struct device *dev;
- struct pinctrl *p;
-};
-
-static struct u300_mux_hog u300_mux_hogs[] = {
- {
- .dev = &uart0_device.dev,
- },
- {
- .dev = &mmcsd_device.dev,
- },
-};
-
-static int __init u300_pinctrl_fetch(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(u300_mux_hogs); i++) {
- struct pinctrl *p;
-
- p = pinctrl_get_select_default(u300_mux_hogs[i].dev);
- if (IS_ERR(p)) {
- pr_err("u300: could not get pinmux hog for dev %s\n",
- dev_name(u300_mux_hogs[i].dev));
- continue;
- }
- u300_mux_hogs[i].p = p;
- }
- return 0;
-}
-subsys_initcall(u300_pinctrl_fetch);
-
/*
* Notice that AMBA devices are initialized before platform devices.
*
diff --git a/trunk/arch/arm/mach-ux500/devices-db8500.h b/trunk/arch/arm/mach-ux500/devices-db8500.h
index 4b24c9992654..a5e05f6e256f 100644
--- a/trunk/arch/arm/mach-ux500/devices-db8500.h
+++ b/trunk/arch/arm/mach-ux500/devices-db8500.h
@@ -8,6 +8,7 @@
#ifndef __DEVICES_DB8500_H
#define __DEVICES_DB8500_H
+#include
#include
#include "devices-common.h"
diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c
index 5383bc018571..6b2fb87c8698 100644
--- a/trunk/arch/arm/mm/dma-mapping.c
+++ b/trunk/arch/arm/mm/dma-mapping.c
@@ -1034,7 +1034,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
spin_unlock_irqrestore(&mapping->lock, flags);
}
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, struct dma_attrs *attrs)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
@@ -1048,6 +1049,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
if (!pages)
return NULL;
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+ {
+ unsigned long order = get_order(size);
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ goto error;
+
+ __dma_clear_buffer(page, size);
+
+ for (i = 0; i < count; i++)
+ pages[i] = page + i;
+
+ return pages;
+ }
+
while (count) {
int j, order = __fls(count);
@@ -1081,14 +1099,21 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
return NULL;
}
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+ size_t size, struct dma_attrs *attrs)
{
int count = size >> PAGE_SHIFT;
int array_size = count * sizeof(struct page *);
int i;
- for (i = 0; i < count; i++)
- if (pages[i])
- __free_pages(pages[i], 0);
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ dma_release_from_contiguous(dev, pages[0], count);
+ } else {
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ }
+
if (array_size <= PAGE_SIZE)
kfree(pages);
else
@@ -1250,7 +1275,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (gfp & GFP_ATOMIC)
return __iommu_alloc_atomic(dev, size, handle);
- pages = __iommu_alloc_buffer(dev, size, gfp);
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
if (!pages)
return NULL;
@@ -1271,7 +1296,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
err_mapping:
__iommu_remove_mapping(dev, *handle, size);
err_buffer:
- __iommu_free_buffer(dev, pages, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
return NULL;
}
@@ -1327,7 +1352,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
__iommu_remove_mapping(dev, handle, size);
- __iommu_free_buffer(dev, pages, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
diff --git a/trunk/arch/m68k/Kconfig.cpu b/trunk/arch/m68k/Kconfig.cpu
index 2f2d87b40341..b1cfff832fb5 100644
--- a/trunk/arch/m68k/Kconfig.cpu
+++ b/trunk/arch/m68k/Kconfig.cpu
@@ -35,7 +35,8 @@ endchoice
if M68KCLASSIC
config M68000
- bool
+ bool "MC68000"
+ depends on !MMU
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
select CPU_HAS_NO_UNALIGNED
diff --git a/trunk/arch/m68k/Makefile b/trunk/arch/m68k/Makefile
index 7636751f2f87..2f02acfb8edf 100644
--- a/trunk/arch/m68k/Makefile
+++ b/trunk/arch/m68k/Makefile
@@ -92,7 +92,7 @@ endif
head-y := arch/m68k/kernel/head.o
head-$(CONFIG_SUN3) := arch/m68k/kernel/sun3-head.o
head-$(CONFIG_M68360) := arch/m68k/platform/68360/head.o
-head-$(CONFIG_M68000) := arch/m68k/platform/68328/head.o
+head-$(CONFIG_M68000) := arch/m68k/platform/68000/head.o
head-$(CONFIG_COLDFIRE) := arch/m68k/platform/coldfire/head.o
core-y += arch/m68k/kernel/ arch/m68k/mm/
@@ -114,9 +114,7 @@ core-$(CONFIG_M68040) += arch/m68k/fpsp040/
core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
core-$(CONFIG_M68360) += arch/m68k/platform/68360/
-core-$(CONFIG_M68000) += arch/m68k/platform/68328/
-core-$(CONFIG_M68EZ328) += arch/m68k/platform/68EZ328/
-core-$(CONFIG_M68VZ328) += arch/m68k/platform/68VZ328/
+core-$(CONFIG_M68000) += arch/m68k/platform/68000/
core-$(CONFIG_COLDFIRE) += arch/m68k/platform/coldfire/
diff --git a/trunk/arch/m68k/include/asm/m5249sim.h b/trunk/arch/m68k/include/asm/m5249sim.h
deleted file mode 100644
index fdf45e6807c9..000000000000
--- a/trunk/arch/m68k/include/asm/m5249sim.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/****************************************************************************/
-
-/*
- * m5249sim.h -- ColdFire 5249 System Integration Module support.
- *
- * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
- */
-
-/****************************************************************************/
-#ifndef m5249sim_h
-#define m5249sim_h
-/****************************************************************************/
-
-#define CPU_NAME "COLDFIRE(m5249)"
-#define CPU_INSTR_PER_JIFFY 3
-#define MCF_BUSCLK (MCF_CLK / 2)
-
-#include
-
-/*
- * The 5249 has a second MBAR region, define its address.
- */
-#define MCF_MBAR2 0x80000000
-
-/*
- * Define the 5249 SIM register set addresses.
- */
-#define MCFSIM_RSR (MCF_MBAR + 0x00) /* Reset Status */
-#define MCFSIM_SYPCR (MCF_MBAR + 0x01) /* System Protection */
-#define MCFSIM_SWIVR (MCF_MBAR + 0x02) /* SW Watchdog intr */
-#define MCFSIM_SWSR (MCF_MBAR + 0x03) /* SW Watchdog srv */
-#define MCFSIM_PAR (MCF_MBAR + 0x04) /* Pin Assignment */
-#define MCFSIM_IRQPAR (MCF_MBAR + 0x06) /* Intr Assignment */
-#define MCFSIM_MPARK (MCF_MBAR + 0x0C) /* BUS Master Ctrl */
-#define MCFSIM_IPR (MCF_MBAR + 0x40) /* Interrupt Pending */
-#define MCFSIM_IMR (MCF_MBAR + 0x44) /* Interrupt Mask */
-#define MCFSIM_AVR (MCF_MBAR + 0x4b) /* Autovector Ctrl */
-#define MCFSIM_ICR0 (MCF_MBAR + 0x4c) /* Intr Ctrl reg 0 */
-#define MCFSIM_ICR1 (MCF_MBAR + 0x4d) /* Intr Ctrl reg 1 */
-#define MCFSIM_ICR2 (MCF_MBAR + 0x4e) /* Intr Ctrl reg 2 */
-#define MCFSIM_ICR3 (MCF_MBAR + 0x4f) /* Intr Ctrl reg 3 */
-#define MCFSIM_ICR4 (MCF_MBAR + 0x50) /* Intr Ctrl reg 4 */
-#define MCFSIM_ICR5 (MCF_MBAR + 0x51) /* Intr Ctrl reg 5 */
-#define MCFSIM_ICR6 (MCF_MBAR + 0x52) /* Intr Ctrl reg 6 */
-#define MCFSIM_ICR7 (MCF_MBAR + 0x53) /* Intr Ctrl reg 7 */
-#define MCFSIM_ICR8 (MCF_MBAR + 0x54) /* Intr Ctrl reg 8 */
-#define MCFSIM_ICR9 (MCF_MBAR + 0x55) /* Intr Ctrl reg 9 */
-#define MCFSIM_ICR10 (MCF_MBAR + 0x56) /* Intr Ctrl reg 10 */
-#define MCFSIM_ICR11 (MCF_MBAR + 0x57) /* Intr Ctrl reg 11 */
-
-#define MCFSIM_CSAR0 (MCF_MBAR + 0x80) /* CS 0 Address reg */
-#define MCFSIM_CSMR0 (MCF_MBAR + 0x84) /* CS 0 Mask reg */
-#define MCFSIM_CSCR0 (MCF_MBAR + 0x8a) /* CS 0 Control reg */
-#define MCFSIM_CSAR1 (MCF_MBAR + 0x8c) /* CS 1 Address reg */
-#define MCFSIM_CSMR1 (MCF_MBAR + 0x90) /* CS 1 Mask reg */
-#define MCFSIM_CSCR1 (MCF_MBAR + 0x96) /* CS 1 Control reg */
-#define MCFSIM_CSAR2 (MCF_MBAR + 0x98) /* CS 2 Address reg */
-#define MCFSIM_CSMR2 (MCF_MBAR + 0x9c) /* CS 2 Mask reg */
-#define MCFSIM_CSCR2 (MCF_MBAR + 0xa2) /* CS 2 Control reg */
-#define MCFSIM_CSAR3 (MCF_MBAR + 0xa4) /* CS 3 Address reg */
-#define MCFSIM_CSMR3 (MCF_MBAR + 0xa8) /* CS 3 Mask reg */
-#define MCFSIM_CSCR3 (MCF_MBAR + 0xae) /* CS 3 Control reg */
-
-#define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */
-#define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */
-#define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */
-#define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM 1 Addr/Ctrl */
-#define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM 1 Mask */
-
-/*
- * Timer module.
- */
-#define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */
-#define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */
-
-/*
- * UART module.
- */
-#define MCFUART_BASE0 (MCF_MBAR + 0x1c0) /* Base address UART0 */
-#define MCFUART_BASE1 (MCF_MBAR + 0x200) /* Base address UART1 */
-
-/*
- * QSPI module.
- */
-#define MCFQSPI_BASE (MCF_MBAR + 0x300) /* Base address QSPI */
-#define MCFQSPI_SIZE 0x40 /* Register set size */
-
-#define MCFQSPI_CS0 29
-#define MCFQSPI_CS1 24
-#define MCFQSPI_CS2 21
-#define MCFQSPI_CS3 22
-
-/*
- * DMA unit base addresses.
- */
-#define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */
-#define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */
-#define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */
-#define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */
-
-/*
- * Some symbol defines for the above...
- */
-#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
-#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
-#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
-#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
-#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
-#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
-#define MCFSIM_DMA1ICR MCFSIM_ICR7 /* DMA 1 ICR */
-#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
-#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
-#define MCFSIM_QSPIICR MCFSIM_ICR10 /* QSPI ICR */
-
-/*
- * Define system peripheral IRQ usage.
- */
-#define MCF_IRQ_QSPI 28 /* QSPI, Level 4 */
-#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
-#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
-
-#define MCF_IRQ_UART0 73 /* UART0 */
-#define MCF_IRQ_UART1 74 /* UART1 */
-
-/*
- * General purpose IO registers (in MBAR2).
- */
-#define MCFSIM2_GPIOREAD (MCF_MBAR2 + 0x000) /* GPIO read values */
-#define MCFSIM2_GPIOWRITE (MCF_MBAR2 + 0x004) /* GPIO write values */
-#define MCFSIM2_GPIOENABLE (MCF_MBAR2 + 0x008) /* GPIO enabled */
-#define MCFSIM2_GPIOFUNC (MCF_MBAR2 + 0x00C) /* GPIO function */
-#define MCFSIM2_GPIO1READ (MCF_MBAR2 + 0x0B0) /* GPIO1 read values */
-#define MCFSIM2_GPIO1WRITE (MCF_MBAR2 + 0x0B4) /* GPIO1 write values */
-#define MCFSIM2_GPIO1ENABLE (MCF_MBAR2 + 0x0B8) /* GPIO1 enabled */
-#define MCFSIM2_GPIO1FUNC (MCF_MBAR2 + 0x0BC) /* GPIO1 function */
-
-#define MCFSIM2_GPIOINTSTAT (MCF_MBAR2 + 0xc0) /* GPIO intr status */
-#define MCFSIM2_GPIOINTCLEAR (MCF_MBAR2 + 0xc0) /* GPIO intr clear */
-#define MCFSIM2_GPIOINTENABLE (MCF_MBAR2 + 0xc4) /* GPIO intr enable */
-
-#define MCFSIM2_INTLEVEL1 (MCF_MBAR2 + 0x140) /* Intr level reg 1 */
-#define MCFSIM2_INTLEVEL2 (MCF_MBAR2 + 0x144) /* Intr level reg 2 */
-#define MCFSIM2_INTLEVEL3 (MCF_MBAR2 + 0x148) /* Intr level reg 3 */
-#define MCFSIM2_INTLEVEL4 (MCF_MBAR2 + 0x14c) /* Intr level reg 4 */
-#define MCFSIM2_INTLEVEL5 (MCF_MBAR2 + 0x150) /* Intr level reg 5 */
-#define MCFSIM2_INTLEVEL6 (MCF_MBAR2 + 0x154) /* Intr level reg 6 */
-#define MCFSIM2_INTLEVEL7 (MCF_MBAR2 + 0x158) /* Intr level reg 7 */
-#define MCFSIM2_INTLEVEL8 (MCF_MBAR2 + 0x15c) /* Intr level reg 8 */
-
-#define MCFSIM2_DMAROUTE (MCF_MBAR2 + 0x188) /* DMA routing */
-
-#define MCFSIM2_IDECONFIG1 (MCF_MBAR2 + 0x18c) /* IDEconfig1 */
-#define MCFSIM2_IDECONFIG2 (MCF_MBAR2 + 0x190) /* IDEconfig2 */
-
-/*
- * Define the base interrupt for the second interrupt controller.
- * We set it to 128, out of the way of the base interrupts, and plenty
- * of room for its 64 interrupts.
- */
-#define MCFINTC2_VECBASE 128
-
-#define MCFINTC2_GPIOIRQ0 (MCFINTC2_VECBASE + 32)
-#define MCFINTC2_GPIOIRQ1 (MCFINTC2_VECBASE + 33)
-#define MCFINTC2_GPIOIRQ2 (MCFINTC2_VECBASE + 34)
-#define MCFINTC2_GPIOIRQ3 (MCFINTC2_VECBASE + 35)
-#define MCFINTC2_GPIOIRQ4 (MCFINTC2_VECBASE + 36)
-#define MCFINTC2_GPIOIRQ5 (MCFINTC2_VECBASE + 37)
-#define MCFINTC2_GPIOIRQ6 (MCFINTC2_VECBASE + 38)
-#define MCFINTC2_GPIOIRQ7 (MCFINTC2_VECBASE + 39)
-
-/*
- * Generic GPIO support
- */
-#define MCFGPIO_PIN_MAX 64
-#define MCFGPIO_IRQ_MAX -1
-#define MCFGPIO_IRQ_VECBASE -1
-
-/****************************************************************************/
-
-#ifdef __ASSEMBLER__
-
-/*
- * The M5249C3 board needs a little help getting all its SIM devices
- * initialized at kernel start time. dBUG doesn't set much up, so
- * we need to do it manually.
- */
-.macro m5249c3_setup
- /*
- * Set MBAR1 and MBAR2, just incase they are not set.
- */
- movel #0x10000001,%a0
- movec %a0,%MBAR /* map MBAR region */
- subql #1,%a0 /* get MBAR address in a0 */
-
- movel #0x80000001,%a1
- movec %a1,#3086 /* map MBAR2 region */
- subql #1,%a1 /* get MBAR2 address in a1 */
-
- /*
- * Move secondary interrupts to their base (128).
- */
- moveb #MCFINTC2_VECBASE,%d0
- moveb %d0,0x16b(%a1) /* interrupt base register */
-
- /*
- * Work around broken CSMR0/DRAM vector problem.
- */
- movel #0x001F0021,%d0 /* disable C/I bit */
- movel %d0,0x84(%a0) /* set CSMR0 */
-
- /*
- * Disable the PLL firstly. (Who knows what state it is
- * in here!).
- */
- movel 0x180(%a1),%d0 /* get current PLL value */
- andl #0xfffffffe,%d0 /* PLL bypass first */
- movel %d0,0x180(%a1) /* set PLL register */
- nop
-
-#if CONFIG_CLOCK_FREQ == 140000000
- /*
- * Set initial clock frequency. This assumes M5249C3 board
- * is fitted with 11.2896MHz crystal. It will program the
- * PLL for 140MHz. Lets go fast :-)
- */
- movel #0x125a40f0,%d0 /* set for 140MHz */
- movel %d0,0x180(%a1) /* set PLL register */
- orl #0x1,%d0
- movel %d0,0x180(%a1) /* set PLL register */
-#endif
-
- /*
- * Setup CS1 for ethernet controller.
- * (Setup as per M5249C3 doco).
- */
- movel #0xe0000000,%d0 /* CS1 mapped at 0xe0000000 */
- movel %d0,0x8c(%a0)
- movel #0x001f0021,%d0 /* CS1 size of 1Mb */
- movel %d0,0x90(%a0)
- movew #0x0080,%d0 /* CS1 = 16bit port, AA */
- movew %d0,0x96(%a0)
-
- /*
- * Setup CS2 for IDE interface.
- */
- movel #0x50000000,%d0 /* CS2 mapped at 0x50000000 */
- movel %d0,0x98(%a0)
- movel #0x001f0001,%d0 /* CS2 size of 1MB */
- movel %d0,0x9c(%a0)
- movew #0x0080,%d0 /* CS2 = 16bit, TA */
- movew %d0,0xa2(%a0)
-
- movel #0x00107000,%d0 /* IDEconfig1 */
- movel %d0,0x18c(%a1)
- movel #0x000c0400,%d0 /* IDEconfig2 */
- movel %d0,0x190(%a1)
-
- movel #0x00080000,%d0 /* GPIO19, IDE reset bit */
- orl %d0,0xc(%a1) /* function GPIO19 */
- orl %d0,0x8(%a1) /* enable GPIO19 as output */
- orl %d0,0x4(%a1) /* de-assert IDE reset */
-.endm
-
-#define PLATFORM_SETUP m5249c3_setup
-
-#endif /* __ASSEMBLER__ */
-
-/****************************************************************************/
-#endif /* m5249sim_h */
diff --git a/trunk/arch/m68k/include/asm/m525xsim.h b/trunk/arch/m68k/include/asm/m525xsim.h
index acab61cb91ed..e33f5bb6aca8 100644
--- a/trunk/arch/m68k/include/asm/m525xsim.h
+++ b/trunk/arch/m68k/include/asm/m525xsim.h
@@ -12,6 +12,11 @@
#define m525xsim_h
/****************************************************************************/
+/*
+ * This header supports ColdFire 5249, 5251 and 5253. There are a few
+ * little differences between them, but most of the peripheral support
+ * can be used by all of them.
+ */
#define CPU_NAME "COLDFIRE(m525x)"
#define CPU_INSTR_PER_JIFFY 3
#define MCF_BUSCLK (MCF_CLK / 2)
@@ -65,6 +70,8 @@
#define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */
#define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */
#define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */
+#define MCFSIM_DACR1 (MCF_MBAR + 0x110) /* DRAM 1 Addr/Ctrl */
+#define MCFSIM_DMR1 (MCF_MBAR + 0x114) /* DRAM 1 Mask */
/*
* Secondary Interrupt Controller (in MBAR2)
@@ -101,11 +108,17 @@
#define MCFQSPI_BASE (MCF_MBAR + 0x300) /* Base address QSPI */
#define MCFQSPI_SIZE 0x40 /* Register set size */
-
+#ifdef CONFIG_M5249
+#define MCFQSPI_CS0 29
+#define MCFQSPI_CS1 24
+#define MCFQSPI_CS2 21
+#define MCFQSPI_CS3 22
+#else
#define MCFQSPI_CS0 15
#define MCFQSPI_CS1 16
#define MCFQSPI_CS2 24
#define MCFQSPI_CS3 28
+#endif
/*
* I2C module.
@@ -115,6 +128,7 @@
#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base addreess I2C1 */
#define MCFI2C_SIZE1 0x20 /* Register set size */
+
/*
* DMA unit base addresses.
*/
@@ -163,6 +177,7 @@
#define MCF_IRQ_GPIO4 (MCFINTC2_VECBASE + 36)
#define MCF_IRQ_GPIO5 (MCFINTC2_VECBASE + 37)
#define MCF_IRQ_GPIO6 (MCFINTC2_VECBASE + 38)
+#define MCF_IRQ_GPIO7 (MCFINTC2_VECBASE + 39)
#define MCF_IRQ_USBWUP (MCFINTC2_VECBASE + 40)
#define MCF_IRQ_I2C1 (MCFINTC2_VECBASE + 62)
@@ -183,12 +198,111 @@
#define MCFSIM2_GPIOINTCLEAR (MCF_MBAR2 + 0xc0) /* GPIO intr clear */
#define MCFSIM2_GPIOINTENABLE (MCF_MBAR2 + 0xc4) /* GPIO intr enable */
+#define MCFSIM2_DMAROUTE (MCF_MBAR2 + 0x188) /* DMA routing */
+#define MCFSIM2_IDECONFIG1 (MCF_MBAR2 + 0x18c) /* IDEconfig1 */
+#define MCFSIM2_IDECONFIG2 (MCF_MBAR2 + 0x190) /* IDEconfig2 */
+
/*
* Generic GPIO support
*/
#define MCFGPIO_PIN_MAX 64
+#ifdef CONFIG_M5249
+#define MCFGPIO_IRQ_MAX -1
+#define MCFGPIO_IRQ_VECBASE -1
+#else
#define MCFGPIO_IRQ_MAX 7
#define MCFGPIO_IRQ_VECBASE MCF_IRQ_GPIO0
+#endif
+
+/****************************************************************************/
+
+#ifdef __ASSEMBLER__
+#ifdef CONFIG_M5249C3
+/*
+ * The M5249C3 board needs a little help getting all its SIM devices
+ * initialized at kernel start time. dBUG doesn't set much up, so
+ * we need to do it manually.
+ */
+.macro m5249c3_setup
+ /*
+ * Set MBAR1 and MBAR2, just incase they are not set.
+ */
+ movel #0x10000001,%a0
+ movec %a0,%MBAR /* map MBAR region */
+ subql #1,%a0 /* get MBAR address in a0 */
+
+ movel #0x80000001,%a1
+ movec %a1,#3086 /* map MBAR2 region */
+ subql #1,%a1 /* get MBAR2 address in a1 */
+
+ /*
+ * Move secondary interrupts to their base (128).
+ */
+ moveb #MCFINTC2_VECBASE,%d0
+ moveb %d0,0x16b(%a1) /* interrupt base register */
+
+ /*
+ * Work around broken CSMR0/DRAM vector problem.
+ */
+ movel #0x001F0021,%d0 /* disable C/I bit */
+ movel %d0,0x84(%a0) /* set CSMR0 */
+
+ /*
+ * Disable the PLL firstly. (Who knows what state it is
+ * in here!).
+ */
+ movel 0x180(%a1),%d0 /* get current PLL value */
+ andl #0xfffffffe,%d0 /* PLL bypass first */
+ movel %d0,0x180(%a1) /* set PLL register */
+ nop
+
+#if CONFIG_CLOCK_FREQ == 140000000
+ /*
+ * Set initial clock frequency. This assumes M5249C3 board
+ * is fitted with 11.2896MHz crystal. It will program the
+ * PLL for 140MHz. Lets go fast :-)
+ */
+ movel #0x125a40f0,%d0 /* set for 140MHz */
+ movel %d0,0x180(%a1) /* set PLL register */
+ orl #0x1,%d0
+ movel %d0,0x180(%a1) /* set PLL register */
+#endif
+
+ /*
+ * Setup CS1 for ethernet controller.
+ * (Setup as per M5249C3 doco).
+ */
+ movel #0xe0000000,%d0 /* CS1 mapped at 0xe0000000 */
+ movel %d0,0x8c(%a0)
+ movel #0x001f0021,%d0 /* CS1 size of 1Mb */
+ movel %d0,0x90(%a0)
+ movew #0x0080,%d0 /* CS1 = 16bit port, AA */
+ movew %d0,0x96(%a0)
+
+ /*
+ * Setup CS2 for IDE interface.
+ */
+ movel #0x50000000,%d0 /* CS2 mapped at 0x50000000 */
+ movel %d0,0x98(%a0)
+ movel #0x001f0001,%d0 /* CS2 size of 1MB */
+ movel %d0,0x9c(%a0)
+ movew #0x0080,%d0 /* CS2 = 16bit, TA */
+ movew %d0,0xa2(%a0)
+
+ movel #0x00107000,%d0 /* IDEconfig1 */
+ movel %d0,0x18c(%a1)
+ movel #0x000c0400,%d0 /* IDEconfig2 */
+ movel %d0,0x190(%a1)
+
+ movel #0x00080000,%d0 /* GPIO19, IDE reset bit */
+ orl %d0,0xc(%a1) /* function GPIO19 */
+ orl %d0,0x8(%a1) /* enable GPIO19 as output */
+ orl %d0,0x4(%a1) /* de-assert IDE reset */
+.endm
+
+#define PLATFORM_SETUP m5249c3_setup
+#endif /* CONFIG_M5249C3 */
+#endif /* __ASSEMBLER__ */
/****************************************************************************/
#endif /* m525xsim_h */
diff --git a/trunk/arch/m68k/include/asm/mcfclk.h b/trunk/arch/m68k/include/asm/mcfclk.h
index b676a02bb392..ea4791e3a557 100644
--- a/trunk/arch/m68k/include/asm/mcfclk.h
+++ b/trunk/arch/m68k/include/asm/mcfclk.h
@@ -8,7 +8,6 @@
struct clk;
-#ifdef MCFPM_PPMCR0
struct clk_ops {
void (*enable)(struct clk *);
void (*disable)(struct clk *);
@@ -23,6 +22,8 @@ struct clk {
};
extern struct clk *mcf_clks[];
+
+#ifdef MCFPM_PPMCR0
extern struct clk_ops clk_ops0;
#ifdef MCFPM_PPMCR1
extern struct clk_ops clk_ops1;
@@ -38,6 +39,12 @@ static struct clk __clk_##clk_bank##_##clk_slot = { \
void __clk_init_enabled(struct clk *);
void __clk_init_disabled(struct clk *);
+#else
+#define DEFINE_CLK(clk_ref, clk_name, clk_rate) \
+ static struct clk clk_##clk_ref = { \
+ .name = clk_name, \
+ .rate = clk_rate, \
+ }
#endif /* MCFPM_PPMCR0 */
#endif /* mcfclk_h */
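
The new non-PM variant of DEFINE_CLK() above only records a clock name and a fixed
rate, which is all the simpler ColdFire parts need. A minimal sketch of how a driver
would then consume one of these clocks through the ordinary clk API follows; the
"mcfuart.0" id matches the clock tables added to the platform files later in this
patch, while the probe helper, the message and the error handling here are purely
illustrative assumptions, not part of the patch.

    /*
     * Illustrative only: look up a fixed-rate ColdFire clock by the name it
     * was registered with via DEFINE_CLK() and read its rate.  The helper
     * name and the dev_info() message are assumptions.
     */
    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_get_uart_clock(struct device *dev)
    {
    	struct clk *clk;

    	clk = clk_get(dev, "mcfuart.0");	/* matched by name in coldfire/clk.c */
    	if (IS_ERR(clk))
    		return PTR_ERR(clk);

    	dev_info(dev, "uart clock rate: %lu Hz\n", clk_get_rate(clk));

    	clk_put(clk);
    	return 0;
    }
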
diff --git a/trunk/arch/m68k/include/asm/mcfsim.h b/trunk/arch/m68k/include/asm/mcfsim.h
index 7a83e619e73b..a04fd9b2714c 100644
--- a/trunk/arch/m68k/include/asm/mcfsim.h
+++ b/trunk/arch/m68k/include/asm/mcfsim.h
@@ -24,10 +24,7 @@
#elif defined(CONFIG_M523x)
#include
#include
-#elif defined(CONFIG_M5249)
-#include
-#include
-#elif defined(CONFIG_M525x)
+#elif defined(CONFIG_M5249) || defined(CONFIG_M525x)
#include
#include
#elif defined(CONFIG_M527x)
diff --git a/trunk/arch/m68k/include/asm/page_no.h b/trunk/arch/m68k/include/asm/page_no.h
index 90595721185f..ef209169579a 100644
--- a/trunk/arch/m68k/include/asm/page_no.h
+++ b/trunk/arch/m68k/include/asm/page_no.h
@@ -26,7 +26,7 @@ extern unsigned long memory_end;
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+#define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
#define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn))
#define page_to_pfn(page) virt_to_pfn(page_to_virt(page))
diff --git a/trunk/arch/m68k/lib/memcpy.c b/trunk/arch/m68k/lib/memcpy.c
index 10ca051d56b8..c1e2dfb206f3 100644
--- a/trunk/arch/m68k/lib/memcpy.c
+++ b/trunk/arch/m68k/lib/memcpy.c
@@ -10,7 +10,7 @@
void *memcpy(void *to, const void *from, size_t n)
{
void *xto = to;
- size_t temp, temp1;
+ size_t temp;
if (!n)
return xto;
@@ -47,6 +47,7 @@ void *memcpy(void *to, const void *from, size_t n)
for (; temp; temp--)
*lto++ = *lfrom++;
#else
+ size_t temp1;
asm volatile (
" movel %2,%3\n"
" andw #7,%3\n"
diff --git a/trunk/arch/m68k/platform/68000/Makefile b/trunk/arch/m68k/platform/68000/Makefile
new file mode 100644
index 000000000000..1eab70c7194b
--- /dev/null
+++ b/trunk/arch/m68k/platform/68000/Makefile
@@ -0,0 +1,18 @@
+##################################################
+#
+# Makefile for 68000 core based cpus
+#
+# 2012.10.21, Luis Alves
+# Merged all 68000 based cpu's config
+# files into a single directory.
+#
+
+# 68328, 68EZ328, 68VZ328
+
+obj-y += entry.o ints.o timers.o
+obj-$(CONFIG_M68328) += m68328.o
+obj-$(CONFIG_M68EZ328) += m68EZ328.o
+obj-$(CONFIG_M68VZ328) += m68VZ328.o
+obj-$(CONFIG_ROM) += romvec.o
+
+extra-y := head.o
diff --git a/trunk/arch/m68k/platform/68VZ328/bootlogo.h b/trunk/arch/m68k/platform/68000/bootlogo-vz.h
similarity index 100%
rename from trunk/arch/m68k/platform/68VZ328/bootlogo.h
rename to trunk/arch/m68k/platform/68000/bootlogo-vz.h
diff --git a/trunk/arch/m68k/platform/68328/bootlogo.h b/trunk/arch/m68k/platform/68000/bootlogo.h
similarity index 100%
rename from trunk/arch/m68k/platform/68328/bootlogo.h
rename to trunk/arch/m68k/platform/68000/bootlogo.h
diff --git a/trunk/arch/m68k/platform/68328/entry.S b/trunk/arch/m68k/platform/68000/entry.S
similarity index 100%
rename from trunk/arch/m68k/platform/68328/entry.S
rename to trunk/arch/m68k/platform/68000/entry.S
diff --git a/trunk/arch/m68k/platform/68000/head.S b/trunk/arch/m68k/platform/68000/head.S
new file mode 100644
index 000000000000..536ef9616dad
--- /dev/null
+++ b/trunk/arch/m68k/platform/68000/head.S
@@ -0,0 +1,240 @@
+/*
+ * head.S - Common startup code for 68000 core based CPUs
+ *
+ * 2012.10.21, Luis Alves , Single head.S file for all
+ * 68000 core based CPU's. Based on the sources from:
+ * Coldfire by Greg Ungerer
+ * 68328 by D. Jeff Dionne ,
+ * Kenneth Albanowski ,
+ * The Silver Hammer Group, Ltd.
+ *
+ */
+
+#include
+#include
+#include
+#include
+
+
+/*****************************************************************************
+ * UCSIMM and UCDIMM use CONFIG_MEMORY_RESERVE to reserve some RAM
+ *****************************************************************************/
+#ifdef CONFIG_MEMORY_RESERVE
+#define RAMEND (CONFIG_RAMBASE+CONFIG_RAMSIZE)-(CONFIG_MEMORY_RESERVE*0x100000)
+#else
+#define RAMEND (CONFIG_RAMBASE+CONFIG_RAMSIZE)
+#endif
+/*****************************************************************************/
+
+.global _start
+.global _rambase
+.global _ramvec
+.global _ramstart
+.global _ramend
+
+#if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD)
+.global bootlogo_bits
+#endif
+
+/* If DEBUG_HEAD_CODE is defined, the serial port in the 68x328 is initialized */
+/* #define DEBUG_HEAD_CODE */
+#undef DEBUG_HEAD_CODE
+
+.data
+
+/*****************************************************************************
+ * RAM setup pointers. Used by the kernel to determine RAM location and size.
+ *****************************************************************************/
+
+_rambase:
+ .long 0
+_ramvec:
+ .long 0
+_ramstart:
+ .long 0
+_ramend:
+ .long 0
+
+__HEAD
+
+/*****************************************************************************
+ * Entry point, where all begins!
+ *****************************************************************************/
+
+_start:
+
+/* The Pilot needs this specific signature at the start of ROM */
+#ifdef CONFIG_PILOT
+ .byte 0x4e, 0xfa, 0x00, 0x0a /* bra opcode (jmp 10 bytes) */
+ .byte 'b', 'o', 'o', 't'
+ .word 10000
+ nop
+ moveq #0, %d0
+ movew %d0, 0xfffff618 /* Watchdog off */
+ movel #0x00011f07, 0xfffff114 /* CS A1 Mask */
+#endif /* CONFIG_PILOT */
+
+ movew #0x2700, %sr /* disable all interrupts */
+
+/*****************************************************************************
+ * Setup PLL and wait for it to settle (in 68x328 cpu's).
+ * Also, if enabled, init serial port.
+ *****************************************************************************/
+#if defined(CONFIG_M68328) || \
+ defined(CONFIG_M68EZ328) || \
+ defined(CONFIG_M68VZ328)
+
+/* Serial port setup. Should only be needed if debugging this startup code. */
+#ifdef DEBUG_HEAD_CODE
+ movew #0x0800, 0xfffff906 /* Ignore CTS */
+ movew #0x010b, 0xfffff902 /* BAUD to 9600 */
+ movew #0xe100, 0xfffff900 /* enable */
+#endif /* DEBUG_HEAD */
+
+#ifdef CONFIG_PILOT
+ movew #0x2410, 0xfffff200 /* PLLCR */
+#else
+ movew #0x2400, 0xfffff200 /* PLLCR */
+#endif
+ movew #0x0123, 0xfffff202 /* PLLFSR */
+ moveq #0, %d0
+ movew #16384, %d0 /* PLL settle wait loop */
+_pll_settle:
+ subw #1, %d0
+ bne _pll_settle
+#endif /* CONFIG_M68x328 */
+
+
+/*****************************************************************************
+ * If the kernel is running from ROM, some specific initialization has to be done.
+ * (Assuming that everything is already init'ed when running from RAM)
+ *****************************************************************************/
+#ifdef CONFIG_ROMKERNEL
+
+/*****************************************************************************
+ * Init chip registers (uCsimm specific)
+ *****************************************************************************/
+#ifdef CONFIG_UCSIMM
+ moveb #0x00, 0xfffffb0b /* Watchdog off */
+ moveb #0x10, 0xfffff000 /* SCR */
+ moveb #0x00, 0xfffff40b /* enable chip select */
+ moveb #0x00, 0xfffff423 /* enable /DWE */
+ moveb #0x08, 0xfffffd0d /* disable hardmap */
+ moveb #0x07, 0xfffffd0e /* level 7 interrupt clear */
+ movew #0x8600, 0xfffff100 /* FLASH at 0x10c00000 */
+ movew #0x018b, 0xfffff110 /* 2Meg, enable, 0ws */
+ movew #0x8f00, 0xfffffc00 /* DRAM configuration */
+ movew #0x9667, 0xfffffc02 /* DRAM control */
+ movew #0x0000, 0xfffff106 /* DRAM at 0x00000000 */
+ movew #0x068f, 0xfffff116 /* 8Meg, enable, 0ws */
+ moveb #0x40, 0xfffff300 /* IVR */
+ movel #0x007FFFFF, %d0 /* IMR */
+ movel %d0, 0xfffff304
+ moveb 0xfffff42b, %d0
+ andb #0xe0, %d0
+ moveb %d0, 0xfffff42b
+#endif
+
+/*****************************************************************************
+ * Init LCD controller.
+ * (Assuming that LCD controller is already init'ed when running from RAM)
+ *****************************************************************************/
+#ifdef CONFIG_INIT_LCD
+#ifdef CONFIG_PILOT
+ moveb #0, 0xfffffA27 /* LCKCON */
+ movel #_start, 0xfffffA00 /* LSSA */
+ moveb #0xa, 0xfffffA05 /* LVPW */
+ movew #0x9f, 0xFFFFFa08 /* LXMAX */
+ movew #0x9f, 0xFFFFFa0a /* LYMAX */
+ moveb #9, 0xfffffa29 /* LBAR */
+ moveb #0, 0xfffffa25 /* LPXCD */
+ moveb #0x04, 0xFFFFFa20 /* LPICF */
+ moveb #0x58, 0xfffffA27 /* LCKCON */
+ moveb #0x85, 0xfffff429 /* PFDATA */
+ moveb #0xd8, 0xfffffA27 /* LCKCON */
+ moveb #0xc5, 0xfffff429 /* PFDATA */
+ moveb #0xd5, 0xfffff429 /* PFDATA */
+ movel #bootlogo_bits, 0xFFFFFA00 /* LSSA */
+ moveb #10, 0xFFFFFA05 /* LVPW */
+ movew #160, 0xFFFFFA08 /* LXMAX */
+ movew #160, 0xFFFFFA0A /* LYMAX */
+#else /* CONFIG_PILOT */
+ movel #bootlogo_bits, 0xfffffA00 /* LSSA */
+ moveb #0x28, 0xfffffA05 /* LVPW */
+ movew #0x280, 0xFFFFFa08 /* LXMAX */
+ movew #0x1df, 0xFFFFFa0a /* LYMAX */
+ moveb #0, 0xfffffa29 /* LBAR */
+ moveb #0, 0xfffffa25 /* LPXCD */
+ moveb #0x08, 0xFFFFFa20 /* LPICF */
+ moveb #0x01, 0xFFFFFA21 /* -ve pol */
+ moveb #0x81, 0xfffffA27 /* LCKCON */
+ movew #0xff00, 0xfffff412 /* LCD pins */
+#endif /* CONFIG_PILOT */
+#endif /* CONFIG_INIT_LCD */
+
+/*****************************************************************************
+ * Kernel is running from FLASH/ROM (XIP)
+ * Copy init text & data to RAM
+ *****************************************************************************/
+ moveal #_etext, %a0
+ moveal #_sdata, %a1
+ moveal #__bss_start, %a2
+_copy_initmem:
+ movel %a0@+, %a1@+
+ cmpal %a1, %a2
+ bhi _copy_initmem
+#endif /* CONFIG_ROMKERNEL */
+
+/*****************************************************************************
+ * Setup basic memory information for kernel
+ *****************************************************************************/
+ movel #CONFIG_VECTORBASE,_ramvec /* set vector base location */
+ movel #CONFIG_RAMBASE,_rambase /* set the base of RAM */
+ movel #RAMEND, _ramend /* set end ram addr */
+ lea __bss_stop,%a1
+ movel %a1,_ramstart
+
+/*****************************************************************************
+ * If the kernel is in RAM, move romfs to right above bss and
+ * adjust _ramstart to where romfs ends.
+ *
+ * (Do this only if CONFIG_MTD_UCLINUX is true)
+ *****************************************************************************/
+
+#if defined(CONFIG_ROMFS_FS) && defined(CONFIG_RAMKERNEL) && \
+ defined(CONFIG_MTD_UCLINUX)
+ lea __bss_start, %a0 /* get start of bss */
+ lea __bss_stop, %a1 /* set up destination */
+ movel %a0, %a2 /* copy of bss start */
+
+ movel 8(%a0), %d0 /* get size of ROMFS */
+ addql #8, %d0 /* allow for rounding */
+ andl #0xfffffffc, %d0 /* whole words */
+
+ addl %d0, %a0 /* copy from end */
+ addl %d0, %a1 /* copy from end */
+ movel %a1, _ramstart /* set start of ram */
+_copy_romfs:
+ movel -(%a0), -(%a1) /* copy dword */
+ cmpl %a0, %a2 /* check if at end */
+ bne _copy_romfs
+#endif /* CONFIG_ROMFS_FS && CONFIG_RAMKERNEL && CONFIG_MTD_UCLINUX */
+
+/*****************************************************************************
+ * Clear bss region
+ *****************************************************************************/
+ lea __bss_start, %a0 /* get start of bss */
+ lea __bss_stop, %a1 /* get end of bss */
+_clear_bss:
+ movel #0, (%a0)+ /* clear each word */
+ cmpl %a0, %a1 /* check if at end */
+ bne _clear_bss
+
+/*****************************************************************************
+ * Load the current task pointer and stack.
+ *****************************************************************************/
+ lea init_thread_union,%a0
+ lea THREAD_SIZE(%a0),%sp
+ jsr start_kernel /* start Linux kernel */
+_exit:
+ jmp _exit /* should never get here */
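
The CONFIG_MTD_UCLINUX block in the new head.S above is the one piece that is easy
to misread, so here is a rough C rendition of what the assembly does. It assumes,
as the assembly does, that a romfs image placed right after the kernel ends up at
__bss_start when a RAM kernel is loaded; the function name and the extern
declarations are illustrative only and do not appear in the patch.

    /*
     * Illustrative C rendition of the ROMFS move in head.S: the romfs image
     * stores its total size in a 32-bit word at offset 8; round it up to
     * whole longwords, then copy the image to just above __bss_stop,
     * walking backwards because the source and destination overlap.
     */
    extern char __bss_start[], __bss_stop[];
    extern unsigned long _ramstart;

    static void move_romfs_above_bss(void)
    {
    	unsigned long size = *(unsigned long *)(__bss_start + 8);
    	unsigned long *from, *to;

    	size = (size + 8) & ~3UL;		/* allow for rounding, whole words */
    	from = (unsigned long *)(__bss_start + size);
    	to   = (unsigned long *)(__bss_stop  + size);
    	_ramstart = (unsigned long)to;		/* free RAM now starts above the romfs */

    	while ((char *)from > __bss_start)	/* copy backwards, one longword at a time */
    		*--to = *--from;
    }
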
diff --git a/trunk/arch/m68k/platform/68328/ints.c b/trunk/arch/m68k/platform/68000/ints.c
similarity index 98%
rename from trunk/arch/m68k/platform/68328/ints.c
rename to trunk/arch/m68k/platform/68000/ints.c
index b3810febb3e3..cda49b12d7be 100644
--- a/trunk/arch/m68k/platform/68328/ints.c
+++ b/trunk/arch/m68k/platform/68000/ints.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/m68knommu/platform/68328/ints.c
+ * ints.c - Generic interrupt controller support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
diff --git a/trunk/arch/m68k/platform/68328/config.c b/trunk/arch/m68k/platform/68000/m68328.c
similarity index 97%
rename from trunk/arch/m68k/platform/68328/config.c
rename to trunk/arch/m68k/platform/68000/m68328.c
index 8c20e891e981..a86eb66835aa 100644
--- a/trunk/arch/m68k/platform/68328/config.c
+++ b/trunk/arch/m68k/platform/68000/m68328.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/68328/config.c
+ * m68328.c - 68328 specific config
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
diff --git a/trunk/arch/m68k/platform/68EZ328/config.c b/trunk/arch/m68k/platform/68000/m68EZ328.c
similarity index 97%
rename from trunk/arch/m68k/platform/68EZ328/config.c
rename to trunk/arch/m68k/platform/68000/m68EZ328.c
index 4f158d551f02..a6eb72d75008 100644
--- a/trunk/arch/m68k/platform/68EZ328/config.c
+++ b/trunk/arch/m68k/platform/68000/m68EZ328.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/68EZ328/config.c
+ * m68EZ328.c - 68EZ328 specific config
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
diff --git a/trunk/arch/m68k/platform/68VZ328/config.c b/trunk/arch/m68k/platform/68000/m68VZ328.c
similarity index 98%
rename from trunk/arch/m68k/platform/68VZ328/config.c
rename to trunk/arch/m68k/platform/68000/m68VZ328.c
index 2ed8dc305e42..eb6964fbec09 100644
--- a/trunk/arch/m68k/platform/68VZ328/config.c
+++ b/trunk/arch/m68k/platform/68000/m68VZ328.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/68VZ328/config.c
+ * m68VZ328.c - 68VZ328 specific config
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
@@ -28,7 +28,7 @@
#include
#ifdef CONFIG_INIT_LCD
-#include "bootlogo.h"
+#include "bootlogo-vz.h"
#endif
/***************************************************************************/
diff --git a/trunk/arch/m68k/platform/68328/romvec.S b/trunk/arch/m68k/platform/68000/romvec.S
similarity index 94%
rename from trunk/arch/m68k/platform/68328/romvec.S
rename to trunk/arch/m68k/platform/68000/romvec.S
index 31084466eae8..15c70cd6453f 100644
--- a/trunk/arch/m68k/platform/68328/romvec.S
+++ b/trunk/arch/m68k/platform/68000/romvec.S
@@ -1,5 +1,5 @@
/*
- * linux/arch/m68knommu/platform/68328/romvec.S
+ * romvec.S - Vector table for 68000 cpus
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
diff --git a/trunk/arch/m68k/platform/68328/timers.c b/trunk/arch/m68k/platform/68000/timers.c
similarity index 98%
rename from trunk/arch/m68k/platform/68328/timers.c
rename to trunk/arch/m68k/platform/68000/timers.c
index f4dc9b295609..ec30acbfe6db 100644
--- a/trunk/arch/m68k/platform/68328/timers.c
+++ b/trunk/arch/m68k/platform/68000/timers.c
@@ -1,7 +1,7 @@
/***************************************************************************/
/*
- * linux/arch/m68knommu/platform/68328/timers.c
+ * timers.c - Generic hardware timer support.
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
diff --git a/trunk/arch/m68k/platform/68328/Makefile b/trunk/arch/m68k/platform/68328/Makefile
deleted file mode 100644
index ee61bf84d4a0..000000000000
--- a/trunk/arch/m68k/platform/68328/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Makefile for arch/m68knommu/platform/68328.
-#
-
-model-y := ram
-model-$(CONFIG_ROMKERNEL) := rom
-
-head-y = head-$(model-y).o
-head-$(CONFIG_PILOT) = head-pilot.o
-head-$(CONFIG_DRAGEN2) = head-de2.o
-
-obj-y += entry.o ints.o timers.o
-obj-$(CONFIG_M68328) += config.o
-obj-$(CONFIG_ROM) += romvec.o
-
-extra-y := head.o
-
-$(obj)/head.o: $(obj)/$(head-y)
- ln -sf $(head-y) $(obj)/head.o
-
-clean-files := $(obj)/head.o $(head-y)
diff --git a/trunk/arch/m68k/platform/68328/head-de2.S b/trunk/arch/m68k/platform/68328/head-de2.S
deleted file mode 100644
index 537d3245b539..000000000000
--- a/trunk/arch/m68k/platform/68328/head-de2.S
+++ /dev/null
@@ -1,128 +0,0 @@
-
-#define MEM_END 0x00800000 /* Memory size 8Mb */
-
-#undef CRT_DEBUG
-
-.macro PUTC CHAR
-#ifdef CRT_DEBUG
- moveq #\CHAR, %d7
- jsr putc
-#endif
-.endm
-
- .global _start
- .global _rambase
- .global _ramvec
- .global _ramstart
- .global _ramend
-
- .data
-
-/*
- * Set up the usable of RAM stuff
- */
-_rambase:
- .long 0
-_ramvec:
- .long 0
-_ramstart:
- .long 0
-_ramend:
- .long 0
-
- .text
-
-_start:
-
-/*
- * Setup initial stack
- */
- /* disable all interrupts */
- movew #0x2700, %sr
- movel #-1, 0xfffff304
- movel #MEM_END-4, %sp
-
- PUTC '\r'
- PUTC '\n'
- PUTC 'A'
- PUTC 'B'
-
-/*
- * Determine end of RAM
- */
-
- movel #MEM_END, %a0
- movel %a0, _ramend
-
- PUTC 'C'
-
-/*
- * Move ROM filesystem above bss :-)
- */
-
- moveal #__bss_start, %a0 /* romfs at the start of bss */
- moveal #__bss_stop, %a1 /* Set up destination */
- movel %a0, %a2 /* Copy of bss start */
-
- movel 8(%a0), %d1 /* Get size of ROMFS */
- addql #8, %d1 /* Allow for rounding */
- andl #0xfffffffc, %d1 /* Whole words */
-
- addl %d1, %a0 /* Copy from end */
- addl %d1, %a1 /* Copy from end */
- movel %a1, _ramstart /* Set start of ram */
-
-1:
- movel -(%a0), %d0 /* Copy dword */
- movel %d0, -(%a1)
- cmpl %a0, %a2 /* Check if at end */
- bne 1b
-
- PUTC 'D'
-
-/*
- * Initialize BSS segment to 0
- */
-
- lea __bss_start, %a0
- lea __bss_stop, %a1
-
- /* Copy 0 to %a0 until %a0 == %a1 */
-2: cmpal %a0, %a1
- beq 1f
- clrl (%a0)+
- bra 2b
-1:
-
- PUTC 'E'
-
-/*
- * Load the current task pointer and stack
- */
-
- lea init_thread_union, %a0
- lea 0x2000(%a0), %sp
-
- PUTC 'F'
- PUTC '\r'
- PUTC '\n'
-
-/*
- * Go
- */
-
- jmp start_kernel
-
-/*
- * Local functions
- */
-
-#ifdef CRT_DEBUG
-putc:
- moveb %d7, 0xfffff907
-1:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq 1b
- rts
-#endif
diff --git a/trunk/arch/m68k/platform/68328/head-pilot.S b/trunk/arch/m68k/platform/68328/head-pilot.S
deleted file mode 100644
index 45a9dad29e3d..000000000000
--- a/trunk/arch/m68k/platform/68328/head-pilot.S
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * linux/arch/m68knommu/platform/68328/head-pilot.S
- * - A startup file for the MC68328
- *
- * Copyright (C) 1998 D. Jeff Dionne ,
- * Kenneth Albanowski ,
- * The Silver Hammer Group, Ltd.
- *
- * (c) 1995, Dionne & Associates
- * (c) 1995, DKG Display Tech.
- */
-
-#define ASSEMBLY
-
-#define IMMED #
-#define DBG_PUTC(x) moveb IMMED x, 0xfffff907
-
-
-.global _stext
-.global _start
-
-.global _rambase
-.global _ramvec
-.global _ramstart
-.global _ramend
-
-.global bootlogo_bits
-
-/*****************************************************************************/
-
-.data
-
-/*
- * Set up the usable of RAM stuff. Size of RAM is determined then
- * an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long 0
-_rambase:
-.long 0
-_ramstart:
-.long 0
-_ramend:
-.long 0
-
-.text
-
-_start:
-_stext:
-
-
-#ifdef CONFIG_M68328
-
-#ifdef CONFIG_PILOT
- .byte 0x4e, 0xfa, 0x00, 0x0a /* Jmp +X bytes */
- .byte 'b', 'o', 'o', 't'
- .word 10000
-
- nop
-#endif
-
- moveq #0, %d0
- movew %d0, 0xfffff618 /* Watchdog off */
- movel #0x00011f07, 0xfffff114 /* CS A1 Mask */
-
- movew #0x0800, 0xfffff906 /* Ignore CTS */
- movew #0x010b, 0xfffff902 /* BAUD to 9600 */
-
- movew #0x2410, 0xfffff200 /* PLLCR */
- movew #0x123, 0xfffff202 /* PLLFSR */
-
-#ifdef CONFIG_PILOT
- moveb #0, 0xfffffA27 /* LCKCON */
- movel #_start, 0xfffffA00 /* LSSA */
- moveb #0xa, 0xfffffA05 /* LVPW */
- movew #0x9f, 0xFFFFFa08 /* LXMAX */
- movew #0x9f, 0xFFFFFa0a /* LYMAX */
- moveb #9, 0xfffffa29 /* LBAR */
- moveb #0, 0xfffffa25 /* LPXCD */
- moveb #0x04, 0xFFFFFa20 /* LPICF */
- moveb #0x58, 0xfffffA27 /* LCKCON */
- moveb #0x85, 0xfffff429 /* PFDATA */
- moveb #0xd8, 0xfffffA27 /* LCKCON */
- moveb #0xc5, 0xfffff429 /* PFDATA */
- moveb #0xd5, 0xfffff429 /* PFDATA */
-
- moveal #0x00100000, %a3
- moveal #0x100ffc00, %a4
-#endif /* CONFIG_PILOT */
-
-#endif /* CONFIG_M68328 */
-
- movew #0x2700, %sr
- lea %a4@(-4), %sp
-
- DBG_PUTC('\r')
- DBG_PUTC('\n')
- DBG_PUTC('A')
-
- moveq #0,%d0
- movew #16384, %d0 /* PLL settle wait loop */
-L0:
- subw #1, %d0
- bne L0
-
- DBG_PUTC('B')
-
- /* Copy command line from beginning of RAM (+16) to end of bss */
- movel #CONFIG_VECTORBASE, %d7
- addl #16, %d7
- moveal %d7, %a0
- moveal #__bss_stop, %a1
- lea %a1@(512), %a2
-
- DBG_PUTC('C')
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-L2:
- movel %a0@+, %d0
- movel %d0, %a1@+
- cmpal %a1, %a2
- bhi L2
-
- /* Copy data+init segment from ROM to RAM */
- moveal #_etext, %a0
- moveal #_sdata, %a1
- moveal #__init_end, %a2
-
- DBG_PUTC('D')
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-LD1:
- movel %a0@+, %d0
- movel %d0, %a1@+
- cmpal %a1, %a2
- bhi LD1
-
- DBG_PUTC('E')
-
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
-
- /* Copy 0 to %a0 until %a0 == %a1 */
-L1:
- movel #0, %a0@+
- cmpal %a0, %a1
- bhi L1
-
- DBG_PUTC('F')
-
- /* Copy command line from end of bss to command line */
- moveal #__bss_stop, %a0
- moveal #command_line, %a1
- lea %a1@(512), %a2
-
- DBG_PUTC('G')
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-L3:
- movel %a0@+, %d0
- movel %d0, %a1@+
- cmpal %a1, %a2
- bhi L3
-
- movel #_sdata, %d0
- movel %d0, _rambase
- movel #__bss_stop, %d0
- movel %d0, _ramstart
-
- movel %a4, %d0
- subl #4096, %d0 /* Reserve 4K of stack */
- moveq #79, %d7
- movel %d0, _ramend
-
- pea 0
- pea env
- pea %sp@(4)
- pea 0
-
- DBG_PUTC('H')
-
-#ifdef CONFIG_PILOT
- movel #bootlogo_bits, 0xFFFFFA00
- moveb #10, 0xFFFFFA05
- movew #160, 0xFFFFFA08
- movew #160, 0xFFFFFA0A
-#endif /* CONFIG_PILOT */
-
- DBG_PUTC('I')
-
- lea init_thread_union, %a0
- lea 0x2000(%a0), %sp
-
- DBG_PUTC('J')
- DBG_PUTC('\r')
- DBG_PUTC('\n')
-
- jsr start_kernel
-_exit:
-
- jmp _exit
-
-
- .data
-env:
- .long 0
diff --git a/trunk/arch/m68k/platform/68328/head-ram.S b/trunk/arch/m68k/platform/68328/head-ram.S
deleted file mode 100644
index 5189ef926098..000000000000
--- a/trunk/arch/m68k/platform/68328/head-ram.S
+++ /dev/null
@@ -1,141 +0,0 @@
-
- .global __main
- .global __rom_start
-
- .global _rambase
- .global _ramstart
-
- .global splash_bits
- .global _start
- .global _stext
- .global _edata
-
-#define DEBUG
-#define ROM_OFFSET 0x10C00000
-#define STACK_GAURD 0x10
-
- .text
-
-_start:
-_stext:
- movew #0x2700, %sr /* Exceptions off! */
-
-#if 0
- /* Init chip registers. uCsimm specific */
- moveb #0x00, 0xfffffb0b /* Watchdog off */
- moveb #0x10, 0xfffff000 /* SCR */
-
- movew #0x2400, 0xfffff200 /* PLLCR */
- movew #0x0123, 0xfffff202 /* PLLFSR */
-
- moveb #0x00, 0xfffff40b /* enable chip select */
- moveb #0x00, 0xfffff423 /* enable /DWE */
- moveb #0x08, 0xfffffd0d /* disable hardmap */
- moveb #0x07, 0xfffffd0e /* level 7 interrupt clear */
-
- movew #0x8600, 0xfffff100 /* FLASH at 0x10c00000 */
- movew #0x018b, 0xfffff110 /* 2Meg, enable, 0ws */
-
- movew #0x8f00, 0xfffffc00 /* DRAM configuration */
- movew #0x9667, 0xfffffc02 /* DRAM control */
- movew #0x0000, 0xfffff106 /* DRAM at 0x00000000 */
- movew #0x068f, 0xfffff116 /* 8Meg, enable, 0ws */
-
- moveb #0x40, 0xfffff300 /* IVR */
- movel #0x007FFFFF, %d0 /* IMR */
- movel %d0, 0xfffff304
-
- moveb 0xfffff42b, %d0
- andb #0xe0, %d0
- moveb %d0, 0xfffff42b
-
- moveb #0x08, 0xfffff907 /* Ignore CTS */
- movew #0x010b, 0xfffff902 /* BAUD to 9600 */
- movew #0xe100, 0xfffff900 /* enable */
-#endif
-
- movew #16384, %d0 /* PLL settle wait loop */
-L0:
- subw #1, %d0
- bne L0
-#ifdef DEBUG
- moveq #70, %d7 /* 'F' */
- moveb %d7,0xfffff907 /* No absolute addresses */
-pclp1:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq pclp1
-#endif /* DEBUG */
-
-#ifdef DEBUG
- moveq #82, %d7 /* 'R' */
- moveb %d7,0xfffff907 /* No absolute addresses */
-pclp3:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq pclp3
-#endif /* DEBUG */
- moveal #0x007ffff0, %ssp
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
-
- /* Copy 0 to %a0 until %a0 >= %a1 */
-L1:
- movel #0, %a0@+
- cmpal %a0, %a1
- bhi L1
-
-#ifdef DEBUG
- moveq #67, %d7 /* 'C' */
- jsr putc
-#endif /* DEBUG */
-
- pea 0
- pea env
- pea %sp@(4)
- pea 0
-
-#ifdef DEBUG
- moveq #70, %d7 /* 'F' */
- jsr putc
-#endif /* DEBUG */
-
-lp:
- jsr start_kernel
- jmp lp
-_exit:
-
- jmp _exit
-
-__main:
- /* nothing */
- rts
-
-#ifdef DEBUG
-putc:
- moveb %d7,0xfffff907
-pclp:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq pclp
- rts
-#endif /* DEBUG */
-
- .data
-
-/*
- * Set up the usable of RAM stuff. Size of RAM is determined then
- * an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long 0
-_rambase:
-.long 0
-_ramstart:
-.long 0
-_ramend:
-.long 0
-
-env:
- .long 0
diff --git a/trunk/arch/m68k/platform/68328/head-rom.S b/trunk/arch/m68k/platform/68328/head-rom.S
deleted file mode 100644
index 3dff98ba2e97..000000000000
--- a/trunk/arch/m68k/platform/68328/head-rom.S
+++ /dev/null
@@ -1,105 +0,0 @@
-
- .global _start
- .global _stext
-
- .global _rambase
- .global _ramvec
- .global _ramstart
- .global _ramend
-
-#ifdef CONFIG_INIT_LCD
- .global bootlogo_bits
-#endif
-
- .data
-
-/*
- * Set up the usable of RAM stuff. Size of RAM is determined then
- * an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long 0
-_rambase:
-.long 0
-_ramstart:
-.long 0
-_ramend:
-.long 0
-
-#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
-
- .text
-_start:
-_stext: movew #0x2700,%sr
-#ifdef CONFIG_INIT_LCD
- movel #bootlogo_bits, 0xfffffA00 /* LSSA */
- moveb #0x28, 0xfffffA05 /* LVPW */
- movew #0x280, 0xFFFFFa08 /* LXMAX */
- movew #0x1df, 0xFFFFFa0a /* LYMAX */
- moveb #0, 0xfffffa29 /* LBAR */
- moveb #0, 0xfffffa25 /* LPXCD */
- moveb #0x08, 0xFFFFFa20 /* LPICF */
- moveb #0x01, 0xFFFFFA21 /* -ve pol */
- moveb #0x81, 0xfffffA27 /* LCKCON */
- movew #0xff00, 0xfffff412 /* LCD pins */
-#endif
- moveal #RAMEND-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
- movew #32767, %d0 /* PLL settle wait loop */
-1: subq #1, %d0
- bne 1b
-
- /* Copy data segment from ROM to RAM */
- moveal #_etext, %a0
- moveal #_sdata, %a1
- moveal #_edata, %a2
-
- /* Copy %a0 to %a1 until %a1 == %a2 */
-1: movel %a0@+, %a1@+
- cmpal %a1, %a2
- bhi 1b
-
- moveal #__bss_start, %a0
- moveal #__bss_stop, %a1
- /* Copy 0 to %a0 until %a0 == %a1 */
-
-1:
- clrl %a0@+
- cmpal %a0, %a1
- bhi 1b
-
- movel #_sdata, %d0
- movel %d0, _rambase
- movel #__bss_stop, %d0
- movel %d0, _ramstart
- movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
- movel %d0, _ramend
- movel #CONFIG_VECTORBASE, %d0
- movel %d0, _ramvec
-
-/*
- * load the current task pointer and stack
- */
- lea init_thread_union, %a0
- lea 0x2000(%a0), %sp
-
-1: jsr start_kernel
- bra 1b
-_exit:
-
- jmp _exit
-
-
-putc:
- moveb %d7,0xfffff907
-1:
- movew 0xfffff906, %d7
- andw #0x2000, %d7
- beq 1b
- rts
-
- .data
-env:
- .long 0
- .text
-
diff --git a/trunk/arch/m68k/platform/68EZ328/Makefile b/trunk/arch/m68k/platform/68EZ328/Makefile
deleted file mode 100644
index b44d799b1115..000000000000
--- a/trunk/arch/m68k/platform/68EZ328/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for arch/m68knommu/platform/68EZ328.
-#
-
-obj-y := config.o
diff --git a/trunk/arch/m68k/platform/68VZ328/Makefile b/trunk/arch/m68k/platform/68VZ328/Makefile
deleted file mode 100644
index 816674164682..000000000000
--- a/trunk/arch/m68k/platform/68VZ328/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for arch/m68k/platform/68VZ328.
-#
-
-obj-y := config.o
diff --git a/trunk/arch/m68k/platform/coldfire/clk.c b/trunk/arch/m68k/platform/coldfire/clk.c
index 9cd13b4ce42b..fddfdccae63b 100644
--- a/trunk/arch/m68k/platform/coldfire/clk.c
+++ b/trunk/arch/m68k/platform/coldfire/clk.c
@@ -19,37 +19,58 @@
#include
#include
-/***************************************************************************/
-#ifndef MCFPM_PPMCR0
-struct clk *clk_get(struct device *dev, const char *id)
+static DEFINE_SPINLOCK(clk_lock);
+
+#ifdef MCFPM_PPMCR0
+/*
+ * For more advanced ColdFire parts that have clocks that can be enabled
+ * we supply enable/disable functions. These must properly define their
+ * clocks in their platform specific code.
+ */
+void __clk_init_enabled(struct clk *clk)
{
- return NULL;
+ clk->enabled = 1;
+ clk->clk_ops->enable(clk);
}
-EXPORT_SYMBOL(clk_get);
-int clk_enable(struct clk *clk)
+void __clk_init_disabled(struct clk *clk)
{
- return 0;
+ clk->enabled = 0;
+ clk->clk_ops->disable(clk);
}
-EXPORT_SYMBOL(clk_enable);
-void clk_disable(struct clk *clk)
+static void __clk_enable0(struct clk *clk)
{
+ __raw_writeb(clk->slot, MCFPM_PPMCR0);
}
-EXPORT_SYMBOL(clk_disable);
-void clk_put(struct clk *clk)
+static void __clk_disable0(struct clk *clk)
+{
+ __raw_writeb(clk->slot, MCFPM_PPMSR0);
+}
+
+struct clk_ops clk_ops0 = {
+ .enable = __clk_enable0,
+ .disable = __clk_disable0,
+};
+
+#ifdef MCFPM_PPMCR1
+static void __clk_enable1(struct clk *clk)
{
+ __raw_writeb(clk->slot, MCFPM_PPMCR1);
}
-EXPORT_SYMBOL(clk_put);
-unsigned long clk_get_rate(struct clk *clk)
+static void __clk_disable1(struct clk *clk)
{
- return MCF_CLK;
+ __raw_writeb(clk->slot, MCFPM_PPMSR1);
}
-EXPORT_SYMBOL(clk_get_rate);
-#else
-static DEFINE_SPINLOCK(clk_lock);
+
+struct clk_ops clk_ops1 = {
+ .enable = __clk_enable1,
+ .disable = __clk_disable1,
+};
+#endif /* MCFPM_PPMCR1 */
+#endif /* MCFPM_PPMCR0 */
struct clk *clk_get(struct device *dev, const char *id)
{
@@ -101,48 +122,3 @@ unsigned long clk_get_rate(struct clk *clk)
EXPORT_SYMBOL(clk_get_rate);
/***************************************************************************/
-
-void __clk_init_enabled(struct clk *clk)
-{
- clk->enabled = 1;
- clk->clk_ops->enable(clk);
-}
-
-void __clk_init_disabled(struct clk *clk)
-{
- clk->enabled = 0;
- clk->clk_ops->disable(clk);
-}
-
-static void __clk_enable0(struct clk *clk)
-{
- __raw_writeb(clk->slot, MCFPM_PPMCR0);
-}
-
-static void __clk_disable0(struct clk *clk)
-{
- __raw_writeb(clk->slot, MCFPM_PPMSR0);
-}
-
-struct clk_ops clk_ops0 = {
- .enable = __clk_enable0,
- .disable = __clk_disable0,
-};
-
-#ifdef MCFPM_PPMCR1
-static void __clk_enable1(struct clk *clk)
-{
- __raw_writeb(clk->slot, MCFPM_PPMCR1);
-}
-
-static void __clk_disable1(struct clk *clk)
-{
- __raw_writeb(clk->slot, MCFPM_PPMSR1);
-}
-
-struct clk_ops clk_ops1 = {
- .enable = __clk_enable1,
- .disable = __clk_disable1,
-};
-#endif /* MCFPM_PPMCR1 */
-#endif /* MCFPM_PPMCR0 */
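
With the enable/disable plumbing now grouped at the top of clk.c, the generic entry
points lower in the file (unchanged, so not visible in this hunk) are shared by every
part. Roughly, and only as a sketch of the intent rather than a quote of the real
body, clk_enable() is expected to behave as below: the new clk_lock guards the enable
count, and the PPMCR write is skipped entirely for clocks that carry no clk_ops.

    /* Sketch only -- the real body lives in the unchanged part of clk.c. */
    int clk_enable(struct clk *clk)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&clk_lock, flags);
    	if ((clk->enabled++ == 0) && clk->clk_ops)
    		clk->clk_ops->enable(clk);	/* write the PPMCR slot on the 0 -> 1 transition */
    	spin_unlock_irqrestore(&clk_lock, flags);

    	return 0;
    }
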
diff --git a/trunk/arch/m68k/platform/coldfire/intc-5249.c b/trunk/arch/m68k/platform/coldfire/intc-5249.c
index 0864b836699a..b0d1641053e4 100644
--- a/trunk/arch/m68k/platform/coldfire/intc-5249.c
+++ b/trunk/arch/m68k/platform/coldfire/intc-5249.c
@@ -21,7 +21,7 @@ static void intc2_irq_gpio_mask(struct irq_data *d)
{
u32 imr;
imr = readl(MCFSIM2_GPIOINTENABLE);
- imr &= ~(0x1 << (d->irq - MCFINTC2_GPIOIRQ0));
+ imr &= ~(0x1 << (d->irq - MCF_IRQ_GPIO0));
writel(imr, MCFSIM2_GPIOINTENABLE);
}
@@ -29,13 +29,13 @@ static void intc2_irq_gpio_unmask(struct irq_data *d)
{
u32 imr;
imr = readl(MCFSIM2_GPIOINTENABLE);
- imr |= (0x1 << (d->irq - MCFINTC2_GPIOIRQ0));
+ imr |= (0x1 << (d->irq - MCF_IRQ_GPIO0));
writel(imr, MCFSIM2_GPIOINTENABLE);
}
static void intc2_irq_gpio_ack(struct irq_data *d)
{
- writel(0x1 << (d->irq - MCFINTC2_GPIOIRQ0), MCFSIM2_GPIOINTCLEAR);
+ writel(0x1 << (d->irq - MCF_IRQ_GPIO0), MCFSIM2_GPIOINTCLEAR);
}
static struct irq_chip intc2_irq_gpio_chip = {
@@ -50,7 +50,7 @@ static int __init mcf_intc2_init(void)
int irq;
/* GPIO interrupt sources */
- for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
+ for (irq = MCF_IRQ_GPIO0; (irq <= MCF_IRQ_GPIO7); irq++) {
irq_set_chip(irq, &intc2_irq_gpio_chip);
irq_set_handler(irq, handle_edge_irq);
}
diff --git a/trunk/arch/m68k/platform/coldfire/m5206.c b/trunk/arch/m68k/platform/coldfire/m5206.c
index 6bfbeebd231b..0e55f449a88c 100644
--- a/trunk/arch/m68k/platform/coldfire/m5206.c
+++ b/trunk/arch/m68k/platform/coldfire/m5206.c
@@ -16,6 +16,26 @@
#include
#include
#include
+#include
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
/***************************************************************************/
diff --git a/trunk/arch/m68k/platform/coldfire/m523x.c b/trunk/arch/m68k/platform/coldfire/m523x.c
index ff37fe9553ea..2b10e9f198cd 100644
--- a/trunk/arch/m68k/platform/coldfire/m523x.c
+++ b/trunk/arch/m68k/platform/coldfire/m523x.c
@@ -19,6 +19,34 @@
#include
#include
#include
+#include
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcfpit0,
+ &clk_mcfpit1,
+ &clk_mcfpit2,
+ &clk_mcfpit3,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_mcfuart2,
+ &clk_fec0,
+ NULL
+};
/***************************************************************************/
diff --git a/trunk/arch/m68k/platform/coldfire/m5249.c b/trunk/arch/m68k/platform/coldfire/m5249.c
index 23b19cb7ab50..c80b5e51d29a 100644
--- a/trunk/arch/m68k/platform/coldfire/m5249.c
+++ b/trunk/arch/m68k/platform/coldfire/m5249.c
@@ -16,6 +16,26 @@
#include
#include
#include
+#include
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
/***************************************************************************/
@@ -28,8 +48,8 @@ static struct resource m5249_smc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = MCFINTC2_GPIOIRQ6,
- .end = MCFINTC2_GPIOIRQ6,
+ .start = MCF_IRQ_GPIO6,
+ .end = MCF_IRQ_GPIO6,
.flags = IORESOURCE_IRQ,
},
};
@@ -75,8 +95,8 @@ static void __init m5249_smc91x_init(void)
gpio = readl(MCFSIM2_GPIOINTENABLE);
writel(gpio | 0x40, MCFSIM2_GPIOINTENABLE);
- gpio = readl(MCFSIM2_INTLEVEL5);
- writel(gpio | 0x04000000, MCFSIM2_INTLEVEL5);
+ gpio = readl(MCFINTC2_INTPRI5);
+ writel(gpio | 0x04000000, MCFINTC2_INTPRI5);
}
#endif /* CONFIG_M5249C3 */
diff --git a/trunk/arch/m68k/platform/coldfire/m525x.c b/trunk/arch/m68k/platform/coldfire/m525x.c
index fce8f8a45bf0..5b9f657b2df0 100644
--- a/trunk/arch/m68k/platform/coldfire/m525x.c
+++ b/trunk/arch/m68k/platform/coldfire/m525x.c
@@ -16,6 +16,26 @@
#include
#include
#include
+#include
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
/***************************************************************************/
diff --git a/trunk/arch/m68k/platform/coldfire/m5272.c b/trunk/arch/m68k/platform/coldfire/m5272.c
index 45b246d052ef..a8c5856fe5ec 100644
--- a/trunk/arch/m68k/platform/coldfire/m5272.c
+++ b/trunk/arch/m68k/platform/coldfire/m5272.c
@@ -19,6 +19,7 @@
#include
#include
#include
+#include
/***************************************************************************/
@@ -30,6 +31,31 @@ unsigned char ledbank = 0xff;
/***************************************************************************/
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcftmr2, "mcftmr.2", MCF_BUSCLK);
+DEFINE_CLK(mcftmr3, "mcftmr.3", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcftmr2,
+ &clk_mcftmr3,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_fec0,
+ NULL
+};
+
+/***************************************************************************/
+
static void __init m5272_uarts_init(void)
{
u32 v;
diff --git a/trunk/arch/m68k/platform/coldfire/m527x.c b/trunk/arch/m68k/platform/coldfire/m527x.c
index 1431ba03c602..6fbfe9096c3e 100644
--- a/trunk/arch/m68k/platform/coldfire/m527x.c
+++ b/trunk/arch/m68k/platform/coldfire/m527x.c
@@ -20,6 +20,36 @@
#include
#include
#include
+#include
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+DEFINE_CLK(fec1, "fec.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcfpit0,
+ &clk_mcfpit1,
+ &clk_mcfpit2,
+ &clk_mcfpit3,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_mcfuart2,
+ &clk_fec0,
+ &clk_fec1,
+ NULL
+};
/***************************************************************************/
diff --git a/trunk/arch/m68k/platform/coldfire/m528x.c b/trunk/arch/m68k/platform/coldfire/m528x.c
index f9f7e6a13d04..83b7dad7a84e 100644
--- a/trunk/arch/m68k/platform/coldfire/m528x.c
+++ b/trunk/arch/m68k/platform/coldfire/m528x.c
@@ -21,6 +21,34 @@
#include
#include
#include
+#include
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcfpit0,
+ &clk_mcfpit1,
+ &clk_mcfpit2,
+ &clk_mcfpit3,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_mcfuart2,
+ &clk_fec0,
+ NULL
+};
/***************************************************************************/
diff --git a/trunk/arch/m68k/platform/coldfire/m5307.c b/trunk/arch/m68k/platform/coldfire/m5307.c
index a568d2870d15..887435361386 100644
--- a/trunk/arch/m68k/platform/coldfire/m5307.c
+++ b/trunk/arch/m68k/platform/coldfire/m5307.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include
/***************************************************************************/
@@ -28,6 +29,25 @@ unsigned char ledbank = 0xff;
/***************************************************************************/
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
+
+/***************************************************************************/
+
void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel) || \
diff --git a/trunk/arch/m68k/platform/coldfire/m5407.c b/trunk/arch/m68k/platform/coldfire/m5407.c
index bb6c746ae819..2fb3cdbfde30 100644
--- a/trunk/arch/m68k/platform/coldfire/m5407.c
+++ b/trunk/arch/m68k/platform/coldfire/m5407.c
@@ -16,6 +16,26 @@
#include
#include
#include
+#include
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcftmr0,
+ &clk_mcftmr1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ NULL
+};
/***************************************************************************/
diff --git a/trunk/arch/m68k/platform/coldfire/m54xx.c b/trunk/arch/m68k/platform/coldfire/m54xx.c
index b587bf35175b..952da53aa0bc 100644
--- a/trunk/arch/m68k/platform/coldfire/m54xx.c
+++ b/trunk/arch/m68k/platform/coldfire/m54xx.c
@@ -14,19 +14,45 @@
#include
#include
#include
+#include
#include
#include
#include
#include
#include
#include
+#include
#include
+#include
#ifdef CONFIG_MMU
#include
#endif
/***************************************************************************/
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfslt0, "mcfslt.0", MCF_BUSCLK);
+DEFINE_CLK(mcfslt1, "mcfslt.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(mcfuart3, "mcfuart.3", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &clk_pll,
+ &clk_sys,
+ &clk_mcfslt0,
+ &clk_mcfslt1,
+ &clk_mcfuart0,
+ &clk_mcfuart1,
+ &clk_mcfuart2,
+ &clk_mcfuart3,
+ NULL
+};
+
+/***************************************************************************/
+
static void __init m54xx_uarts_init(void)
{
/* enable io pins */
diff --git a/trunk/arch/sh/mm/Kconfig b/trunk/arch/sh/mm/Kconfig
index cb8f9920f4dd..0f7c852f355c 100644
--- a/trunk/arch/sh/mm/Kconfig
+++ b/trunk/arch/sh/mm/Kconfig
@@ -111,6 +111,7 @@ config VSYSCALL
config NUMA
bool "Non Uniform Memory Access (NUMA) Support"
depends on MMU && SYS_SUPPORTS_NUMA && EXPERIMENTAL
+ select ARCH_WANT_NUMA_VARIABLE_LOCALITY
default n
help
Some SH systems have many various memories scattered around
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 65a872bf72f9..97f8c5ad8c2d 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -22,6 +22,8 @@ config X86
def_bool y
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
+ select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_WANTS_PROT_NUMA_PROT_NONE
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_PCSPKR_PLATFORM
diff --git a/trunk/arch/x86/include/asm/efi.h b/trunk/arch/x86/include/asm/efi.h
index fd13815fe85c..6e8fdf5ad113 100644
--- a/trunk/arch/x86/include/asm/efi.h
+++ b/trunk/arch/x86/include/asm/efi.h
@@ -69,37 +69,23 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
(u64)(a4), (u64)(a5), (u64)(a6))
-extern unsigned long efi_call_virt_prelog(void);
-extern void efi_call_virt_epilog(unsigned long);
-
-#define efi_callx(x, func, ...) \
- ({ \
- efi_status_t __status; \
- unsigned long __pgd; \
- \
- __pgd = efi_call_virt_prelog(); \
- __status = efi_call##x(func, __VA_ARGS__); \
- efi_call_virt_epilog(__pgd); \
- __status; \
- })
-
#define efi_call_virt0(f) \
- efi_callx(0, (void *)(efi.systab->runtime->f))
+ efi_call0((void *)(efi.systab->runtime->f))
#define efi_call_virt1(f, a1) \
- efi_callx(1, (void *)(efi.systab->runtime->f), (u64)(a1))
+ efi_call1((void *)(efi.systab->runtime->f), (u64)(a1))
#define efi_call_virt2(f, a1, a2) \
- efi_callx(2, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
+ efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
#define efi_call_virt3(f, a1, a2, a3) \
- efi_callx(3, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+ efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3))
#define efi_call_virt4(f, a1, a2, a3, a4) \
- efi_callx(4, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+ efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4))
#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
- efi_callx(5, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+ efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5))
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
- efi_callx(6, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+ efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
diff --git a/trunk/arch/x86/include/asm/pgtable.h b/trunk/arch/x86/include/asm/pgtable.h
index a1f780d45f76..5199db2923d3 100644
--- a/trunk/arch/x86/include/asm/pgtable.h
+++ b/trunk/arch/x86/include/asm/pgtable.h
@@ -404,7 +404,14 @@ static inline int pte_same(pte_t a, pte_t b)
static inline int pte_present(pte_t a)
{
- return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+ _PAGE_NUMA);
+}
+
+#define pte_accessible pte_accessible
+static inline int pte_accessible(pte_t a)
+{
+ return pte_flags(a) & _PAGE_PRESENT;
}
static inline int pte_hidden(pte_t pte)
@@ -420,7 +427,8 @@ static inline int pmd_present(pmd_t pmd)
* the _PAGE_PSE flag will remain set at all times while the
* _PAGE_PRESENT bit is clear).
*/
- return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
+ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
+ _PAGE_NUMA);
}
static inline int pmd_none(pmd_t pmd)
@@ -479,6 +487,11 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
static inline int pmd_bad(pmd_t pmd)
{
+#ifdef CONFIG_NUMA_BALANCING
+ /* pmd_numa check */
+ if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
+ return 0;
+#endif
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
diff --git a/trunk/arch/x86/include/asm/pgtable_types.h b/trunk/arch/x86/include/asm/pgtable_types.h
index ec8a1fc9505d..3c32db8c539d 100644
--- a/trunk/arch/x86/include/asm/pgtable_types.h
+++ b/trunk/arch/x86/include/asm/pgtable_types.h
@@ -64,6 +64,26 @@
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+/*
+ * _PAGE_NUMA indicates that this page will trigger a numa hinting
+ * minor page fault to gather numa placement statistics (see
+ * pte_numa()). The bit picked (8) is within the range between
+ * _PAGE_FILE (6) and _PAGE_PROTNONE (8) bits. Therefore, it doesn't
+ * require changes to the swp entry format because that bit is always
+ * zero when the pte is not present.
+ *
+ * The bit picked must be always zero when the pmd is present and not
+ * present, so that we don't lose information when we set it while
+ * atomically clearing the present bit.
+ *
+ * Because we shared the same bit (8) with _PAGE_PROTNONE this can be
+ * interpreted as _PAGE_NUMA only in places that _PAGE_PROTNONE
+ * couldn't reach, like handle_mm_fault() (see access_error in
+ * arch/x86/mm/fault.c, the vma protection must not be PROT_NONE for
+ * handle_mm_fault() to be invoked).
+ */
+#define _PAGE_NUMA _PAGE_PROTNONE
+
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
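
The comment block above explains why _PAGE_NUMA can share bit 8 with _PAGE_PROTNONE, and the pmd_bad() hunk earlier in this series relies on the resulting bit pattern. A stand-alone user-space sketch of that test, with the flag values taken from this hunk and every other name purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Bit values as defined in pgtable_types.h above; the X86_ prefix only
 * keeps this sketch out of the kernel namespace. */
#define X86_PAGE_PRESENT  ((uint64_t)1 << 0)
#define X86_PAGE_PROTNONE ((uint64_t)1 << 8)
#define X86_PAGE_NUMA     X86_PAGE_PROTNONE   /* shares bit 8 */

/* Mirror of the pmd_bad() special case: a pmd with _PAGE_NUMA set and
 * _PAGE_PRESENT clear is a NUMA-hinting entry, not a corrupt table. */
static int pmd_flags_are_numa(uint64_t flags)
{
	return (flags & (X86_PAGE_NUMA | X86_PAGE_PRESENT)) == X86_PAGE_NUMA;
}

int main(void)
{
	printf("numa only    : %d\n", pmd_flags_are_numa(X86_PAGE_NUMA));                    /* 1 */
	printf("present+numa : %d\n", pmd_flags_are_numa(X86_PAGE_PRESENT | X86_PAGE_NUMA)); /* 0 */
	printf("present only : %d\n", pmd_flags_are_numa(X86_PAGE_PRESENT));                 /* 0 */
	return 0;
}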
diff --git a/trunk/arch/x86/kernel/tboot.c b/trunk/arch/x86/kernel/tboot.c
index d4f460f962ee..f84fe00fad48 100644
--- a/trunk/arch/x86/kernel/tboot.c
+++ b/trunk/arch/x86/kernel/tboot.c
@@ -103,13 +103,71 @@ void __init tboot_probe(void)
pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
}
+static pgd_t *tboot_pg_dir;
+static struct mm_struct tboot_mm = {
+ .mm_rb = RB_ROOT,
+ .pgd = swapper_pg_dir,
+ .mm_users = ATOMIC_INIT(2),
+ .mm_count = ATOMIC_INIT(1),
+ .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
+ .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
+};
+
static inline void switch_to_tboot_pt(void)
{
-#ifdef CONFIG_X86_32
- load_cr3(initial_page_table);
-#else
- write_cr3(real_mode_header->trampoline_pgd);
-#endif
+ write_cr3(virt_to_phys(tboot_pg_dir));
+}
+
+static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
+ pgprot_t prot)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_offset(&tboot_mm, vaddr);
+ pud = pud_alloc(&tboot_mm, pgd, vaddr);
+ if (!pud)
+ return -1;
+ pmd = pmd_alloc(&tboot_mm, pud, vaddr);
+ if (!pmd)
+ return -1;
+ pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr);
+ if (!pte)
+ return -1;
+ set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
+ pte_unmap(pte);
+ return 0;
+}
+
+static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
+ unsigned long nr)
+{
+ /* Reuse the original kernel mapping */
+ tboot_pg_dir = pgd_alloc(&tboot_mm);
+ if (!tboot_pg_dir)
+ return -1;
+
+ for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
+ if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC))
+ return -1;
+ }
+
+ return 0;
+}
+
+static void tboot_create_trampoline(void)
+{
+ u32 map_base, map_size;
+
+ /* Create identity map for tboot shutdown code. */
+ map_base = PFN_DOWN(tboot->tboot_base);
+ map_size = PFN_UP(tboot->tboot_size);
+ if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size))
+ panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n",
+ map_base, map_size);
}
#ifdef CONFIG_ACPI_SLEEP
@@ -167,6 +225,14 @@ void tboot_shutdown(u32 shutdown_type)
if (!tboot_enabled())
return;
+ /*
+ * if we're being called before the 1:1 mapping is set up then just
+ * return and let the normal shutdown happen; this should only be
+ * due to very early panic()
+ */
+ if (!tboot_pg_dir)
+ return;
+
/* if this is S3 then set regions to MAC */
if (shutdown_type == TB_SHUTDOWN_S3)
if (tboot_setup_sleep())
@@ -277,6 +343,8 @@ static __init int tboot_late_init(void)
if (!tboot_enabled())
return 0;
+ tboot_create_trampoline();
+
atomic_set(&ap_wfs_count, 0);
register_hotcpu_notifier(&tboot_cpu_notifier);
diff --git a/trunk/arch/x86/kernel/vsyscall_64.c b/trunk/arch/x86/kernel/vsyscall_64.c
index 3a3e8c9e280d..9a907a67be8f 100644
--- a/trunk/arch/x86/kernel/vsyscall_64.c
+++ b/trunk/arch/x86/kernel/vsyscall_64.c
@@ -145,19 +145,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
return nr;
}
-#ifdef CONFIG_SECCOMP
-static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
-{
- if (!seccomp_mode(&tsk->seccomp))
- return 0;
- task_pt_regs(tsk)->orig_ax = syscall_nr;
- task_pt_regs(tsk)->ax = syscall_nr;
- return __secure_computing(syscall_nr);
-}
-#else
-#define vsyscall_seccomp(_tsk, _nr) 0
-#endif
-
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
/*
@@ -190,10 +177,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
struct task_struct *tsk;
unsigned long caller;
- int vsyscall_nr;
+ int vsyscall_nr, syscall_nr, tmp;
int prev_sig_on_uaccess_error;
long ret;
- int skip;
/*
* No point in checking CS -- the only way to get here is a user mode
@@ -225,56 +211,84 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
}
tsk = current;
- /*
- * With a real vsyscall, page faults cause SIGSEGV. We want to
- * preserve that behavior to make writing exploits harder.
- */
- prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
- current_thread_info()->sig_on_uaccess_error = 1;
/*
+ * Check for access_ok violations and find the syscall nr.
+ *
* NULL is a valid user pointer (in the access_ok sense) on 32-bit and
* 64-bit, so we don't need to special-case it here. For all the
* vsyscalls, NULL means "don't write anything" not "write it at
* address 0".
*/
- ret = -EFAULT;
- skip = 0;
switch (vsyscall_nr) {
case 0:
- skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
- if (skip)
- break;
-
if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
- !write_ok_or_segv(regs->si, sizeof(struct timezone)))
- break;
+ !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
+ ret = -EFAULT;
+ goto check_fault;
+ }
+
+ syscall_nr = __NR_gettimeofday;
+ break;
+
+ case 1:
+ if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
+ ret = -EFAULT;
+ goto check_fault;
+ }
+
+ syscall_nr = __NR_time;
+ break;
+
+ case 2:
+ if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
+ !write_ok_or_segv(regs->si, sizeof(unsigned))) {
+ ret = -EFAULT;
+ goto check_fault;
+ }
+
+ syscall_nr = __NR_getcpu;
+ break;
+ }
+
+ /*
+ * Handle seccomp. regs->ip must be the original value.
+ * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
+ *
+ * We could optimize the seccomp disabled case, but performance
+ * here doesn't matter.
+ */
+ regs->orig_ax = syscall_nr;
+ regs->ax = -ENOSYS;
+ tmp = secure_computing(syscall_nr);
+ if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
+ warn_bad_vsyscall(KERN_DEBUG, regs,
+ "seccomp tried to change syscall nr or ip");
+ do_exit(SIGSYS);
+ }
+ if (tmp)
+ goto do_ret; /* skip requested */
+ /*
+ * With a real vsyscall, page faults cause SIGSEGV. We want to
+ * preserve that behavior to make writing exploits harder.
+ */
+ prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
+ current_thread_info()->sig_on_uaccess_error = 1;
+
+ ret = -EFAULT;
+ switch (vsyscall_nr) {
+ case 0:
ret = sys_gettimeofday(
(struct timeval __user *)regs->di,
(struct timezone __user *)regs->si);
break;
case 1:
- skip = vsyscall_seccomp(tsk, __NR_time);
- if (skip)
- break;
-
- if (!write_ok_or_segv(regs->di, sizeof(time_t)))
- break;
-
ret = sys_time((time_t __user *)regs->di);
break;
case 2:
- skip = vsyscall_seccomp(tsk, __NR_getcpu);
- if (skip)
- break;
-
- if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
- !write_ok_or_segv(regs->si, sizeof(unsigned)))
- break;
-
ret = sys_getcpu((unsigned __user *)regs->di,
(unsigned __user *)regs->si,
NULL);
@@ -283,12 +297,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
- if (skip) {
- if ((long)regs->ax <= 0L) /* seccomp errno emulation */
- goto do_ret;
- goto done; /* seccomp trace/trap */
- }
-
+check_fault:
if (ret == -EFAULT) {
/* Bad news -- userspace fed a bad pointer to a vsyscall. */
warn_bad_vsyscall(KERN_INFO, regs,
@@ -311,7 +320,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
/* Emulate a ret instruction. */
regs->ip = caller;
regs->sp += 8;
-done:
return true;
sigsegv:
diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c
index 931930a96160..a718e0d23503 100644
--- a/trunk/arch/x86/mm/pageattr.c
+++ b/trunk/arch/x86/mm/pageattr.c
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/*
* On success we use clflush, when the CPU supports it to
- * avoid the wbindv. If the CPU does not support it, in the
- * error case, and during early boot (for EFI) we fall back
- * to cpa_flush_all (which uses wbinvd):
+ * avoid the wbinvd. If the CPU does not support it and in the
+ * error case we fall back to cpa_flush_all (which uses
+ * wbinvd):
*/
- if (early_boot_irqs_disabled)
- __cpa_flush_all((void *)(long)cache);
- else if (!ret && cpu_has_clflush) {
+ if (!ret && cpu_has_clflush) {
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
cpa_flush_array(addr, numpages, cache,
cpa.flags, pages);
diff --git a/trunk/arch/x86/mm/pgtable.c b/trunk/arch/x86/mm/pgtable.c
index 217eb705fac0..e27fbf887f3b 100644
--- a/trunk/arch/x86/mm/pgtable.c
+++ b/trunk/arch/x86/mm/pgtable.c
@@ -301,6 +301,13 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long)pgd);
}
+/*
+ * Used to set accessed or dirty bits in the page table entries
+ * on other architectures. On x86, the accessed and dirty bits
+ * are tracked by hardware. However, do_wp_page calls this function
+ * to also make the pte writeable at the same time the dirty bit is
+ * set. In that case we do actually need to write the PTE.
+ */
int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
@@ -310,7 +317,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
if (changed && dirty) {
*ptep = entry;
pte_update_defer(vma->vm_mm, address, ptep);
- flush_tlb_page(vma, address);
}
return changed;
diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c
index 0a34d9e9c263..ad4439145f85 100644
--- a/trunk/arch/x86/platform/efi/efi.c
+++ b/trunk/arch/x86/platform/efi/efi.c
@@ -239,7 +239,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
return status;
}
-static int efi_set_rtc_mmss(unsigned long nowtime)
+static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
+ efi_time_cap_t *tc)
+{
+ unsigned long flags;
+ efi_status_t status;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ efi_call_phys_prelog();
+ status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+ virt_to_phys(tc));
+ efi_call_phys_epilog();
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return status;
+}
+
+int efi_set_rtc_mmss(unsigned long nowtime)
{
int real_seconds, real_minutes;
efi_status_t status;
@@ -268,7 +283,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime)
return 0;
}
-static unsigned long efi_get_time(void)
+unsigned long efi_get_time(void)
{
efi_status_t status;
efi_time_t eft;
@@ -624,13 +639,18 @@ static int __init efi_runtime_init(void)
}
/*
* We will only need *early* access to the following
- * EFI runtime service before set_virtual_address_map
+ * two EFI runtime services before set_virtual_address_map
* is invoked.
*/
+ efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
efi_phys.set_virtual_address_map =
(efi_set_virtual_address_map_t *)
runtime->set_virtual_address_map;
-
+ /*
+ * Make sure efi_get_time can be called before entering
+ * virtual mode.
+ */
+ efi.get_time = phys_efi_get_time;
early_iounmap(runtime, sizeof(efi_runtime_services_t));
return 0;
@@ -716,10 +736,12 @@ void __init efi_init(void)
efi_enabled = 0;
return;
}
+#ifdef CONFIG_X86_32
if (efi_is_native()) {
x86_platform.get_wallclock = efi_get_time;
x86_platform.set_wallclock = efi_set_rtc_mmss;
}
+#endif
#if EFI_DEBUG
print_efi_memmap();
diff --git a/trunk/arch/x86/platform/efi/efi_64.c b/trunk/arch/x86/platform/efi/efi_64.c
index 06c8b2e662ab..95fd505dfeb6 100644
--- a/trunk/arch/x86/platform/efi/efi_64.c
+++ b/trunk/arch/x86/platform/efi/efi_64.c
@@ -58,21 +58,6 @@ static void __init early_code_mapping_set_exec(int executable)
}
}
-unsigned long efi_call_virt_prelog(void)
-{
- unsigned long saved;
-
- saved = read_cr3();
- write_cr3(real_mode_header->trampoline_pgd);
-
- return saved;
-}
-
-void efi_call_virt_epilog(unsigned long saved)
-{
- write_cr3(saved);
-}
-
void __init efi_call_phys_prelog(void)
{
unsigned long vaddress;
diff --git a/trunk/block/blk-cgroup.c b/trunk/block/blk-cgroup.c
index 3f6d39d23bb6..b8858fb0cafa 100644
--- a/trunk/block/blk-cgroup.c
+++ b/trunk/block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c
index 3c95c4d6e31a..c973249d68cd 100644
--- a/trunk/block/blk-core.c
+++ b/trunk/block/blk-core.c
@@ -40,6 +40,7 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
@@ -219,12 +220,13 @@ static void blk_delay_work(struct work_struct *work)
* Description:
* Sometimes queueing needs to be postponed for a little while, to allow
* resources to come back. This function will make sure that queueing is
- * restarted around the specified time.
+ * restarted around the specified time. Queue lock must be held.
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
- queue_delayed_work(kblockd_workqueue, &q->delay_work,
- msecs_to_jiffies(msecs));
+ if (likely(!blk_queue_dead(q)))
+ queue_delayed_work(kblockd_workqueue, &q->delay_work,
+ msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
@@ -292,6 +294,34 @@ void blk_sync_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_sync_queue);
+/**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q: The queue to run
+ *
+ * Description:
+ * Invoke request handling on a queue if there are any pending requests.
+ * May be used to restart request handling after a request has completed.
+ * This variant runs the queue whether or not the queue has been
+ * stopped. Must be called with the queue lock held and interrupts
+ * disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+ if (unlikely(blk_queue_dead(q)))
+ return;
+
+ /*
+ * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+ * the queue lock internally. As a result multiple threads may be
+ * running such a request function concurrently. Keep track of the
+ * number of active request_fn invocations such that blk_drain_queue()
+ * can wait until all these request_fn calls have finished.
+ */
+ q->request_fn_active++;
+ q->request_fn(q);
+ q->request_fn_active--;
+}
+
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
@@ -305,7 +335,7 @@ void __blk_run_queue(struct request_queue *q)
if (unlikely(blk_queue_stopped(q)))
return;
- q->request_fn(q);
+ __blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -315,11 +345,11 @@ EXPORT_SYMBOL(__blk_run_queue);
*
* Description:
* Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- * of us.
+ * of us. The caller must hold the queue lock.
*/
void blk_run_queue_async(struct request_queue *q)
{
- if (likely(!blk_queue_stopped(q)))
+ if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);
@@ -349,7 +379,7 @@ void blk_put_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_put_queue);
/**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
* @q: queue to drain
* @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
*
@@ -357,15 +387,17 @@ EXPORT_SYMBOL(blk_put_queue);
* If not, only ELVPRIV requests are drained. The caller is responsible
* for ensuring that no new requests which need to be drained are queued.
*/
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+ __releases(q->queue_lock)
+ __acquires(q->queue_lock)
{
int i;
+ lockdep_assert_held(q->queue_lock);
+
while (true) {
bool drain = false;
- spin_lock_irq(q->queue_lock);
-
/*
* The caller might be trying to drain @q before its
* elevator is initialized.
@@ -386,6 +418,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
__blk_run_queue(q);
drain |= q->nr_rqs_elvpriv;
+ drain |= q->request_fn_active;
/*
* Unfortunately, requests are queued at and tracked from
@@ -401,11 +434,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
}
}
- spin_unlock_irq(q->queue_lock);
-
if (!drain)
break;
+
+ spin_unlock_irq(q->queue_lock);
+
msleep(10);
+
+ spin_lock_irq(q->queue_lock);
}
/*
@@ -416,13 +452,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (q->request_fn) {
struct request_list *rl;
- spin_lock_irq(q->queue_lock);
-
blk_queue_for_each_rl(rl, q)
for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
wake_up_all(&rl->wait[i]);
-
- spin_unlock_irq(q->queue_lock);
}
}
@@ -446,7 +478,10 @@ void blk_queue_bypass_start(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
if (drain) {
- blk_drain_queue(q, false);
+ spin_lock_irq(q->queue_lock);
+ __blk_drain_queue(q, false);
+ spin_unlock_irq(q->queue_lock);
+
/* ensure blk_queue_bypass() is %true inside RCU read lock */
synchronize_rcu();
}
@@ -473,20 +508,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it. All future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;
- /* mark @q DEAD, no new request or merges will be allowed afterwards */
+ /* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
spin_lock_irq(lock);
/*
- * Dead queue is permanently in bypass mode till released. Note
+ * A dying queue is permanently in bypass mode till released. Note
* that, unlike blk_queue_bypass_start(), we aren't performing
* synchronize_rcu() after entering bypass mode to avoid the delay
* as some drivers create and destroy a lot of queues while
@@ -499,12 +534,18 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
- /* drain all requests queued before DEAD marking */
- blk_drain_queue(q, true);
+ /*
+ * Drain all requests queued before DYING marking. Set DEAD flag to
+ * prevent that q->request_fn() gets invoked after draining finished.
+ */
+ spin_lock_irq(lock);
+ __blk_drain_queue(q, true);
+ queue_flag_set(QUEUE_FLAG_DEAD, q);
+ spin_unlock_irq(lock);
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -549,7 +590,7 @@ void blk_exit_rl(struct request_list *rl)
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
- return blk_alloc_queue_node(gfp_mask, -1);
+ return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);
@@ -660,7 +701,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
- return blk_init_queue_node(rfn, lock, -1);
+ return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);
@@ -716,7 +757,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
bool blk_get_queue(struct request_queue *q)
{
- if (likely(!blk_queue_dead(q))) {
+ if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
@@ -870,7 +911,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue;
- if (unlikely(blk_queue_dead(q)))
+ if (unlikely(blk_queue_dying(q)))
return NULL;
may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1091,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
if (rq)
return rq;
- if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+ if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
return NULL;
}
@@ -1910,7 +1951,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
return -EIO;
spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return -ENODEV;
}
@@ -2884,27 +2925,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
{
trace_block_unplug(q, depth, !from_schedule);
- /*
- * Don't mess with dead queue.
- */
- if (unlikely(blk_queue_dead(q))) {
- spin_unlock(q->queue_lock);
- return;
- }
-
- /*
- * If we are punting this to kblockd, then we can safely drop
- * the queue_lock before waking kblockd (which needs to take
- * this lock).
- */
- if (from_schedule) {
- spin_unlock(q->queue_lock);
+ if (from_schedule)
blk_run_queue_async(q);
- } else {
+ else
__blk_run_queue(q);
- spin_unlock(q->queue_lock);
- }
-
+ spin_unlock(q->queue_lock);
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -2996,7 +3021,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* Short-circuit if @q is dead
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, -ENODEV);
continue;
}
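
The block-core changes above split the old DEAD state in two: DYING is set first and rejects new requests, while DEAD is set only after __blk_drain_queue() finishes, so request_fn() can never run again. A minimal stand-alone state-machine sketch of that ordering; the fake_* names are illustrative and are not the block-layer API:

#include <stdio.h>

enum q_state { Q_LIVE, Q_DYING, Q_DEAD };   /* illustrative, not QUEUE_FLAG_* */

struct fake_queue {
	enum q_state state;
	int pending;                    /* requests still queued */
};

/* New requests are refused as soon as the queue is dying. */
static int fake_get_request(struct fake_queue *q)
{
	if (q->state != Q_LIVE)
		return -1;              /* the blk_queue_dying() check */
	q->pending++;
	return 0;
}

/* request_fn may only run before the queue is fully dead. */
static void fake_run_queue(struct fake_queue *q)
{
	if (q->state == Q_DEAD)
		return;                 /* the __blk_run_queue_uncond() bail-out */
	while (q->pending > 0)
		q->pending--;           /* "complete" outstanding requests */
}

/* Cleanup: mark DYING, drain, and only then mark DEAD. */
static void fake_cleanup_queue(struct fake_queue *q)
{
	q->state = Q_DYING;             /* no new requests from here on */
	fake_run_queue(q);              /* drain what is already queued */
	q->state = Q_DEAD;              /* request_fn will never run again */
}

int main(void)
{
	struct fake_queue q = { Q_LIVE, 0 };

	fake_get_request(&q);
	fake_cleanup_queue(&q);
	printf("late submit: %d, pending: %d\n", fake_get_request(&q), q.pending);
	return 0;
}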
diff --git a/trunk/block/blk-exec.c b/trunk/block/blk-exec.c
index f71eac35c1b9..74638ec234c8 100644
--- a/trunk/block/blk-exec.c
+++ b/trunk/block/blk-exec.c
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
spin_lock_irq(q->queue_lock);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
rq->errors = -ENXIO;
if (rq->end_io)
rq->end_io(rq, rq->errors);
@@ -78,7 +78,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
if (is_pm_resume)
- q->request_fn(q);
+ __blk_run_queue_uncond(q);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
diff --git a/trunk/block/blk-lib.c b/trunk/block/blk-lib.c
index 9373b58dfab1..b3a1f2b70b31 100644
--- a/trunk/block/blk-lib.c
+++ b/trunk/block/blk-lib.c
@@ -43,11 +43,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
- unsigned int max_discard_sectors;
- unsigned int granularity, alignment, mask;
+ sector_t max_discard_sectors;
+ sector_t granularity, alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
+ struct blk_plug plug;
if (!q)
return -ENXIO;
@@ -57,15 +58,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
- mask = granularity - 1;
- alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+ alignment = bdev_discard_alignment(bdev) >> 9;
+ alignment = sector_div(alignment, granularity);
/*
* Ensure that max_discard_sectors is of the proper
* granularity, so that requests stay aligned after a split.
*/
max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
- max_discard_sectors = round_down(max_discard_sectors, granularity);
+ sector_div(max_discard_sectors, granularity);
+ max_discard_sectors *= granularity;
if (unlikely(!max_discard_sectors)) {
/* Avoid infinite loop below. Being cautious never hurts. */
return -EOPNOTSUPP;
@@ -81,9 +83,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bb.flags = 1 << BIO_UPTODATE;
bb.wait = &wait;
+ blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
- sector_t end_sect;
+ sector_t end_sect, tmp;
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
@@ -98,10 +101,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
* misaligned, stop the discard at the previous aligned sector.
*/
end_sect = sector + req_sects;
- if (req_sects < nr_sects && (end_sect & mask) != alignment) {
- end_sect =
- round_down(end_sect - alignment, granularity)
- + alignment;
+ tmp = end_sect;
+ if (req_sects < nr_sects &&
+ sector_div(tmp, granularity) != alignment) {
+ end_sect = end_sect - alignment;
+ sector_div(end_sect, granularity);
+ end_sect = end_sect * granularity + alignment;
req_sects = end_sect - sector;
}
@@ -117,6 +122,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
atomic_inc(&bb.done);
submit_bio(type, bio);
}
+ blk_finish_plug(&plug);
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
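
blkdev_issue_discard() now uses sector_div() instead of a power-of-two mask, so splits stay aligned even when the discard granularity is not a power of two. A small sketch of the end-sector trimming with plain 64-bit division standing in for sector_div(); the numbers are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Trim a discard request so it ends on a (granularity, alignment) boundary,
 * mirroring the end_sect computation in blkdev_issue_discard() above.
 */
static uint64_t trim_end_sector(uint64_t sector, uint64_t req_sects,
				uint64_t granularity, uint64_t alignment)
{
	uint64_t end_sect = sector + req_sects;

	if (end_sect % granularity != alignment)
		end_sect = ((end_sect - alignment) / granularity) * granularity
			   + alignment;
	return end_sect;
}

int main(void)
{
	/* granularity 24 sectors, alignment 8: boundaries at 8, 32, 56, ... */
	printf("%llu\n", (unsigned long long)trim_end_sector(10, 50, 24, 8)); /* 56 */
	printf("%llu\n", (unsigned long long)trim_end_sector(8, 48, 24, 8));  /* 56 */
	return 0;
}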
diff --git a/trunk/block/blk-settings.c b/trunk/block/blk-settings.c
index 779bb7646bcd..c50ecf0ea3b1 100644
--- a/trunk/block/blk-settings.c
+++ b/trunk/block/blk-settings.c
@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
bottom = b->discard_granularity + alignment;
/* Verify that top and bottom intervals line up */
- if (max(top, bottom) & (min(top, bottom) - 1))
+ if ((max(top, bottom) % min(top, bottom)) != 0)
t->discard_misaligned = 1;
}
@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
- t->discard_alignment = lcm(t->discard_alignment, alignment) &
- (t->discard_granularity - 1);
+ t->discard_alignment = lcm(t->discard_alignment, alignment) %
+ t->discard_granularity;
}
return ret;
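
blk_stack_limits() drops the same power-of-two assumption when stacking discard limits: the misalignment check becomes a modulo, and the combined alignment is folded with lcm() % granularity. A short sketch comparing the old mask test with the new one, with gcd/lcm written out locally (the kernel's lcm() helper is assumed to compute the same value):

#include <stdio.h>

static unsigned gcd(unsigned a, unsigned b)
{
	while (b) {
		unsigned t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static unsigned lcm(unsigned a, unsigned b)
{
	return a && b ? (a / gcd(a, b)) * b : 0;
}

/* Old test: only valid when 'min' is a power of two. */
static int misaligned_mask(unsigned max, unsigned min)
{
	return (max & (min - 1)) != 0;
}

/* New test from the hunk: works for any granularity. */
static int misaligned_mod(unsigned max, unsigned min)
{
	return (max % min) != 0;
}

int main(void)
{
	/* Power of two: both tests agree. */
	printf("%d %d\n", misaligned_mask(96, 32), misaligned_mod(96, 32)); /* 0 0 */
	/* Granularity 24: the mask test wrongly reports misalignment. */
	printf("%d %d\n", misaligned_mask(48, 24), misaligned_mod(48, 24)); /* 1 0 */
	/* Combined alignment folded into a non-power-of-two granularity. */
	printf("%u\n", lcm(8, 12) % 36);                                    /* 24 */
	return 0;
}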
diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c
index ce6204608822..788147797a79 100644
--- a/trunk/block/blk-sysfs.c
+++ b/trunk/block/blk-sysfs.c
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
q = container_of(kobj, struct request_queue, kobj);
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
diff --git a/trunk/block/blk-throttle.c b/trunk/block/blk-throttle.c
index a9664fa0b609..31146225f3d0 100644
--- a/trunk/block/blk-throttle.c
+++ b/trunk/block/blk-throttle.c
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
/* if %NULL and @q is alive, fall back to root_tg */
if (!IS_ERR(blkg))
tg = blkg_to_tg(blkg);
- else if (!blk_queue_dead(q))
+ else if (!blk_queue_dying(q))
tg = td_root_tg(td);
}
diff --git a/trunk/block/blk.h b/trunk/block/blk.h
index ca51543b248c..47fdfdd41520 100644
--- a/trunk/block/blk.h
+++ b/trunk/block/blk.h
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dead(q)) ||
+ if (unlikely(blk_queue_dying(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}
@@ -145,6 +145,8 @@ int blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q);
+void __blk_run_queue_uncond(struct request_queue *q);
+
int blk_dev_init(void);
diff --git a/trunk/block/bsg-lib.c b/trunk/block/bsg-lib.c
index deee61fbb741..650f427d915b 100644
--- a/trunk/block/bsg-lib.c
+++ b/trunk/block/bsg-lib.c
@@ -151,19 +151,6 @@ static int bsg_create_job(struct device *dev, struct request *req)
return -ENOMEM;
}
-/*
- * bsg_goose_queue - restart queue in case it was stopped
- * @q: request q to be restarted
- */
-void bsg_goose_queue(struct request_queue *q)
-{
- if (!q)
- return;
-
- blk_run_queue_async(q);
-}
-EXPORT_SYMBOL_GPL(bsg_goose_queue);
-
/**
* bsg_request_fn - generic handler for bsg requests
* @q: request queue to manage
diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c
index fb52df9744f5..e62e9205b80a 100644
--- a/trunk/block/cfq-iosched.c
+++ b/trunk/block/cfq-iosched.c
@@ -1973,7 +1973,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+ cfqq == RQ_CFQQ(next)) {
list_move(&rq->queuelist, &next->queuelist);
rq_set_fifo_time(rq, rq_fifo_time(next));
}
diff --git a/trunk/block/deadline-iosched.c b/trunk/block/deadline-iosched.c
index 599b12e5380f..90037b5eb17f 100644
--- a/trunk/block/deadline-iosched.c
+++ b/trunk/block/deadline-iosched.c
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
/*
* rq is expired!
*/
- if (time_after(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq_fifo_time(rq)))
return 1;
return 0;
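
The deadline change flips time_after() to time_after_eq(), so a request whose FIFO deadline equals the current jiffies value counts as expired. A sketch of the wrap-safe comparison this relies on, written with simplified macros rather than the real include/linux/jiffies.h definitions:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Wrap-safe comparisons in the style of include/linux/jiffies.h. */
#define time_after(a, b)     ((long)((b) - (a)) < 0)
#define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

int main(void)
{
	jiffies_t deadline = 1000;

	/* Exactly at the deadline: the old check says "not expired",
	 * the new one expires the request. */
	printf("after   : %d\n", time_after((jiffies_t)1000, deadline));      /* 0 */
	printf("after_eq: %d\n", time_after_eq((jiffies_t)1000, deadline));   /* 1 */

	/* Still correct across a counter wrap. */
	printf("wrapped : %d\n", time_after_eq((jiffies_t)5, (jiffies_t)-10)); /* 1 */
	return 0;
}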
diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c
index 9b1d42b62f20..9edba1b8323e 100644
--- a/trunk/block/elevator.c
+++ b/trunk/block/elevator.c
@@ -458,6 +458,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
struct request *rq)
{
struct request *__rq;
+ bool ret;
if (blk_queue_nomerges(q))
return false;
@@ -471,14 +472,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
if (blk_queue_noxmerges(q))
return false;
+ ret = false;
/*
* See if our hash lookup can find a potential backmerge.
*/
- __rq = elv_rqhash_find(q, blk_rq_pos(rq));
- if (__rq && blk_attempt_req_merge(q, __rq, rq))
- return true;
+ while (1) {
+ __rq = elv_rqhash_find(q, blk_rq_pos(rq));
+ if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+ break;
- return false;
+ /* The merged request could be merged with others, try again */
+ ret = true;
+ rq = __rq;
+ }
+
+ return ret;
}
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
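
elv_attempt_insert_merge() now loops: after a back merge succeeds, the surviving request may itself back-merge with yet another queued request, so the hash lookup is retried until it fails. A toy stand-alone version of that retry loop; the data structures are illustrative only:

#include <stdio.h>

struct toy_rq {
	unsigned long start, len;
	int queued;                     /* still live on the toy queue */
};

/* Find a queued request that ends exactly at 'pos' (back-merge candidate). */
static struct toy_rq *hash_find(struct toy_rq *reqs, int n, unsigned long pos)
{
	for (int i = 0; i < n; i++)
		if (reqs[i].queued && reqs[i].start + reqs[i].len == pos)
			return &reqs[i];
	return NULL;
}

/* Retry loop in the spirit of the reworked elv_attempt_insert_merge(). */
static int attempt_insert_merge(struct toy_rq *reqs, int n, struct toy_rq *rq)
{
	int merged = 0;

	while (1) {
		struct toy_rq *prev = hash_find(reqs, n, rq->start);
		if (!prev)
			break;
		prev->len += rq->len;   /* back-merge rq into prev */
		rq->queued = 0;
		merged = 1;
		rq = prev;              /* the merged request may merge again */
	}
	return merged;
}

int main(void)
{
	struct toy_rq reqs[] = {
		{ 0, 8, 1 },            /* ends at sector 8  */
		{ 8, 8, 1 },            /* ends at sector 16 */
	};
	struct toy_rq rq = { 16, 8, 1 };

	attempt_insert_merge(reqs, 2, &rq);
	printf("req0: start=%lu len=%lu\n", reqs[0].start, reqs[0].len); /* 0, 24 */
	return 0;
}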
diff --git a/trunk/block/genhd.c b/trunk/block/genhd.c
index 6cace663a80e..2a6fdf539a69 100644
--- a/trunk/block/genhd.c
+++ b/trunk/block/genhd.c
@@ -1245,7 +1245,7 @@ EXPORT_SYMBOL(blk_lookup_devt);
struct gendisk *alloc_disk(int minors)
{
- return alloc_disk_node(minors, -1);
+ return alloc_disk_node(minors, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_disk);
diff --git a/trunk/block/partitions/Kconfig b/trunk/block/partitions/Kconfig
index cb5f0a3f1b03..75a54e1adbb5 100644
--- a/trunk/block/partitions/Kconfig
+++ b/trunk/block/partitions/Kconfig
@@ -234,8 +234,8 @@ config KARMA_PARTITION
uses a proprietary partition table.
config EFI_PARTITION
- bool "EFI GUID Partition support"
- depends on PARTITION_ADVANCED
+ bool "EFI GUID Partition support" if PARTITION_ADVANCED
+ default y
select CRC32
help
Say Y here if you would like to use hard disks under Linux which
diff --git a/trunk/drivers/amba/tegra-ahb.c b/trunk/drivers/amba/tegra-ahb.c
index bd5de08ad6fd..0576a7dd32a5 100644
--- a/trunk/drivers/amba/tegra-ahb.c
+++ b/trunk/drivers/amba/tegra-ahb.c
@@ -157,6 +157,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
+#ifdef CONFIG_PM_SLEEP
static int tegra_ahb_suspend(struct device *dev)
{
int i;
@@ -176,6 +177,7 @@ static int tegra_ahb_resume(struct device *dev)
gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
return 0;
}
+#endif
static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
tegra_ahb_suspend,
diff --git a/trunk/drivers/bus/Kconfig b/trunk/drivers/bus/Kconfig
index bbec35d21fe5..0f51ed687dc8 100644
--- a/trunk/drivers/bus/Kconfig
+++ b/trunk/drivers/bus/Kconfig
@@ -6,6 +6,7 @@ menu "Bus devices"
config OMAP_OCP2SCP
tristate "OMAP OCP2SCP DRIVER"
+ depends on ARCH_OMAP2PLUS
help
Driver to enable ocp2scp module which transforms ocp interface
protocol to scp protocol. In OMAP4, USB PHY is connected via
diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h
index 6ec0fff79bc2..1042c1b90376 100644
--- a/trunk/drivers/char/agp/intel-agp.h
+++ b/trunk/drivers/char/agp/intel-agp.h
@@ -62,12 +62,6 @@
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED 0x00000002
-#define HSW_PTE_UNCACHED 0x00000000
-#define GEN6_PTE_LLC 0x00000004
-#define GEN6_PTE_LLC_MLC 0x00000006
-#define GEN6_PTE_GFDT 0x00000008
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
@@ -97,7 +91,6 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -148,29 +141,6 @@
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
-#define SNB_GTT_SIZE_0M (0 << 8)
-#define SNB_GTT_SIZE_1M (1 << 8)
-#define SNB_GTT_SIZE_2M (2 << 8)
-#define SNB_GTT_SIZE_MASK (3 << 8)
-
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
@@ -219,66 +189,5 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif
diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c
index 38390f7c6ab6..dbd901e94ea6 100644
--- a/trunk/drivers/char/agp/intel-gtt.c
+++ b/trunk/drivers/char/agp/intel-gtt.c
@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
stolen_size = 0;
break;
}
- } else if (INTEL_GTT_GEN == 6) {
- /*
- * SandyBridge has new memory control reg at 0x50.w
- */
- u16 snb_gmch_ctl;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- case SNB_GMCH_GMS_STOLEN_32M:
- stolen_size = MB(32);
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- stolen_size = MB(64);
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- stolen_size = MB(96);
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- stolen_size = MB(128);
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- stolen_size = MB(160);
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- stolen_size = MB(192);
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- stolen_size = MB(224);
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- stolen_size = MB(256);
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- stolen_size = MB(288);
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- stolen_size = MB(320);
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- stolen_size = MB(352);
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- stolen_size = MB(384);
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- stolen_size = MB(416);
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- stolen_size = MB(448);
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- stolen_size = MB(480);
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- stolen_size = MB(512);
- break;
- }
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
static unsigned int intel_gtt_total_entries(void)
{
- int size;
-
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
return i965_gtt_total_entries();
- else if (INTEL_GTT_GEN == 6) {
- u16 snb_gmch_ctl;
-
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- size = MB(0);
- break;
- case SNB_GTT_SIZE_1M:
- size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- size = MB(2);
- break;
- }
- return size/4;
- } else {
+ else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
{
u8 __iomem *reg;
- if (INTEL_GTT_GEN >= 6)
- return true;
-
if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static bool gen6_check_flags(unsigned int flags)
-{
- return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
- pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- } else { /* set 'normal'/'cached' to LLC by default */
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
- unsigned int flags)
-{
- unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
- u32 pte_flags;
-
- if (type_mask == AGP_USER_MEMORY)
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
- else {
- pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
- if (gfdt)
- pte_flags |= GEN6_PTE_GFDT;
- }
-
- /* gen6 has bit11-4 for physical addr bit39-32 */
- addr |= (addr >> 28) & 0xff0;
- writel(addr | pte_flags, intel_private.gtt + entry);
-
- writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
- u32 reg_addr;
+ u32 reg_addr, gtt_addr;
int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
- if (INTEL_GTT_GEN >= 7)
- size = MB(2);
-
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
- if (INTEL_GTT_GEN == 3) {
- u32 gtt_addr;
-
+ switch (INTEL_GTT_GEN) {
+ case 3:
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
- } else {
- u32 gtt_offset;
-
- switch (INTEL_GTT_GEN) {
- case 5:
- case 6:
- case 7:
- gtt_offset = MB(2);
- break;
- case 4:
- default:
- gtt_offset = KB(512);
- break;
- }
- intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ break;
+ case 5:
+ intel_private.gtt_bus_addr = reg_addr + MB(2);
+ break;
+ default:
+ intel_private.gtt_bus_addr = reg_addr + KB(512);
+ break;
}
if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = gen6_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
- .gen = 6,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = haswell_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
- .gen = 7,
- .setup = i9xx_setup,
- .cleanup = gen6_cleanup,
- .write_entry = valleyview_write_entry,
- .dma_mask_size = 40,
- .check_flags = gen6_check_flags,
-};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", &ironlake_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
- "Sandybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
- "Ivybridge", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
- "ValleyView", &valleyview_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
- "Haswell", &haswell_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
- "Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
diff --git a/trunk/drivers/char/tpm/tpm_ibmvtpm.c b/trunk/drivers/char/tpm/tpm_ibmvtpm.c
index 7da840d487d2..9978609d93b2 100644
--- a/trunk/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/trunk/drivers/char/tpm/tpm_ibmvtpm.c
@@ -38,8 +38,6 @@ static struct vio_device_id tpm_ibmvtpm_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
-DECLARE_WAIT_QUEUE_HEAD(wq);
-
/**
* ibmvtpm_send_crq - Send a CRQ request
* @vdev: vio device struct
@@ -83,6 +81,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm;
u16 len;
+ int sig;
ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
@@ -91,22 +90,23 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return 0;
}
- wait_event_interruptible(wq, ibmvtpm->crq_res.len != 0);
+ sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
+ if (sig)
+ return -EINTR;
+
+ len = ibmvtpm->res_len;
- if (count < ibmvtpm->crq_res.len) {
+ if (count < len) {
dev_err(ibmvtpm->dev,
"Invalid size in recv: count=%ld, crq_size=%d\n",
- count, ibmvtpm->crq_res.len);
+ count, len);
return -EIO;
}
spin_lock(&ibmvtpm->rtce_lock);
- memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, ibmvtpm->crq_res.len);
- memset(ibmvtpm->rtce_buf, 0, ibmvtpm->crq_res.len);
- ibmvtpm->crq_res.valid = 0;
- ibmvtpm->crq_res.msg = 0;
- len = ibmvtpm->crq_res.len;
- ibmvtpm->crq_res.len = 0;
+ memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+ memset(ibmvtpm->rtce_buf, 0, len);
+ ibmvtpm->res_len = 0;
spin_unlock(&ibmvtpm->rtce_lock);
return len;
}
@@ -273,7 +273,6 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
int rc = 0;
free_irq(vdev->irq, ibmvtpm);
- tasklet_kill(&ibmvtpm->tasklet);
do {
if (rc)
@@ -372,7 +371,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
static int tpm_ibmvtpm_resume(struct device *dev)
{
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
- unsigned long flags;
int rc = 0;
do {
@@ -387,10 +385,11 @@ static int tpm_ibmvtpm_resume(struct device *dev)
return rc;
}
- spin_lock_irqsave(&ibmvtpm->lock, flags);
- vio_disable_interrupts(ibmvtpm->vdev);
- tasklet_schedule(&ibmvtpm->tasklet);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+ rc = vio_enable_interrupts(ibmvtpm->vdev);
+ if (rc) {
+ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+ return rc;
+ }
rc = ibmvtpm_crq_send_init(ibmvtpm);
if (rc)
@@ -467,7 +466,7 @@ static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
if (crq->valid & VTPM_MSG_RES) {
if (++crq_q->index == crq_q->num_entry)
crq_q->index = 0;
- rmb();
+ smp_rmb();
} else
crq = NULL;
return crq;
@@ -535,11 +534,9 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
ibmvtpm->vtpm_version = crq->data;
return;
case VTPM_TPM_COMMAND_RES:
- ibmvtpm->crq_res.valid = crq->valid;
- ibmvtpm->crq_res.msg = crq->msg;
- ibmvtpm->crq_res.len = crq->len;
- ibmvtpm->crq_res.data = crq->data;
- wake_up_interruptible(&wq);
+ /* len of the data in rtce buffer */
+ ibmvtpm->res_len = crq->len;
+ wake_up_interruptible(&ibmvtpm->wq);
return;
default:
return;
@@ -559,38 +556,19 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
- unsigned long flags;
-
- spin_lock_irqsave(&ibmvtpm->lock, flags);
- vio_disable_interrupts(ibmvtpm->vdev);
- tasklet_schedule(&ibmvtpm->tasklet);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ibmvtpm_tasklet - Interrupt handler tasklet
- * @data: ibm vtpm device struct
- *
- * Returns:
- * Nothing
- **/
-static void ibmvtpm_tasklet(void *data)
-{
- struct ibmvtpm_dev *ibmvtpm = data;
struct ibmvtpm_crq *crq;
- unsigned long flags;
- spin_lock_irqsave(&ibmvtpm->lock, flags);
+ /* while loop is needed for initial setup (get version and
+ * get rtce_size). There should be only one tpm request at any
+ * given time.
+ */
while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
ibmvtpm_crq_process(crq, ibmvtpm);
crq->valid = 0;
- wmb();
+ smp_wmb();
}
- vio_enable_interrupts(ibmvtpm->vdev);
- spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+ return IRQ_HANDLED;
}
/**
@@ -650,9 +628,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto reg_crq_cleanup;
}
- tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
- (unsigned long)ibmvtpm);
-
rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
tpm_ibmvtpm_driver_name, ibmvtpm);
if (rc) {
@@ -666,13 +641,14 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto init_irq_cleanup;
}
+ init_waitqueue_head(&ibmvtpm->wq);
+
crq_q->index = 0;
ibmvtpm->dev = dev;
ibmvtpm->vdev = vio_dev;
chip->vendor.data = (void *)ibmvtpm;
- spin_lock_init(&ibmvtpm->lock);
spin_lock_init(&ibmvtpm->rtce_lock);
rc = ibmvtpm_crq_send_init(ibmvtpm);
@@ -689,7 +665,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
return rc;
init_irq_cleanup:
- tasklet_kill(&ibmvtpm->tasklet);
do {
rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
diff --git a/trunk/drivers/char/tpm/tpm_ibmvtpm.h b/trunk/drivers/char/tpm/tpm_ibmvtpm.h
index 4296eb4b4d82..bd82a791f995 100644
--- a/trunk/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/trunk/drivers/char/tpm/tpm_ibmvtpm.h
@@ -38,13 +38,12 @@ struct ibmvtpm_dev {
struct vio_dev *vdev;
struct ibmvtpm_crq_queue crq_queue;
dma_addr_t crq_dma_handle;
- spinlock_t lock;
- struct tasklet_struct tasklet;
u32 rtce_size;
void __iomem *rtce_buf;
dma_addr_t rtce_dma_handle;
spinlock_t rtce_lock;
- struct ibmvtpm_crq crq_res;
+ wait_queue_head_t wq;
+ u16 res_len;
u32 vtpm_version;
};
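The reworked response path above replaces the tasklet and the saved CRQ copy with a wait queue plus a single res_len field: the interrupt handler records the response length and wakes the reader, which then copies the data out of the RTCE buffer under rtce_lock. A minimal sketch of the reader side of that handshake (the surrounding tpm_ibmvtpm_recv() bookkeeping is elided and the function name here is illustrative):

/* Sketch: reader side of the wait-queue handshake introduced by this patch. */
static int ibmvtpm_recv_sketch(struct ibmvtpm_dev *ibmvtpm, u8 *buf, size_t count)
{
	u16 len;
	int sig;

	/* Sleep until ibmvtpm_crq_process() records res_len and wakes the queue. */
	sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
	if (sig)
		return -EINTR;

	len = ibmvtpm->res_len;
	if (count < len)
		return -EIO;

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
	ibmvtpm->res_len = 0;		/* mark the buffer as consumed */
	spin_unlock(&ibmvtpm->rtce_lock);

	return len;
}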
diff --git a/trunk/drivers/extcon/extcon-arizona.c b/trunk/drivers/extcon/extcon-arizona.c
index f10f05d4ee9c..414aed50b1bc 100644
--- a/trunk/drivers/extcon/extcon-arizona.c
+++ b/trunk/drivers/extcon/extcon-arizona.c
@@ -166,6 +166,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
if (ret != 0) {
dev_err(arizona->dev, "Failed to read MICDET: %d\n", ret);
+ mutex_unlock(&info->lock);
return IRQ_NONE;
}
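The arizona fix above addresses an error path that returned while still holding info->lock. The general shape of the corrected handler, with hypothetical names standing in for the driver specifics:

/* Hypothetical IRQ thread showing the unlock-before-return pattern fixed above. */
static irqreturn_t example_det_irq(int irq, void *data)
{
	struct example_info *info = data;	/* assumed driver state */
	unsigned int val;
	int ret;

	mutex_lock(&info->lock);

	ret = regmap_read(info->regmap, EXAMPLE_STATUS_REG, &val);
	if (ret != 0) {
		dev_err(info->dev, "Failed to read status: %d\n", ret);
		mutex_unlock(&info->lock);	/* the bug was returning without this */
		return IRQ_NONE;
	}

	/* ... normal detection handling ... */

	mutex_unlock(&info->lock);
	return IRQ_HANDLED;
}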
diff --git a/trunk/drivers/extcon/extcon-class.c b/trunk/drivers/extcon/extcon-class.c
index d398821097f3..60adc04b0561 100644
--- a/trunk/drivers/extcon/extcon-class.c
+++ b/trunk/drivers/extcon/extcon-class.c
@@ -472,7 +472,7 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
if (obj->cable_index < 0)
- return -ENODEV;
+ return obj->cable_index;
obj->user_nb = nb;
diff --git a/trunk/drivers/extcon/extcon-max77693.c b/trunk/drivers/extcon/extcon-max77693.c
index b656dfa401a6..8c17b65eb74d 100644
--- a/trunk/drivers/extcon/extcon-max77693.c
+++ b/trunk/drivers/extcon/extcon-max77693.c
@@ -657,17 +657,17 @@ static int max77693_muic_probe(struct platform_device *pdev)
int ret, i;
u8 id;
- info = kzalloc(sizeof(struct max77693_muic_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
+ GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_kfree;
+ return -ENOMEM;
}
info->dev = &pdev->dev;
info->max77693 = max77693;
- if (info->max77693->regmap_muic)
+ if (info->max77693->regmap_muic) {
dev_dbg(&pdev->dev, "allocate register map\n");
- else {
+ } else {
info->max77693->regmap_muic = devm_regmap_init_i2c(
info->max77693->muic,
&max77693_muic_regmap_config);
@@ -675,7 +675,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
ret = PTR_ERR(info->max77693->regmap_muic);
dev_err(max77693->dev,
"failed to allocate register map: %d\n", ret);
- goto err_regmap;
+ return ret;
}
}
platform_set_drvdata(pdev, info);
@@ -686,11 +686,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
/* Support irq domain for MAX77693 MUIC device */
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max77693_muic_irq *muic_irq = &muic_irqs[i];
- int virq = 0;
+ unsigned int virq = 0;
virq = irq_create_mapping(max77693->irq_domain, muic_irq->irq);
- if (!virq)
+ if (!virq) {
+ ret = -EINVAL;
goto err_irq;
+ }
muic_irq->virq = virq;
ret = request_threaded_irq(virq, NULL,
@@ -702,14 +704,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
" error :%d)\n",
muic_irq->irq, ret);
- for (i = i - 1; i >= 0; i--)
- free_irq(muic_irq->virq, info);
goto err_irq;
}
}
/* Initialize extcon device */
- info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+ GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
@@ -720,7 +721,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
ret = extcon_dev_register(info->edev, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_extcon;
+ goto err_irq;
}
/* Initialize MUIC register by using platform data */
@@ -753,7 +754,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
MAX77693_MUIC_REG_ID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
- goto err_extcon;
+ goto err_irq;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
@@ -765,12 +766,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
return ret;
-err_extcon:
- kfree(info->edev);
err_irq:
-err_regmap:
- kfree(info);
-err_kfree:
+ while (--i >= 0)
+ free_irq(muic_irqs[i].virq, info);
return ret;
}
@@ -783,8 +781,6 @@ static int max77693_muic_remove(struct platform_device *pdev)
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
extcon_dev_unregister(info->edev);
- kfree(info->edev);
- kfree(info);
return 0;
}
diff --git a/trunk/drivers/extcon/extcon-max8997.c b/trunk/drivers/extcon/extcon-max8997.c
index bad76f51161b..93009fe6ef05 100644
--- a/trunk/drivers/extcon/extcon-max8997.c
+++ b/trunk/drivers/extcon/extcon-max8997.c
@@ -1,7 +1,7 @@
/*
* extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Donggeun Kim
*
* This program is free software; you can redistribute it and/or modify
@@ -433,11 +433,11 @@ static int max8997_muic_probe(struct platform_device *pdev)
struct max8997_muic_info *info;
int ret, i;
- info = kzalloc(sizeof(struct max8997_muic_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
+ GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "failed to allocate memory\n");
- ret = -ENOMEM;
- goto err_kfree;
+ return -ENOMEM;
}
info->dev = &pdev->dev;
@@ -450,14 +450,16 @@ static int max8997_muic_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max8997_muic_irq *muic_irq = &muic_irqs[i];
- int virq = 0;
+ unsigned int virq = 0;
virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
- if (!virq)
+ if (!virq) {
+ ret = -EINVAL;
goto err_irq;
+ }
muic_irq->virq = virq;
- ret = request_threaded_irq(virq, NULL,max8997_muic_irq_handler,
+ ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
0, muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
@@ -469,7 +471,8 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
/* External connector */
- info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+ info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+ GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
ret = -ENOMEM;
@@ -480,7 +483,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
ret = extcon_dev_register(info->edev, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register extcon device\n");
- goto err_extcon;
+ goto err_irq;
}
/* Initialize registers according to platform data */
@@ -498,13 +501,9 @@ static int max8997_muic_probe(struct platform_device *pdev)
return ret;
-err_extcon:
- kfree(info->edev);
err_irq:
while (--i >= 0)
free_irq(muic_irqs[i].virq, info);
- kfree(info);
-err_kfree:
return ret;
}
@@ -519,9 +518,6 @@ static int max8997_muic_remove(struct platform_device *pdev)
extcon_dev_unregister(info->edev);
- kfree(info->edev);
- kfree(info);
-
return 0;
}
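Both MUIC probe conversions above follow the same pattern: once the private data and the extcon_dev are obtained with devm_kzalloc(), the kfree()-based error labels disappear and only resources without a managed variant (the requested IRQs) still need explicit unwinding. A rough sketch of the resulting probe shape, with hypothetical names:

/* Sketch: devm-based probe, so allocations are released automatically on
 * probe failure or device removal and only the IRQs need manual cleanup. */
static int example_muic_probe(struct platform_device *pdev)
{
	struct example_muic_info *info;
	int ret, i;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(example_irqs); i++) {
		ret = request_threaded_irq(example_irqs[i].virq, NULL,
					   example_irq_handler, 0,
					   example_irqs[i].name, info);
		if (ret)
			goto err_irq;
	}

	return 0;

err_irq:
	while (--i >= 0)
		free_irq(example_irqs[i].virq, info);
	return ret;			/* 'info' is freed by devm */
}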
diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig
index bf892bd68c17..8ae1f5b19669 100644
--- a/trunk/drivers/gpio/Kconfig
+++ b/trunk/drivers/gpio/Kconfig
@@ -683,4 +683,17 @@ config GPIO_MSIC
Enable support for GPIO on intel MSIC controllers found in
intel MID devices
+comment "USB GPIO expanders:"
+
+config GPIO_VIPERBOARD
+ tristate "Viperboard GPIO a & b support"
+ depends on MFD_VIPERBOARD && USB
+ help
+ Say yes here to access the GPIO signals of Nano River
+ Technologies Viperboard. There are two GPIO chips on the
+ board: gpioa and gpiob.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
+
endif
diff --git a/trunk/drivers/gpio/Makefile b/trunk/drivers/gpio/Makefile
index 76b344683251..c5aebd008dde 100644
--- a/trunk/drivers/gpio/Makefile
+++ b/trunk/drivers/gpio/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VT8500) += gpio-vt8500.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/trunk/drivers/gpio/gpio-da9052.c b/trunk/drivers/gpio/gpio-da9052.c
index a05aacd2777a..29b11e9b6a78 100644
--- a/trunk/drivers/gpio/gpio-da9052.c
+++ b/trunk/drivers/gpio/gpio-da9052.c
@@ -185,7 +185,11 @@ static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
struct da9052_gpio *gpio = to_da9052_gpio(gc);
struct da9052 *da9052 = gpio->da9052;
- return da9052->irq_base + DA9052_IRQ_GPI0 + offset;
+ int irq;
+
+ irq = regmap_irq_get_virq(da9052->irq_data, DA9052_IRQ_GPI0 + offset);
+
+ return irq;
}
static struct gpio_chip reference_gp = {
diff --git a/trunk/drivers/gpio/gpio-tps6586x.c b/trunk/drivers/gpio/gpio-tps6586x.c
index c1b82da56504..29e8e750bd49 100644
--- a/trunk/drivers/gpio/gpio-tps6586x.c
+++ b/trunk/drivers/gpio/gpio-tps6586x.c
@@ -80,6 +80,14 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
val, mask);
}
+static int tps6586x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
+
+ return tps6586x_irq_get_virq(tps6586x_gpio->parent,
+ TPS6586X_INT_PLDO_0 + offset);
+}
+
static int tps6586x_gpio_probe(struct platform_device *pdev)
{
struct tps6586x_platform_data *pdata;
@@ -106,6 +114,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
+ tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
#ifdef CONFIG_OF_GPIO
tps6586x_gpio->gpio_chip.of_node = pdev->dev.parent->of_node;
diff --git a/trunk/drivers/gpio/gpio-twl4030.c b/trunk/drivers/gpio/gpio-twl4030.c
index 00329f2fc05b..9572aa137e6f 100644
--- a/trunk/drivers/gpio/gpio-twl4030.c
+++ b/trunk/drivers/gpio/gpio-twl4030.c
@@ -355,13 +355,13 @@ static struct gpio_chip twl_gpiochip = {
static int gpio_twl4030_pulls(u32 ups, u32 downs)
{
- u8 message[6];
+ u8 message[5];
unsigned i, gpio_bit;
/* For most pins, a pulldown was enabled by default.
* We should have data that's specific to this board.
*/
- for (gpio_bit = 1, i = 1; i < 6; i++) {
+ for (gpio_bit = 1, i = 0; i < 5; i++) {
u8 bit_mask;
unsigned j;
@@ -380,16 +380,16 @@ static int gpio_twl4030_pulls(u32 ups, u32 downs)
static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
{
- u8 message[4];
+ u8 message[3];
/* 30 msec of debouncing is always used for MMC card detect,
* and is optional for everything else.
*/
- message[1] = (debounce & 0xff) | (mmc_cd & 0x03);
+ message[0] = (debounce & 0xff) | (mmc_cd & 0x03);
debounce >>= 8;
- message[2] = (debounce & 0xff);
+ message[1] = (debounce & 0xff);
debounce >>= 8;
- message[3] = (debounce & 0x03);
+ message[2] = (debounce & 0x03);
return twl_i2c_write(TWL4030_MODULE_GPIO, message,
REG_GPIO_DEBEN1, 3);
diff --git a/trunk/drivers/gpio/gpio-viperboard.c b/trunk/drivers/gpio/gpio-viperboard.c
new file mode 100644
index 000000000000..13772996cf24
--- /dev/null
+++ b/trunk/drivers/gpio/gpio-viperboard.c
@@ -0,0 +1,517 @@
+/*
+ * Nano River Technologies viperboard GPIO lib driver
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+
+#define VPRBRD_GPIOA_CLK_1MHZ 0
+#define VPRBRD_GPIOA_CLK_100KHZ 1
+#define VPRBRD_GPIOA_CLK_10KHZ 2
+#define VPRBRD_GPIOA_CLK_1KHZ 3
+#define VPRBRD_GPIOA_CLK_100HZ 4
+#define VPRBRD_GPIOA_CLK_10HZ 5
+
+#define VPRBRD_GPIOA_FREQ_DEFAULT 1000
+
+#define VPRBRD_GPIOA_CMD_CONT 0x00
+#define VPRBRD_GPIOA_CMD_PULSE 0x01
+#define VPRBRD_GPIOA_CMD_PWM 0x02
+#define VPRBRD_GPIOA_CMD_SETOUT 0x03
+#define VPRBRD_GPIOA_CMD_SETIN 0x04
+#define VPRBRD_GPIOA_CMD_SETINT 0x05
+#define VPRBRD_GPIOA_CMD_GETIN 0x06
+
+#define VPRBRD_GPIOB_CMD_SETDIR 0x00
+#define VPRBRD_GPIOB_CMD_SETVAL 0x01
+
+struct vprbrd_gpioa_msg {
+ u8 cmd;
+ u8 clk;
+ u8 offset;
+ u8 t1;
+ u8 t2;
+ u8 invert;
+ u8 pwmlevel;
+ u8 outval;
+ u8 risefall;
+ u8 answer;
+ u8 __fill;
+} __packed;
+
+struct vprbrd_gpiob_msg {
+ u8 cmd;
+ u16 val;
+ u16 mask;
+} __packed;
+
+struct vprbrd_gpio {
+ struct gpio_chip gpioa; /* gpio a related things */
+ u32 gpioa_out;
+ u32 gpioa_val;
+ struct gpio_chip gpiob; /* gpio b related things */
+ u32 gpiob_out;
+ u32 gpiob_val;
+ struct vprbrd *vb;
+};
+
+/* gpioa sampling clock module parameter */
+static unsigned char gpioa_clk;
+static unsigned int gpioa_freq = VPRBRD_GPIOA_FREQ_DEFAULT;
+module_param(gpioa_freq, uint, 0);
+MODULE_PARM_DESC(gpioa_freq,
+ "gpio-a sampling freq in Hz (default is 1000Hz) valid values: 10, 100, 1000, 10000, 100000, 1000000");
+
+/* ----- begin of gpio a chip -------------------------------------------- */
+
+static int vprbrd_gpioa_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret, answer, error = 0;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ /* if io is set to output, just return the saved value */
+ if (gpio->gpioa_out & (1 << offset))
+ return gpio->gpioa_val & (1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_GETIN;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = 0x00;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ error = -EREMOTEIO;
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_IN, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ answer = gamsg->answer & 0x01;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ error = -EREMOTEIO;
+
+ if (error)
+ return error;
+
+ return answer;
+}
+
+static void vprbrd_gpioa_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ if (gpio->gpioa_out & (1 << offset)) {
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gamsg,
+ sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ dev_err(chip->dev, "usb error setting pin value\n");
+ }
+}
+
+static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ gpio->gpioa_out &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETIN;
+ gamsg->clk = gpioa_clk;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = 0x00;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpioa);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+ gpio->gpioa_out |= (1 << offset);
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+/* ----- end of gpio a chip ---------------------------------------------- */
+
+/* ----- begin of gpio b chip -------------------------------------------- */
+
+static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
+ unsigned dir)
+{
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+ int ret;
+
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETDIR;
+ gbmsg->val = cpu_to_be16(dir << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, 0x0000,
+ 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ return -EREMOTEIO;
+
+ return 0;
+}
+
+static int vprbrd_gpiob_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ u16 val;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+ /* if io is set to output, just return the saved value */
+ if (gpio->gpiob_out & (1 << offset))
+ return gpio->gpiob_val & (1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_IN, 0x0000,
+ 0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+ val = gbmsg->val;
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ return ret;
+
+ /* cache the read values */
+ gpio->gpiob_val = be16_to_cpu(val);
+
+ return (gpio->gpiob_val >> offset) & 0x1;
+}
+
+static void vprbrd_gpiob_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+ struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+ if (gpio->gpiob_out & (1 << offset)) {
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
+ gbmsg->val = cpu_to_be16(value << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+ ret = usb_control_msg(vb->usb_dev,
+ usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gbmsg,
+ sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpiob_msg))
+ dev_err(chip->dev, "usb error setting pin value\n");
+ }
+}
+
+static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+
+ gpio->gpiob_out &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = vprbrd_gpiob_setdir(vb, offset, 0);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret)
+ dev_err(chip->dev, "usb error setting pin to input\n");
+
+ return ret;
+}
+
+static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int ret;
+ struct vprbrd_gpio *gpio =
+ container_of(chip, struct vprbrd_gpio, gpiob);
+ struct vprbrd *vb = gpio->vb;
+
+ gpio->gpiob_out |= (1 << offset);
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ ret = vprbrd_gpiob_setdir(vb, offset, 1);
+ if (ret)
+ dev_err(chip->dev, "usb error setting pin to output\n");
+
+ mutex_unlock(&vb->lock);
+
+ vprbrd_gpiob_set(chip, offset, value);
+
+ return ret;
+}
+
+/* ----- end of gpio b chip ---------------------------------------------- */
+
+static int __devinit vprbrd_gpio_probe(struct platform_device *pdev)
+{
+ struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+ struct vprbrd_gpio *vb_gpio;
+ int ret;
+
+ vb_gpio = devm_kzalloc(&pdev->dev, sizeof(*vb_gpio), GFP_KERNEL);
+ if (vb_gpio == NULL)
+ return -ENOMEM;
+
+ vb_gpio->vb = vb;
+ /* registering gpio a */
+ vb_gpio->gpioa.label = "viperboard gpio a";
+ vb_gpio->gpioa.dev = &pdev->dev;
+ vb_gpio->gpioa.owner = THIS_MODULE;
+ vb_gpio->gpioa.base = -1;
+ vb_gpio->gpioa.ngpio = 16;
+ vb_gpio->gpioa.can_sleep = 1;
+ vb_gpio->gpioa.set = vprbrd_gpioa_set;
+ vb_gpio->gpioa.get = vprbrd_gpioa_get;
+ vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
+ vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
+ ret = gpiochip_add(&vb_gpio->gpioa);
+ if (ret < 0) {
+ dev_err(vb_gpio->gpioa.dev, "could not add gpio a");
+ goto err_gpioa;
+ }
+
+ /* registering gpio b */
+ vb_gpio->gpiob.label = "viperboard gpio b";
+ vb_gpio->gpiob.dev = &pdev->dev;
+ vb_gpio->gpiob.owner = THIS_MODULE;
+ vb_gpio->gpiob.base = -1;
+ vb_gpio->gpiob.ngpio = 16;
+ vb_gpio->gpiob.can_sleep = 1;
+ vb_gpio->gpiob.set = vprbrd_gpiob_set;
+ vb_gpio->gpiob.get = vprbrd_gpiob_get;
+ vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
+ vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
+ ret = gpiochip_add(&vb_gpio->gpiob);
+ if (ret < 0) {
+ dev_err(vb_gpio->gpiob.dev, "could not add gpio b");
+ goto err_gpiob;
+ }
+
+ platform_set_drvdata(pdev, vb_gpio);
+
+ return ret;
+
+err_gpiob:
+ ret = gpiochip_remove(&vb_gpio->gpioa);
+
+err_gpioa:
+ return ret;
+}
+
+static int __devexit vprbrd_gpio_remove(struct platform_device *pdev)
+{
+ struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&vb_gpio->gpiob);
+ if (ret == 0)
+ ret = gpiochip_remove(&vb_gpio->gpioa);
+
+ return ret;
+}
+
+static struct platform_driver vprbrd_gpio_driver = {
+ .driver.name = "viperboard-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = vprbrd_gpio_probe,
+ .remove = __devexit_p(vprbrd_gpio_remove),
+};
+
+static int __init vprbrd_gpio_init(void)
+{
+ switch (gpioa_freq) {
+ case 1000000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_1MHZ;
+ break;
+ case 100000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_100KHZ;
+ break;
+ case 10000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_10KHZ;
+ break;
+ case 1000:
+ gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+ break;
+ case 100:
+ gpioa_clk = VPRBRD_GPIOA_CLK_100HZ;
+ break;
+ case 10:
+ gpioa_clk = VPRBRD_GPIOA_CLK_10HZ;
+ break;
+ default:
+ pr_warn("invalid gpioa_freq (%d)\n", gpioa_freq);
+ gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+ }
+
+ return platform_driver_register(&vprbrd_gpio_driver);
+}
+subsys_initcall(vprbrd_gpio_init);
+
+static void __exit vprbrd_gpio_exit(void)
+{
+ platform_driver_unregister(&vprbrd_gpio_driver);
+}
+module_exit(vprbrd_gpio_exit);
+
+MODULE_AUTHOR("Lars Poeschel ");
+MODULE_DESCRIPTION("GPIO driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-gpio");
diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig
index 18321b68b880..983201b450f1 100644
--- a/trunk/drivers/gpu/drm/Kconfig
+++ b/trunk/drivers/gpu/drm/Kconfig
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/trunk/drivers/gpu/drm/Makefile b/trunk/drivers/gpu/drm/Makefile
index 2ff5cefe9ead..6f58c81cfcbc 100644
--- a/trunk/drivers/gpu/drm/Makefile
+++ b/trunk/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
drm-usb-y := drm_usb.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-y += i2c/
diff --git a/trunk/drivers/gpu/drm/ast/ast_ttm.c b/trunk/drivers/gpu/drm/ast/ast_ttm.c
index 1a026ac2dfb4..3602731a6112 100644
--- a/trunk/drivers/gpu/drm/ast/ast_ttm.c
+++ b/trunk/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
static int ast_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
ttm_bo_type_device, &astbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, ast_bo_ttm_destroy);
if (ret)
return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c
index 101e423c8991..dcd1a8c029eb 100644
--- a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
};
-static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
#endif
remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
kfree(ap);
+
+ return 0;
}
static int __devinit
cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- cirrus_kick_out_firmware_fb(pdev);
+ int ret;
+
+ ret = cirrus_kick_out_firmware_fb(pdev);
+ if (ret)
+ return ret;
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c b/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bc83f835c830..1413a26e4905 100644
--- a/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/trunk/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static int cirrus_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
return r;
}
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
ttm_bo_type_device, &cirrusbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, cirrus_bo_ttm_destroy);
if (ret)
return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret)
return ret;
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c
index ef1b22144d37..f2d667b8bee2 100644
--- a/trunk/drivers/gpu/drm/drm_crtc.c
+++ b/trunk/drivers/gpu/drm/drm_crtc.c
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- if (crtc->gamma_store) {
- kfree(crtc->gamma_store);
- crtc->gamma_store = NULL;
- }
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.edid_property,
0);
- drm_connector_attach_property(connector,
+ drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
- if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return -EINVAL;
+ }
+
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-void drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val)
-{
- drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t value)
-{
- return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
- struct drm_property *property, uint64_t *val)
-{
- return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
/* Delete edid, when there is none. */
if (!edid) {
connector->edid_blob_ptr = NULL;
- ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
return ret;
}
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
+ if (!connector->edid_blob_ptr)
+ return -EINVAL;
- ret = drm_connector_property_set_value(connector,
+ ret = drm_object_property_set_value(&connector->base,
dev->mode_config.edid_property,
connector->edid_blob_ptr->base.id);
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ /* Only the driver knows */
+ return true;
} else {
int i;
for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
/* store the property value if successful */
if (!ret)
- drm_connector_property_set_value(connector, property, value);
+ drm_object_property_set_value(&connector->base, property, value);
return ret;
}
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ connector->status = connector_status_unknown;
+
if (connector->funcs->reset)
connector->funcs->reset(connector);
+ }
}
EXPORT_SYMBOL(drm_mode_config_reset);
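The new framebuffer_check() guards above widen the per-plane size computations to 64 bits before comparing against UINT_MAX, so userspace-supplied width, pitch and offset values can no longer wrap a later 32-bit size calculation. The same guard pattern in isolation, as a sketch:

/* Sketch: rejecting 32-bit overflow in user-supplied framebuffer geometry. */
static int example_check_fb_plane(unsigned int width, unsigned int height,
				  unsigned int cpp, unsigned int pitch,
				  unsigned int offset)
{
	if ((uint64_t)width * cpp > UINT_MAX)
		return -ERANGE;		/* one row would not fit in 32 bits */

	if ((uint64_t)height * pitch + offset > UINT_MAX)
		return -ERANGE;		/* buffer end would wrap around */

	if (pitch < width * cpp)
		return -EINVAL;		/* pitch too small for one row */

	return 0;
}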
diff --git a/trunk/drivers/gpu/drm/drm_crtc_helper.c b/trunk/drivers/gpu/drm/drm_crtc_helper.c
index 1227adf74dbc..7b2d378b2576 100644
--- a/trunk/drivers/gpu/drm/drm_crtc_helper.c
+++ b/trunk/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,35 @@
#include
#include
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list);
+ }
+
+ list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them. Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
*
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
- *
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
*
* RETURNS:
* Number of modes found on @connector.
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
- drm_kms_helper_poll_enable(dev);
}
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
}
/**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
- * @x: width of mode
- * @y: height of mode
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on an hdmi link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/**
* drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
*
* LOCKING:
* Caller must hold mode config lock.
*
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
*
* RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
*
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- /* if this is HPD or polled don't check it -
- TV out for instance */
- if (!connector->polled)
+ /* Ignore forced connectors. */
+ if (connector->force)
continue;
- else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
- repoll = true;
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
- !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
- !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);
- if (changed) {
- /* send a uevent + call fbdev */
- drm_sysfs_hotplug_event(dev);
- if (dev->mode_config.funcs->output_poll_changed)
- dev->mode_config.funcs->output_poll_changed(dev);
- }
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
return;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->polled)
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool changed = false;
+
if (!dev->mode_config.poll_enabled)
return;
- /* kill timer and schedule immediate execution, this doesn't block */
- cancel_delayed_work(&dev->mode_config.output_poll_work);
- if (drm_kms_helper_poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ if (old_status != connector->status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
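With drm_helper_hpd_irq_event() now re-detecting only the HPD-capable connectors and sending the hotplug event itself, a driver's hotplug interrupt handling reduces to calling it from (threaded) interrupt context. A minimal sketch for a hypothetical driver:

/* Sketch: threaded hotplug IRQ handler built on the reworked helper. */
static irqreturn_t example_hpd_irq_thread(int irq, void *arg)
{
	struct drm_device *dev = arg;

	/* Re-probes HPD-capable connectors and, on a status change,
	 * fires the uevent via drm_kms_helper_hotplug_event(). */
	drm_helper_hpd_irq_event(dev);

	return IRQ_HANDLED;
}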
diff --git a/trunk/drivers/gpu/drm/drm_dp_i2c_helper.c b/trunk/drivers/gpu/drm/drm_dp_helper.c
similarity index 58%
rename from trunk/drivers/gpu/drm/drm_dp_i2c_helper.c
rename to trunk/drivers/gpu/drm/drm_dp_helper.c
index 7f246f212457..89e196627160 100644
--- a/trunk/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/trunk/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
#include
#include
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
-
+
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
-
}
static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
return 0;
}
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
-
+
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(100);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(400);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
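The link-training helpers added above factor the DPCD status parsing and the mandated training delays out of the individual drivers. A rough sketch of a clock-recovery loop built on them, where read_link_status(), adjust_train_set() and write_train_set() stand in for driver-specific AUX accessors:

/* Sketch: clock-recovery stage of DP link training using the new helpers. */
static bool example_dp_clock_recovery(struct example_dp *dp, int lane_count,
				      u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int tries;

	for (tries = 0; tries < 5; tries++) {
		drm_dp_link_train_clock_recovery_delay(dpcd);

		if (!read_link_status(dp, link_status))
			return false;

		if (drm_dp_clock_recovery_ok(link_status, lane_count))
			return true;

		/* Adjust voltage swing/pre-emphasis from the sink's request. */
		adjust_train_set(dp, link_status, lane_count);
		write_train_set(dp, lane_count);
	}

	return false;
}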
diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c
index fadcd44ff196..5a3770fbd770 100644
--- a/trunk/drivers/gpu/drm/drm_edid.c
+++ b/trunk/drivers/gpu/drm/drm_edid.c
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
- int i;
- u32 *raw_edid = (u32 *)in_edid;
+ if (memchr_inv(in_edid, 0, length))
+ return false;
- for (i = 0; i < length / 4; i++)
- if (*(raw_edid + i) != 0)
- return false;
return true;
}
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+/*
+ * Looks for a CEA mode matching given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+ struct drm_display_mode *cea_mode;
+ u8 mode;
+
+ for (mode = 0; mode < drm_num_cea_modes; mode++) {
+ cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+
+ if (drm_mode_equal(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
+
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_LOG_KMS("HDMI: DVI dual %d, "
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, 0 in case it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+ uint8_t i;
+
+ for (i = 0; i < drm_num_cea_modes; i++)
+ if (drm_mode_equal(mode, &edid_cea_modes[i]))
+ return i + 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
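Both drm_match_cea_mode() and drm_mode_cea_vic() answer the same question, whether a display mode is one of the CEA-861 modes and which Video ID Code it carries, which HDMI encoders need when constructing the AVI infoframe. A minimal sketch of the lookup from a hypothetical caller:

/* Sketch: querying the CEA VIC for a mode about to be set on an HDMI output. */
static void example_log_cea_vic(struct drm_display_mode *mode)
{
	u8 vic = drm_match_cea_mode(mode);

	if (vic)
		DRM_DEBUG_KMS("mode %s is CEA mode, VIC %d\n", mode->name, vic);
	else
		DRM_DEBUG_KMS("mode %s is not a CEA-861 mode\n", mode->name);
}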
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index 4d58d7e6af3f..954d175bd7fa 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,8 @@
* Dave Airlie
* Jesse Barnes
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include
#include
#include
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently of the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ */
+
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
if (mode->force) {
const char *s;
switch (mode->force) {
- case DRM_FORCE_OFF: s = "OFF"; break;
- case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
default:
- case DRM_FORCE_ON: s = "ON"; break;
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
}
DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
if (panic_timeout < 0)
return 0;
- printk(KERN_ERR "panic occurred, switching back to text console\n");
+ pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
- drm_connector_property_set_value(connector,
+ drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: unregistered panic notifier\n");
+ pr_info("drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != sizes.surface_bpp) {
+ if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- }
+
/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
/* set the fb pointer */
- for (i = 0; i < fb_helper->crtc_count; i++) {
+ for (i = 0; i < fb_helper->crtc_count; i++)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
- }
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0) {
+ if (register_framebuffer(info) < 0)
return -EINVAL;
- }
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
+ dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
} else {
drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "drm: registered panic notifier\n");
+ dev_info(fb_helper->dev->dev, "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
- if (strict) {
+ if (strict)
enable = connector->status == connector_status_connected;
- } else {
+ else
enable = connector->status != connector_status_disconnected;
- }
+
return enable;
}
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
continue;
- }
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
+ if (!crtcs || !modes || !enabled) {
+ DRM_ERROR("Memory allocation failed\n");
+ goto out;
+ }
+
drm_enable_connectors(fb_helper, enabled);
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
}
}
+out:
kfree(crtcs);
kfree(modes);
kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/**
* drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
*
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
/*
* we shouldn't end up with no modes here.
*/
- if (count == 0) {
- printk(KERN_INFO "No connectors reported connected with modes\n");
- }
+ if (count == 0)
+ dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
drm_setup_crtcs(fb_helper);
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
- * probing all the outputs attached to the fb.
+ * probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
* LOCKING:
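
As a rough guide, a KMS driver wires these fbdev helpers up at load time
roughly as sketched below (my_fb_helper_funcs is a hypothetical driver-provided
ops table; error handling is trimmed to the essentials):

static int example_fbdev_init(struct drm_device *dev)
{
	struct drm_fb_helper *helper;
	int ret;

	helper = kzalloc(sizeof(*helper), GFP_KERNEL);
	if (!helper)
		return -ENOMEM;

	helper->funcs = &my_fb_helper_funcs;

	ret = drm_fb_helper_init(dev, helper,
				 dev->mode_config.num_crtc,
				 dev->mode_config.num_connector);
	if (ret) {
		kfree(helper);
		return ret;
	}

	/* One fbdev emulation spanning all currently known connectors. */
	drm_fb_helper_single_add_all_connectors(helper);

	/* Pick a sane cloned configuration and probe a 32 bpp framebuffer. */
	drm_fb_helper_initial_config(helper, 32);
	return 0;
}
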
diff --git a/trunk/drivers/gpu/drm/drm_hashtab.c b/trunk/drivers/gpu/drm/drm_hashtab.c
index c3745c4d46d8..80254547a3f8 100644
--- a/trunk/drivers/gpu/drm/drm_hashtab.c
+++ b/trunk/drivers/gpu/drm/drm_hashtab.c
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
- }
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return list;
if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+ unsigned long key)
+{
+ struct drm_hash_item *entry;
+ struct hlist_head *h_list;
+ struct hlist_node *list;
+ unsigned int hashed_key;
+
+ hashed_key = hash_long(key, ht->order);
+ h_list = &ht->table[hashed_key];
+ hlist_for_each_entry_rcu(entry, list, h_list, head) {
+ if (entry->key == key)
+ return list;
+ if (entry->key > key)
+ break;
+ }
+ return NULL;
+}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
- hlist_for_each(list, h_list) {
- entry = hlist_entry(list, struct drm_hash_item, head);
+ hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = list;
}
if (parent) {
- hlist_add_after(parent, &item->head);
+ hlist_add_after_rcu(parent, &item->head);
} else {
- hlist_add_head(&item->head, h_list);
+ hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;
- list = drm_ht_find_key(ht, key);
+ list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
- hlist_del_init(list);
+ hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
- hlist_del_init(&item->head);
+ hlist_del_init_rcu(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
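
The RCU conversion above implies a locking scheme along these lines: lookups
may run under rcu_read_lock() alone, while insert/remove still need an external
lock to serialize writers. A hedged sketch, with my_lock standing in for
whatever mutex the caller already owns:

static struct drm_hash_item *example_lookup(struct drm_open_hash *ht,
					    unsigned long key)
{
	struct drm_hash_item *item = NULL;

	rcu_read_lock();
	if (drm_ht_find_item(ht, key, &item))
		item = NULL;	/* not found */
	rcu_read_unlock();

	/* The caller must guarantee the item outlives the RCU section. */
	return item;
}

static int example_insert(struct drm_open_hash *ht, struct drm_hash_item *item,
			  struct mutex *my_lock)
{
	int ret;

	mutex_lock(my_lock);	/* writers still serialize against each other */
	ret = drm_ht_insert_item(ht, item);
	mutex_unlock(my_lock);

	return ret;
}
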
diff --git a/trunk/drivers/gpu/drm/drm_ioctl.c b/trunk/drivers/gpu/drm/drm_ioctl.c
index 23dd97506f28..e77bd8b57df2 100644
--- a/trunk/drivers/gpu/drm/drm_ioctl.c
+++ b/trunk/drivers/gpu/drm/drm_ioctl.c
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
break;
+ case DRM_CAP_TIMESTAMP_MONOTONIC:
+ req->value = drm_timestamp_monotonic;
+ break;
default:
return -EINVAL;
}
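
From userspace, the new capability can be queried through the generic GET_CAP
ioctl. A hedged example, assuming a uapi drm.h that already defines
DRM_CAP_TIMESTAMP_MONOTONIC (added by this series) and using /dev/dri/card0
purely as an example node:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_TIMESTAMP_MONOTONIC };
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0 || ioctl(fd, DRM_IOCTL_GET_CAP, &cap) < 0)
		return 1;

	/* value is 1 when vblank/flip timestamps use CLOCK_MONOTONIC. */
	printf("monotonic timestamps: %llu\n", (unsigned long long)cap.value);
	close(fd);
	return 0;
}
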
diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c
index 3a3d0ce891b9..19c01ca3cc76 100644
--- a/trunk/drivers/gpu/drm/drm_irq.c
+++ b/trunk/drivers/gpu/drm/drm_irq.c
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
s64 diff_ns;
int vblrc;
struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
do {
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
unsigned flags,
struct drm_crtc *refcrtc)
{
- struct timeval stime, raw_time;
+ ktime_t stime, etime, mono_time_offset;
+ struct timeval tv_etime;
struct drm_display_mode *mode;
int vbl_status, vtotal, vdisplay;
int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
preempt_disable();
/* Get system timestamp before query. */
- do_gettimeofday(&stime);
+ stime = ktime_get();
/* Get vertical and horizontal scanout pos. vpos, hpos. */
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
/* Get system timestamp after query. */
- do_gettimeofday(&raw_time);
+ etime = ktime_get();
+ if (!drm_timestamp_monotonic)
+ mono_time_offset = ktime_get_monotonic_offset();
preempt_enable();
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}
- duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status |= 0x8;
}
+ if (!drm_timestamp_monotonic)
+ etime = ktime_sub(etime, mono_time_offset);
+
+ /* save this only for debugging purposes */
+ tv_etime = ktime_to_timeval(etime);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+ etime = ktime_sub_ns(etime, delta_ns);
+ *vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
crtc, (int)vbl_status, hpos, vpos,
- (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+ (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
(int)duration_ns/1000, i);
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+static struct timeval get_drm_timestamp(void)
+{
+ ktime_t now;
+
+ now = ktime_get();
+ if (!drm_timestamp_monotonic)
+ now = ktime_sub(now, ktime_get_monotonic_offset());
+
+ return ktime_to_timeval(now);
+}
+
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
}
/* GPU high precision timestamp query unsupported or failed.
- * Return gettimeofday timestamp as best estimate.
+ * Return current monotonic/gettimeofday timestamp as best estimate.
*/
- do_gettimeofday(tvblank);
+ *tvblank = get_drm_timestamp();
return 0;
}
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+ e->event.sequence = seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+ unsigned int seq;
+ if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ now = get_drm_timestamp();
+ }
+ send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
+
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * This function takes the vblank and event locks itself; callers must not hold them.
+ */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
-
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
+ spin_unlock(&dev->event_lock);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ send_vblank_event(dev, e, seq, &now);
vblwait->reply.sequence = seq;
- trace_drm_vblank_event_delivered(current->pid, pipe,
- vblwait->request.sequence);
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
DRM_DEBUG("vblank event on %d, current %d\n",
e->event.sequence, seq);
- e->event.sequence = seq;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
- e->event.sequence);
+ send_vblank_event(dev, e, seq, &now);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
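
For drivers, the net effect is that a page-flip completion handler can be
reduced to something like the sketch below (my_crtc and its pending_event
member are hypothetical driver state; dev->event_lock is held around the call,
as required by drm_send_vblank_event()):

static void example_finish_page_flip(struct drm_device *dev,
				     struct my_crtc *crtc)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (crtc->pending_event) {
		/* Fills in sequence/timestamp and wakes any waiters. */
		drm_send_vblank_event(dev, crtc->pipe, crtc->pending_event);
		crtc->pending_event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_vblank_put(dev, crtc->pipe);
}
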
diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c
index 59450f39bf96..d8da30e90db5 100644
--- a/trunk/drivers/gpu/drm/drm_modes.c
+++ b/trunk/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
*
* Describe @mode using DRM_DEBUG.
*/
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
"0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
* RETURNS:
* @mode->hdisplay
*/
-int drm_mode_width(struct drm_display_mode *mode)
+int drm_mode_width(const struct drm_display_mode *mode)
{
return mode->hdisplay;
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
* RETURNS:
* @mode->vdisplay
*/
-int drm_mode_height(struct drm_display_mode *mode)
+int drm_mode_height(const struct drm_display_mode *mode)
{
return mode->vdisplay;
}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
* RETURNS:
* True if the modes are equal, false otherwise.
*/
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
* the same */
diff --git a/trunk/drivers/gpu/drm/drm_pci.c b/trunk/drivers/gpu/drm/drm_pci.c
index ba33144257e5..754bc96e10c7 100644
--- a/trunk/drivers/gpu/drm/drm_pci.c
+++ b/trunk/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
- u32 lnkcap, lnkcap2;
+ u32 lnkcap = 0, lnkcap2 = 0;
*mask = 0;
if (!dev->pdev)
diff --git a/trunk/drivers/gpu/drm/drm_stub.c b/trunk/drivers/gpu/drm/drm_stub.c
index c236fd27eba6..200e104f1fa0 100644
--- a/trunk/drivers/gpu/drm/drm_stub.c
+++ b/trunk/drivers/gpu/drm/drm_stub.c
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
struct idr drm_minors_idr;
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
if (!file_priv->master)
return -EINVAL;
- if (!file_priv->minor->master &&
- file_priv->minor->master != file_priv->master) {
- mutex_lock(&dev->struct_mutex);
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
+ if (file_priv->minor->master)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
}
- mutex_unlock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
list_del(&dev->driver_item);
- if (dev->devname) {
- kfree(dev->devname);
- dev->devname = NULL;
- }
+ kfree(dev->devname);
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
diff --git a/trunk/drivers/gpu/drm/drm_sysfs.c b/trunk/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062af..02296653a058 100644
--- a/trunk/drivers/gpu/drm/drm_sysfs.c
+++ b/trunk/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;
- ret = drm_connector_property_get_value(connector,
+ ret = drm_object_property_get_value(&connector->base,
dev->mode_config.dpms_property,
&dpms_status);
if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}
- ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
diff --git a/trunk/drivers/gpu/drm/exynos/Kconfig b/trunk/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..1d1f1e5e33f0 100644
--- a/trunk/drivers/gpu/drm/exynos/Kconfig
+++ b/trunk/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_IOMMU
+ bool "EXYNOS DRM IOMMU Support"
+ depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ help
+ Choose this option if you want to use IOMMU feature for DRM.
+
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+ bool "Exynos DRM IPP"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+ bool "Exynos DRM FIMC"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+ bool "Exynos DRM Rotator"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+ bool "Exynos DRM GSC"
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ help
+ Choose this option if you want to use Exynos GSC for DRM.
diff --git a/trunk/drivers/gpu/drm/exynos/Makefile b/trunk/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..639b49e1ec05 100644
--- a/trunk/drivers/gpu/drm/exynos/Makefile
+++ b/trunk/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_ddc.c b/trunk/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1d..bef43e0342a6 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
{ },
};
+#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
+#endif
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
- .of_match_table = hdmiddc_match_types,
+ .of_match_table = of_match_ptr(hdmiddc_match_types),
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..9601bad47a2e 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,89 +33,64 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
- dma_addr_t start_addr;
- unsigned int npages, i = 0;
- struct scatterlist *sgl;
int ret = 0;
+ enum dma_attr attr;
+ unsigned int nr_pages;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return -EINVAL;
- }
-
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
- if (buf->size >= SZ_1M) {
- npages = buf->size >> SECTION_SHIFT;
- buf->page_size = SECTION_SIZE;
- } else if (buf->size >= SZ_64K) {
- npages = buf->size >> 16;
- buf->page_size = SZ_64K;
- } else {
- npages = buf->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
- }
+ init_dma_attrs(&buf->dma_attrs);
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- return -ENOMEM;
- }
+ /*
+	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
+	 * region will be allocated; otherwise the buffer is made as
+	 * physically contiguous as possible.
+ */
+ if (flags & EXYNOS_BO_CONTIG)
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- kfree(buf->sgt);
- buf->sgt = NULL;
- return -ENOMEM;
- }
+ /*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
+	 * mapping; otherwise use a cachable mapping.
+ */
+ if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+ attr = DMA_ATTR_WRITE_COMBINE;
+ else
+ attr = DMA_ATTR_NON_CONSISTENT;
- buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
- if (!buf->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err1;
- }
+ dma_set_attr(attr, &buf->dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
- buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+ buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err2;
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
}
- sgl = buf->sgt->sgl;
- start_addr = buf->dma_addr;
-
- while (i < npages) {
- buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
- sg_dma_address(sgl) = start_addr;
- start_addr += buf->page_size;
- sgl = sg_next(sgl);
- i++;
+ nr_pages = buf->size >> PAGE_SHIFT;
+ buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+ if (!buf->sgt) {
+ DRM_ERROR("failed to get sg table.\n");
+ ret = -ENOMEM;
+ goto err_free_attrs;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
return ret;
-err2:
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
-err1:
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
return ret;
}
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- /*
- * release only physically continuous memory and
- * non-continuous memory would be released by exynos
- * gem framework.
- */
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return;
- }
-
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
- (unsigned long)buf->kvaddr,
+ DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->dma_addr,
buf->size);
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
- kfree(buf->pages);
- buf->pages = NULL;
-
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+ dma_free_attrs(dev->dev, buf->size, buf->pages,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
}
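
Userspace selects between these allocation paths through the buffer flags it
passes at creation time. A hedged example, assuming the exynos_drm.h uapi
header and an already-open DRM fd, requesting a physically contiguous,
write-combined buffer:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>

static int example_alloc_contig(int fd, unsigned long size,
				unsigned int *handle)
{
	struct drm_exynos_gem_create req;

	memset(&req, 0, sizeof(req));
	req.size = size;
	/* EXYNOS_BO_CONTIG maps to DMA_ATTR_FORCE_CONTIGUOUS above. */
	req.flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) < 0)
		return -1;

	*handle = req.handle;
	return 0;
}
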
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..25cf16285033 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..2efa4b031d73 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
+ spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
&dev_priv->pageflip_event_list);
+ spin_unlock_irq(&dev->event_lock);
crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
NULL);
if (ret) {
crtc->fb = old_fb;
+
+ spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ spin_unlock_irq(&dev->event_lock);
goto out;
}
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..61d5a8402eb8 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,70 +30,108 @@
#include
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
- unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+ struct device *dev,
+ struct dma_buf_attachment *attach)
{
- struct sg_table *sgt = NULL;
- struct scatterlist *sgl;
- int i, ret;
+ struct exynos_drm_dmabuf_attachment *exynos_attach;
- sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
- if (!sgt)
- goto out;
+ exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+ if (!exynos_attach)
+ return -ENOMEM;
- ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
- if (ret)
- goto err_free_sgt;
+ exynos_attach->dir = DMA_NONE;
+ attach->priv = exynos_attach;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
+ return 0;
+}
- for_each_sg(sgt->sgl, sgl, nr_pages, i)
- sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+ struct sg_table *sgt;
- return sgt;
+ if (!exynos_attach)
+ return;
-err_free_sgt:
- kfree(sgt);
- sgt = NULL;
-out:
- return NULL;
+ sgt = &exynos_attach->sgt;
+
+ if (exynos_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ exynos_attach->dir);
+
+ sg_free_table(sgt);
+ kfree(exynos_attach);
+ attach->priv = NULL;
}
static struct sg_table *
exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
+ struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
+ struct scatterlist *rd, *wr;
struct sg_table *sgt = NULL;
- unsigned int npages;
- int nents;
+ unsigned int i;
+ int nents, ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
- mutex_lock(&dev->struct_mutex);
+ if (WARN_ON(dir == DMA_NONE))
+ return ERR_PTR(-EINVAL);
+
+ /* just return current sgt if already requested. */
+ if (exynos_attach->dir == dir)
+ return &exynos_attach->sgt;
+
+ /* reattaching is not allowed. */
+ if (WARN_ON(exynos_attach->dir != DMA_NONE))
+ return ERR_PTR(-EBUSY);
buf = gem_obj->buffer;
+ if (!buf) {
+ DRM_ERROR("buffer is null.\n");
+ return ERR_PTR(-ENOMEM);
+ }
- /* there should always be pages allocated. */
- if (!buf->pages) {
- DRM_ERROR("pages is null.\n");
- goto err_unlock;
+ sgt = &exynos_attach->sgt;
+
+ ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("failed to alloc sgt.\n");
+ return ERR_PTR(-ENOMEM);
}
- npages = buf->size / buf->page_size;
+ mutex_lock(&dev->struct_mutex);
- sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
- if (!sgt) {
- DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+ rd = buf->sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sgt = ERR_PTR(-EIO);
goto err_unlock;
}
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
- npages, buf->size, buf->page_size);
+ exynos_attach->dir = dir;
+ attach->priv = exynos_attach;
+
+ DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
- sgt = NULL;
+ /* Nothing to do. */
}
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
}
static struct dma_buf_ops exynos_dmabuf_ops = {
+ .attach = exynos_gem_attach_dma_buf,
+ .detach = exynos_gem_detach_dma_buf,
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
.kmap = exynos_gem_dmabuf_kmap,
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
- struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
- buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
- if (!buffer->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err_free_pages;
+ goto err_free_buffer;
}
sgl = sgt->sgl;
- if (sgt->nents == 1) {
- buffer->dma_addr = sg_dma_address(sgt->sgl);
- buffer->size = sg_dma_len(sgt->sgl);
+ buffer->size = dma_buf->size;
+ buffer->dma_addr = sg_dma_address(sgl);
+ if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
- unsigned int i = 0;
-
- buffer->dma_addr = sg_dma_address(sgl);
- while (i < sgt->nents) {
- buffer->pages[i] = sg_page(sgl);
- buffer->size += sg_dma_len(sgl);
- sgl = sg_next(sgl);
- i++;
- }
-
+ /*
+	 * this case could be CONTIG or NONCONTIG type but for now
+	 * it is set to NONCONTIG.
+	 * TODO: find a way for the exporter to notify the importer
+	 * of its own buffer type.
+ */
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
-err_free_pages:
- kfree(buffer->pages);
- buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
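
Seen from an importer, the cached-attachment scheme above means the first
dma_buf_map_attachment() in a given direction builds and maps the sg_table,
later calls in the same direction return the cached table, and the unmap
callback is deliberately a no-op; the mapping only goes away at detach time.
A hedged importer-side sketch:

static struct sg_table *example_import_map(struct dma_buf *buf,
					   struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Maps (and, on this exporter, caches) the scatter/gather table. */
	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt))
		dma_buf_detach(buf, attach);

	return sgt;
}
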
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..e0a8e8024b01 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,8 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -49,6 +51,9 @@
#define VBLANK_OFF_DELAY 50000
+/* platform device pointer for exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
+ /*
+	 * create a mapping to manage the iommu table and set a pointer to
+	 * the iommu mapping structure in iommu_mapping of the private data.
+	 * this iommu_mapping can also be used to check whether iommu is
+	 * supported or not.
+ */
+ ret = drm_create_iommu_mapping(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to create iommu mapping.\n");
+ goto err_crtc;
+ }
+
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+ drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
+
+ drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+ exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+ exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+ exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+ exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
if (ret < 0)
goto out_common_hdmi;
+
+ ret = exynos_platform_device_hdmi_register();
+ if (ret < 0)
+ goto out_common_hdmi_dev;
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
goto out_g2d;
#endif
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ ret = platform_driver_register(&fimc_driver);
+ if (ret < 0)
+ goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ ret = platform_driver_register(&rotator_driver);
+ if (ret < 0)
+ goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ ret = platform_driver_register(&gsc_driver);
+ if (ret < 0)
+ goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = platform_driver_register(&ipp_driver);
+ if (ret < 0)
+ goto out_ipp;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
+ goto out_drm;
+
+ exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+ NULL, 0);
+ if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+ ret = PTR_ERR(exynos_drm_pdev);
goto out;
+ }
return 0;
out:
+ platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
out_g2d:
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
platform_driver_unregister(&vidi_driver);
+out_vidi:
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
out_common_hdmi:
platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
{
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ platform_device_unregister(exynos_drm_pdev);
+
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+ platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
+ exynos_platform_device_hdmi_unregister();
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
platform_driver_unregister(&hdmi_driver);
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..f5a97745bf93 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
* @commit: apply hardware specific overlay data to registers.
* @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- * hardware overlay is disabled.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
void (*commit)(struct device *subdrv_dev, int zpos);
void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
- void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
* @pixel_format: fourcc pixel format of this overlay
* @dma_addr: array of bus(accessed by dma) address to the memory region
* allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
* @zpos: order of overlay layer(z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
unsigned int pitch;
uint32_t pixel_format;
dma_addr_t dma_addr[MAX_FB_BUFFER];
- void __iomem *vaddr[MAX_FB_BUFFER];
int zpos;
bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ * hardware overlay is updated.
*/
struct exynos_drm_manager_ops {
void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
void (*commit)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
+ void (*wait_for_vblank)(struct device *subdrv_dev);
};
/*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
- struct list_head gem_list;
- unsigned int gem_nr;
+ struct list_head userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+ struct device *dev;
+ struct list_head event_list;
};
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
+ struct exynos_drm_ipp_private *ipp_priv;
};
/*
* Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *	with iommu, the device address space starts from this address,
+ *	otherwise a default one is used.
+ * @da_space_size: size of the device address space.
+ *	if 0, a default value is used for it.
+ * @da_space_order: order of the device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
+
+ unsigned long da_start;
+ unsigned long da_space_size;
+ unsigned long da_space_order;
};
/*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+/*
+ * this function registers exynos drm hdmi platform device. It ensures only one
+ * instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
extern struct platform_driver fimd_driver;
extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
#endif
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index f2df06c603f7..301485215a70 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -234,6 +234,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
exynos_encoder->dpms = DRM_MODE_DPMS_ON;
}
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+ struct exynos_drm_encoder *exynos_encoder;
+ struct exynos_drm_manager_ops *ops;
+ struct drm_device *dev = fb->dev;
+ struct drm_encoder *encoder;
+
+ /*
+ * make sure that overlay data are updated to real hardware
+ * for all encoders.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ exynos_encoder = to_exynos_encoder(encoder);
+ ops = exynos_encoder->manager->ops;
+
+ /*
+ * wait for vblank interrupt
+ * - this makes sure that overlay data are updated to
+ * real hardware.
+ */
+ if (ops->wait_for_vblank)
+ ops->wait_for_vblank(exynos_encoder->manager->dev);
+ }
+}
+
+
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_plane *plane;
@@ -505,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
if (overlay_ops && overlay_ops->disable)
overlay_ops->disable(manager->dev, zpos);
-
- /*
- * wait for vblank interrupt
- * - this makes sure that hardware overlay is disabled to avoid
- * for the dma accesses to memory after gem buffer was released
- * because the setting for disabling the overlay will be updated
- * at vsync.
- */
- if (overlay_ops && overlay_ops->wait_for_vblank)
- overlay_ops->wait_for_vblank(manager->dev);
}
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..88bb25a2a917 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
#endif
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..5426cc5a5e8d 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
#include
#include
#include
+#include
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ unsigned int flags;
+
+ /*
+ * if exynos drm driver supports iommu then framebuffer can use
+ * all the buffer types.
+ */
+ if (is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ flags = exynos_gem_obj->flags;
+
+ /*
+	 * without iommu support, physically non-contiguous memory is not
+	 * supported for the framebuffer.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
DRM_DEBUG_KMS("%s\n", __FILE__);
+	/* make sure that overlay data are updated before releasing the fb. */
+ exynos_drm_encoder_complete_scanout(fb);
+
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
return ERR_PTR(ret);
}
- drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
return &exynos_fb->fb;
}
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
struct exynos_drm_fb *exynos_fb;
- int i;
+ int i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb)) {
- drm_gem_object_unreference_unlocked(obj);
- return fb;
+ exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ if (!exynos_fb) {
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ return ERR_PTR(-ENOMEM);
}
- exynos_fb = to_exynos_fb(fb);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
- exynos_drm_fb_destroy(fb);
+ kfree(exynos_fb);
return ERR_PTR(-ENOENT);
}
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
- return fb;
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ if (ret) {
+ for (i = 0; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *gem_obj;
+
+ gem_obj = exynos_fb->exynos_gem_obj[i];
+ drm_gem_object_unreference_unlocked(&gem_obj->base);
+ }
+
+ kfree(exynos_fb);
+ return ERR_PTR(ret);
+ }
+
+ return &exynos_fb->fb;
}
struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
- (unsigned long)buffer->kvaddr,
- (unsigned long)buffer->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
return buffer;
}
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7466c4414cb..f433eb7533a9 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
struct exynos_drm_gem_obj *exynos_gem_obj;
};
+static int exynos_drm_fb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+ unsigned long vm_size;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > buffer->size)
+ return -EINVAL;
+
+ ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+ buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
+ .fb_mmap = exynos_drm_fb_mmap,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
+	/* map the pages into kernel virtual address space. */
+ if (!buffer->kvaddr) {
+ unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+ buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!buffer->kvaddr) {
+ DRM_ERROR("failed to map pages to kernel space.\n");
+ return -EIO;
+ }
+ }
+
/* buffer count to framebuffer always is 1 at booting time. */
exynos_drm_fb_set_buf_cnt(fb, 1);
@@ -87,8 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
- offset);
+ fbi->fix.smem_start = (unsigned long)
+ (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
- goto out;
+ goto err_release_framebuffer;
}
exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
- goto out;
+ goto err_destroy_gem;
}
helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
if (ret) {
DRM_ERROR("failed to allocate cmap.\n");
- goto out;
+ goto err_destroy_framebuffer;
}
ret = exynos_drm_fbdev_update(helper, helper->fb);
- if (ret < 0) {
- fb_dealloc_cmap(&fbi->cmap);
- goto out;
- }
+ if (ret < 0)
+ goto err_dealloc_cmap;
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+err_dealloc_cmap:
+ fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+ drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+ exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+ framebuffer_release(fbi);
/*
* if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
+ struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
+ if (exynos_gem_obj->buffer->kvaddr)
+ vunmap(exynos_gem_obj->buffer->kvaddr);
+
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 000000000000..61ea24296b52
--- /dev/null
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim
+ * Jinyoung Jeon
+ * Sangmin Lee
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
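As a reading aid for the three paths above (this mapping is inferred from the command handling in fimc_ippdrv_start()/fimc_ippdrv_stop() later in this file, it is not text from the patch itself):

/*
 * Reading aid only, not part of the patch:
 *
 *   IPP_CMD_M2M    : Memory -> FIMC -> Memory   (crop/scale/rotate/csc)
 *   IPP_CMD_WB     : FIMD   -> FIMC -> Memory   (writeback / cloned screen)
 *   IPP_CMD_OUTPUT : Memory -> FIMC -> FIMD     (rejected as invalid by this driver)
 */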
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. add check_prepare api for the right registers.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_SRC 2
+#define FIMC_MAX_DST 32
+#define FIMC_SHFACTOR 10
+#define FIMC_BUF_STOP 1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ 32
+#define FIMC_WIDTH_ITU_709 1280
+#define FIMC_REFRESH_MAX 60
+#define FIMC_REFRESH_MIN 12
+#define FIMC_CROP_MAX 8192
+#define FIMC_CROP_MIN 32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct fimc_context, ippdrv);
+#define fimc_read(offset) readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
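Note that the fimc_read()/fimc_write() macros above pick up the ctx pointer from the caller's scope, so every register update in this driver follows the same read-modify-write shape. Shown purely as an illustration of how a call site expands (not additional driver code):

/*
 * Illustration only: a typical call site and its expansion.
 *
 *   cfg = fimc_read(EXYNOS_CIGCTRL);    ->  readl(ctx->regs + EXYNOS_CIGCTRL)
 *   cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
 *   fimc_write(cfg, EXYNOS_CIGCTRL);    ->  writel(cfg, ctx->regs + EXYNOS_CIGCTRL)
 */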
+
+enum fimc_wb {
+ FIMC_WB_NONE,
+ FIMC_WB_A,
+ FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: unused scaler path.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+ bool range;
+ bool bypass;
+ bool up_h;
+ bool up_v;
+ u32 hratio;
+ u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * find user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+ /* scaler */
+ u32 in_hori;
+ u32 bypass;
+ /* output rotator */
+ u32 dst_h_wo_rot;
+ u32 dst_h_rot;
+ /* input rotator */
+ u32 rl_w_wo_rot;
+ u32 rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+ char *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @odr: ordering of YUV.
+ * @ver: fimc version.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: qos operations.
+ */
+struct fimc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *sclk_fimc_clk;
+ struct clk *fimc_clk;
+ struct clk *wb_clk;
+ struct clk *wb_b_clk;
+ struct fimc_scaler sc;
+ struct fimc_driverdata *ddata;
+ struct exynos_drm_ipp_pol pol;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+ cfg = fimc_read(EXYNOS_CISRCFMT);
+ cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+ if (pattern)
+ cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* s/w reset */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= (EXYNOS_CIGCTRL_SWRST);
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* s/w reset complete */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_SWRST;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+ u32 camblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ camblk_cfg = readl(SYSREG_CAMERA_BLK);
+ camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+ camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+ writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+ EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+ EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+ switch (wb) {
+ case FIMC_WB_A:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_B:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_NONE:
+ default:
+ cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+ EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+ break;
+ }
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+ struct exynos_drm_ipp_pol *pol)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+ __func__, pol->inv_pclk, pol->inv_vsync);
+ DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+ __func__, pol->inv_href, pol->inv_hsync);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+ EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+ if (pol->inv_pclk)
+ cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+ if (pol->inv_vsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+ if (pol->inv_href)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+ if (pol->inv_hsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+ else
+ cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+ bool overflow, bool level)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, level);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable) {
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+ if (overflow)
+ cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+ if (level)
+ cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+ } else
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, status, flag;
+
+ status = fimc_read(EXYNOS_CISTATUS);
+ flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+ EXYNOS_CISTATUS_OVFICR;
+
+ DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+ if (status & flag) {
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ ctx->id, status);
+ return true;
+ }
+
+ return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ cfg = fimc_read(EXYNOS_CISTATUS);
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+ if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+ return false;
+
+ cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+ fimc_write(cfg, EXYNOS_CISTATUS);
+
+ return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+ u32 cfg;
+ int frame_cnt, buf_id;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CISTATUS2);
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+ if (frame_cnt == 0)
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+ DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+ if (frame_cnt == 0) {
+ DRM_ERROR("failed to get frame count.\n");
+ return -EIO;
+ }
+
+ buf_id = frame_cnt - 1;
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+ else
+ cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+ EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12MT:
+ cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg1, cfg2;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg1 = fimc_read(EXYNOS_MSCTRL);
+ cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+ cfg2 = fimc_read(EXYNOS_CITRGFMT);
+ cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+ EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+ cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg1, EXYNOS_MSCTRL);
+ fimc_write(cfg2, EXYNOS_CITRGFMT);
+ *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ u32 cfg, h1, h2, v1, v2;
+
+ /* cropped image */
+ h1 = pos->x;
+ h2 = sz->hsize - pos->w - pos->x;
+ v1 = pos->y;
+ v2 = sz->vsize - pos->h - pos->y;
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+ DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+ h1, h2, v1, v2);
+
+ /*
+ * set window offset 1, 2 size
+ * check figure 43-21 in user manual
+ */
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+ EXYNOS_CIWDOFST_WINVEROFST_MASK);
+ cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+ EXYNOS_CIWDOFST_WINVEROFST(v1));
+ cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+ EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+ fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+ return 0;
+}
+
+static int fimc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGISIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+ pos->x, pos->y, pos->w, pos->h);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* set input DMA image size */
+ cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+ cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+ EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+ cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+ EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+ fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+ /*
+ * set input FIFO image size
+ * for now, we support only ITU601 8 bit mode
+ */
+ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+ EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIIYOFF);
+ cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICBOFF);
+ cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIICROFF);
+
+ return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_SRC) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_SRC];
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIIYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIICBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIICRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+ .set_fmt = fimc_src_set_fmt,
+ .set_transf = fimc_src_set_transf,
+ .set_size = fimc_src_set_size,
+ .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+ EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+ EXYNOS_CIOCTRL_ORDER422_MASK |
+ EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+ switch (fmt) {
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+ cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+
+ return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = fimc_read(EXYNOS_CIEXTEN);
+
+ if (fmt == DRM_FORMAT_AYUV) {
+ cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+ } else {
+ cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+ fimc_write(cfg, EXYNOS_CIEXTEN);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+ break;
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV21:
+ cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
+ fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ }
+
+ cfg = fimc_read(EXYNOS_CIDMAPARAM);
+ cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+ if (fmt == DRM_FORMAT_NV12MT)
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+ else
+ cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+ fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+ return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip, bool *swap)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+ cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+ EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+ EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+ *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+ return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+ DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+ if (src >= dst * 64) {
+ DRM_ERROR("failed to make ratio and shift.\n");
+ return -EINVAL;
+ } else if (src >= dst * 32) {
+ *ratio = 32;
+ *shift = 5;
+ } else if (src >= dst * 16) {
+ *ratio = 16;
+ *shift = 4;
+ } else if (src >= dst * 8) {
+ *ratio = 8;
+ *shift = 3;
+ } else if (src >= dst * 4) {
+ *ratio = 4;
+ *shift = 2;
+ } else if (src >= dst * 2) {
+ *ratio = 2;
+ *shift = 1;
+ } else {
+ *ratio = 1;
+ *shift = 0;
+ }
+
+ return 0;
+}
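fimc_get_ratio_shift() selects the largest power-of-two pre-scale factor (up to 32:1) that still leaves the destination reachable, returning it both as a divisor and as a shift count. A small stand-alone sketch of the same selection logic, with one worked case; the numbers are illustrative only and are not taken from the patch.

/* Stand-alone illustration of the pre-scaler ratio/shift selection above. */
#include <stdio.h>

static int ratio_shift(unsigned int src, unsigned int dst,
		       unsigned int *ratio, unsigned int *shift)
{
	if (src >= dst * 64)
		return -1;	/* more than 32:1 cannot be pre-scaled */

	for (*shift = 5; *shift > 0; (*shift)--)
		if (src >= dst << *shift)
			break;
	*ratio = 1u << *shift;	/* falls back to 1:1 when no downscale is needed */
	return 0;
}

int main(void)
{
	unsigned int ratio, shift;

	/* 1920 -> 300: 1920 >= 300*4 but < 300*8, so ratio=4, shift=2. */
	if (!ratio_shift(1920, 300, &ratio, &shift))
		printf("ratio=%u shift=%u\n", ratio, shift);
	return 0;
}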
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+ struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, cfg_ext, shfactor;
+ u32 pre_dst_width, pre_dst_height;
+ u32 pre_hratio, hfactor, pre_vratio, vfactor;
+ int ret = 0;
+ u32 src_w, src_h, dst_w, dst_h;
+
+ cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+ if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+ src_w = src->h;
+ src_h = src->w;
+ } else {
+ src_w = src->w;
+ src_h = src->h;
+ }
+
+ if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+ dst_w = dst->h;
+ dst_h = dst->w;
+ } else {
+ dst_w = dst->w;
+ dst_h = dst->h;
+ }
+
+ ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+ return ret;
+ }
+
+ ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+ if (ret) {
+ dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+ return ret;
+ }
+
+ pre_dst_width = src_w / pre_hratio;
+ pre_dst_height = src_h / pre_vratio;
+ DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+ pre_dst_width, pre_dst_height);
+ DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+ __func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
+ sc->hratio = (src_w << 14) / (dst_w << hfactor);
+ sc->vratio = (src_h << 14) / (dst_h << vfactor);
+ sc->up_h = (dst_w >= src_w) ? true : false;
+ sc->up_v = (dst_h >= src_h) ? true : false;
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+ shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+ DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+ cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+ EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+ EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+ fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+ cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+ EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+ fimc_write(cfg, EXYNOS_CISCPREDST);
+
+ return ret;
+}
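fimc_set_prescaler() splits the total scale factor into the power-of-two pre-scale computed above and a 14-bit fixed-point remainder for the main scaler: hratio = (src_w << 14) / (dst_w << hfactor). A quick stand-alone check with illustrative numbers (not taken from the patch):

/* Illustrative check of the 14-bit fixed-point main-scaler ratio (not driver code). */
#include <stdio.h>

int main(void)
{
	unsigned int src_w = 1920, dst_w = 300, hfactor = 2;	/* example: 4:1 pre-scale */
	unsigned int hratio = (src_w << 14) / (dst_w << hfactor);

	/* 26214 / 16384 ~= 1.6: the main scaler covers the remaining downscale
	 * on top of the 2^hfactor pre-scale (1.6 * 4 ~= 1920 / 300). */
	printf("hratio = %u (~%.3f)\n", hratio, hratio / 16384.0);
	return 0;
}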
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+ u32 cfg, cfg_ext;
+
+ DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+ __func__, sc->hratio, sc->vratio);
+
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+ EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+ EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+ EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+ EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+ if (sc->range)
+ cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+ EXYNOS_CISCCTRL_CSCY2R_WIDE);
+ if (sc->bypass)
+ cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+ if (sc->up_h)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+ if (sc->up_v)
+ cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+ cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+ EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+ cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+ cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+ EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+ fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+ EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+ fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h);
+
+ /* CSC ITU */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+ if (sz->hsize >= FIMC_WIDTH_ITU_709)
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+ else
+ cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ /* target image size */
+ cfg = fimc_read(EXYNOS_CITRGFMT);
+ cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+ EXYNOS_CITRGFMT_TARGETV_MASK);
+ cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+ EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+ fimc_write(cfg, EXYNOS_CITRGFMT);
+
+ /* target area */
+ cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+ fimc_write(cfg, EXYNOS_CITAREA);
+
+ /* offset Y(RGB), Cb, Cr */
+ cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOYOFF);
+ cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCBOFF);
+ cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+ EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+ fimc_write(cfg, EXYNOS_CIOCROFF);
+
+ return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+ u32 cfg, i, buf_num = 0;
+ u32 mask = 0x00000001;
+
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ for (i = 0; i < FIMC_REG_SZ; i++)
+ if (cfg & (mask << i))
+ buf_num++;
+
+ DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+ return buf_num;
+}
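fimc_dst_get_buf_seq() simply counts how many of the 32 per-buffer enable bits in CIFCNTSEQ are set, i.e. it is a population count (hweight32() in kernel terms). A stand-alone equivalent of the loop, for illustration only:

/* Stand-alone equivalent of the buffer-count loop above (illustration only). */
#include <stdio.h>

static unsigned int buf_count(unsigned int cfg)
{
	unsigned int n = 0;

	while (cfg) {
		n += cfg & 1;	/* add one for every set enable bit */
		cfg >>= 1;
	}
	return n;		/* same result as hweight32(cfg) */
}

int main(void)
{
	printf("%u\n", buf_count(0x0000000b));	/* bits 0, 1 and 3 set -> prints 3 */
	return 0;
}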
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool enable;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+ buf_id, buf_type);
+
+ mutex_lock(&ctx->lock);
+
+ /* mask register set */
+ cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ enable = true;
+ break;
+ case IPP_BUF_DEQUEUE:
+ enable = false;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ /* sequence id */
+ cfg &= (~mask);
+ cfg |= (enable << buf_id);
+ fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+ /* interrupt enable */
+ if (buf_type == IPP_BUF_ENQUEUE &&
+ fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+ fimc_handle_irq(ctx, true, false, true);
+
+ /* interrupt disable */
+ if (buf_type == IPP_BUF_DEQUEUE &&
+ fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+ fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
+}
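A note on the thresholds used above, inferred from the defines at the top of the file: the frame-end interrupt is only armed once at least FIMC_BUF_START (2) destination buffers are enqueued, and is disarmed again when the queue drains to FIMC_BUF_STOP (1) or fewer.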
+
+static int fimc_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_type buf_type)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+ property->prop_id, buf_id, buf_type);
+
+ if (buf_id > FIMC_MAX_DST) {
+ dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+ return -ENOMEM;
+ }
+
+ /* address register set */
+ switch (buf_type) {
+ case IPP_BUF_ENQUEUE:
+ config = &property->config[EXYNOS_DRM_OPS_DST];
+
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+ EXYNOS_CIOYSA(buf_id));
+
+ if (config->fmt == DRM_FORMAT_YVU420) {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCRSA(buf_id));
+ } else {
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+ EXYNOS_CIOCBSA(buf_id));
+ fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+ EXYNOS_CIOCRSA(buf_id));
+ }
+ break;
+ case IPP_BUF_DEQUEUE:
+ fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+ fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+ break;
+ default:
+ /* bypass */
+ break;
+ }
+
+ return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+ .set_fmt = fimc_dst_set_fmt,
+ .set_transf = fimc_dst_set_transf,
+ .set_size = fimc_dst_set_size,
+ .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ if (enable) {
+ clk_enable(ctx->sclk_fimc_clk);
+ clk_enable(ctx->fimc_clk);
+ clk_enable(ctx->wb_clk);
+ ctx->suspended = false;
+ } else {
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_disable(ctx->fimc_clk);
+ clk_disable(ctx->wb_clk);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+ struct fimc_context *ctx = dev_id;
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_event_work *event_work =
+ c_node->event_work;
+ int buf_id;
+
+ DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+ fimc_clear_irq(ctx);
+ if (fimc_check_ovf(ctx))
+ return IRQ_NONE;
+
+ if (!fimc_check_frame_end(ctx))
+ return IRQ_NONE;
+
+ buf_id = fimc_get_buf_id(ctx);
+ if (buf_id < 0)
+ return IRQ_HANDLED;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+ DRM_ERROR("failed to dequeue.\n");
+ return IRQ_HANDLED;
+ }
+
+ event_work->ippdrv = ippdrv;
+ event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+ queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+ return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_prop_list *prop_list;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+ if (!prop_list) {
+ DRM_ERROR("failed to alloc property list.\n");
+ return -ENOMEM;
+ }
+
+ prop_list->version = 1;
+ prop_list->writeback = 1;
+ prop_list->refresh_min = FIMC_REFRESH_MIN;
+ prop_list->refresh_max = FIMC_REFRESH_MAX;
+ prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+ (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+ (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+ prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+ (1 << EXYNOS_DRM_DEGREE_90) |
+ (1 << EXYNOS_DRM_DEGREE_180) |
+ (1 << EXYNOS_DRM_DEGREE_270);
+ prop_list->csc = 1;
+ prop_list->crop = 1;
+ prop_list->crop_max.hsize = FIMC_CROP_MAX;
+ prop_list->crop_max.vsize = FIMC_CROP_MAX;
+ prop_list->crop_min.hsize = FIMC_CROP_MIN;
+ prop_list->crop_min.vsize = FIMC_CROP_MIN;
+ prop_list->scale = 1;
+ prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+ prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+ prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+ prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+ ippdrv->prop_list = prop_list;
+
+ return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+ switch (flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ return true;
+ default:
+ DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+ return false;
+ }
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos *pos;
+ struct drm_exynos_sz *sz;
+ bool swap;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ /* check for flip */
+ if (!fimc_check_drm_flip(config->flip)) {
+ DRM_ERROR("invalid flip.\n");
+ goto err_property;
+ }
+
+ /* check for degree */
+ switch (config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ break;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ swap = false;
+ break;
+ default:
+ DRM_ERROR("invalid degree.\n");
+ goto err_property;
+ }
+
+ /* check for buffer bound */
+ if ((pos->x + pos->w > sz->hsize) ||
+ (pos->y + pos->h > sz->vsize)) {
+ DRM_ERROR("out of buf bound.\n");
+ goto err_property;
+ }
+
+ /* check for crop */
+ if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+ if (swap) {
+ if ((pos->h < pp->crop_min.hsize) ||
+ (sz->vsize > pp->crop_max.hsize) ||
+ (pos->w < pp->crop_min.vsize) ||
+ (sz->hsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->crop_min.hsize) ||
+ (sz->hsize > pp->crop_max.hsize) ||
+ (pos->h < pp->crop_min.vsize) ||
+ (sz->vsize > pp->crop_max.vsize)) {
+ DRM_ERROR("out of crop size.\n");
+ goto err_property;
+ }
+ }
+ }
+
+ /* check for scale */
+ if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+ if (swap) {
+ if ((pos->h < pp->scale_min.hsize) ||
+ (sz->vsize > pp->scale_max.hsize) ||
+ (pos->w < pp->scale_min.vsize) ||
+ (sz->hsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ } else {
+ if ((pos->w < pp->scale_min.hsize) ||
+ (sz->hsize > pp->scale_max.hsize) ||
+ (pos->h < pp->scale_min.vsize) ||
+ (sz->vsize > pp->scale_max.vsize)) {
+ DRM_ERROR("out of scale size.\n");
+ goto err_property;
+ }
+ }
+ }
+ }
+
+ return 0;
+
+err_property:
+ for_each_ipp_ops(i) {
+ if ((i == EXYNOS_DRM_OPS_SRC) &&
+ (property->cmd == IPP_CMD_WB))
+ continue;
+
+ config = &property->config[i];
+ pos = &config->pos;
+ sz = &config->sz;
+
+ DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+ i ? "dst" : "src", config->flip, config->degree,
+ pos->x, pos->y, pos->w, pos->h,
+ sz->hsize, sz->vsize);
+ }
+
+ return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s:\n", __func__);
+
+ for (i = 0; i < FIMC_MAX_SRC; i++) {
+ fimc_write(0, EXYNOS_CIIYSA(i));
+ fimc_write(0, EXYNOS_CIICBSA(i));
+ fimc_write(0, EXYNOS_CIICRSA(i));
+ }
+
+ for (i = 0; i < FIMC_MAX_DST; i++) {
+ fimc_write(0, EXYNOS_CIOYSA(i));
+ fimc_write(0, EXYNOS_CIOCBSA(i));
+ fimc_write(0, EXYNOS_CIOCRSA(i));
+ }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* reset h/w block */
+ fimc_sw_reset(ctx, false);
+
+ /* reset scaler capability */
+ memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+ fimc_clear_addr(ctx);
+
+ return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_config *config;
+ struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_set_wb set_wb;
+ int ret, i;
+ u32 cfg0, cfg1;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ if (!c_node) {
+ DRM_ERROR("failed to get c_node.\n");
+ return -EINVAL;
+ }
+
+ property = &c_node->property;
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return -EINVAL;
+ }
+
+ fimc_handle_irq(ctx, true, false, true);
+
+ for_each_ipp_ops(i) {
+ config = &property->config[i];
+ img_pos[i] = config->pos;
+ }
+
+ ret = fimc_set_prescaler(ctx, &ctx->sc,
+ &img_pos[EXYNOS_DRM_OPS_SRC],
+ &img_pos[EXYNOS_DRM_OPS_DST]);
+ if (ret) {
+ dev_err(dev, "failed to set precalser.\n");
+ return ret;
+ }
+
+ /* If set true, jpeg data of the screen can be saved */
+ fimc_handle_jpeg(ctx, false);
+ fimc_set_scaler(ctx, &ctx->sc);
+ fimc_set_polarity(ctx, &ctx->pol);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+ fimc_handle_lastend(ctx, false);
+
+ /* setup dma */
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ fimc_set_type_ctrl(ctx, FIMC_WB_A);
+ fimc_handle_lastend(ctx, true);
+
+ /* setup FIMD */
+ fimc_set_camblk_fimd0_wb(ctx);
+
+ set_wb.enable = 1;
+ set_wb.refresh = property->refresh_rate;
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ ret = -EINVAL;
+ dev_err(dev, "invalid operations.\n");
+ return ret;
+ }
+
+ /* Reset status */
+ fimc_write(0x0, EXYNOS_CISTATUS);
+
+ cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+ cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+ /* Scaler */
+ cfg1 = fimc_read(EXYNOS_CISCCTRL);
+ cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+ cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+ EXYNOS_CISCCTRL_SCALERSTART);
+
+ fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+ /* Enable image capture */
+ cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+ fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+ /* Disable frame end irq */
+ cfg0 = fimc_read(EXYNOS_CIGCTRL);
+ cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+ cfg0 = fimc_read(EXYNOS_CIOCTRL);
+ cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+ fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+ if (cmd == IPP_CMD_M2M) {
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+
+ cfg0 = fimc_read(EXYNOS_MSCTRL);
+ cfg0 |= EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg0, EXYNOS_MSCTRL);
+ }
+
+ return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+ switch (cmd) {
+ case IPP_CMD_M2M:
+ /* Source clear */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+ cfg &= ~EXYNOS_MSCTRL_ENVID;
+ fimc_write(cfg, EXYNOS_MSCTRL);
+ break;
+ case IPP_CMD_WB:
+ exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+ break;
+ case IPP_CMD_OUTPUT:
+ default:
+ dev_err(dev, "invalid operations.\n");
+ break;
+ }
+
+ fimc_handle_irq(ctx, false, false, true);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+ /* Scaler disable */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+
+ /* Disable image capture */
+ cfg = fimc_read(EXYNOS_CIIMGCPT);
+ cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+ fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+ /* Enable frame end irq */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx;
+ struct clk *parent_clk;
+ struct resource *res;
+ struct exynos_drm_ippdrv *ippdrv;
+ struct exynos_drm_fimc_pdata *pdata;
+ struct fimc_driverdata *ddata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data specified.\n");
+ return -EINVAL;
+ }
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ddata = (struct fimc_driverdata *)
+ platform_get_device_id(pdev)->driver_data;
+
+ /* clock control */
+ ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+ if (IS_ERR(ctx->sclk_fimc_clk)) {
+ dev_err(dev, "failed to get src fimc clock.\n");
+ ret = PTR_ERR(ctx->sclk_fimc_clk);
+ goto err_ctx;
+ }
+ clk_enable(ctx->sclk_fimc_clk);
+
+ ctx->fimc_clk = clk_get(dev, "fimc");
+ if (IS_ERR(ctx->fimc_clk)) {
+ dev_err(dev, "failed to get fimc clock.\n");
+ ret = PTR_ERR(ctx->fimc_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ goto err_ctx;
+ }
+
+ ctx->wb_clk = clk_get(dev, "pxl_async0");
+ if (IS_ERR(ctx->wb_clk)) {
+ dev_err(dev, "failed to get writeback a clock.\n");
+ ret = PTR_ERR(ctx->wb_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ goto err_ctx;
+ }
+
+ ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+ if (IS_ERR(ctx->wb_b_clk)) {
+ dev_err(dev, "failed to get writeback b clock.\n");
+ ret = PTR_ERR(ctx->wb_b_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ goto err_ctx;
+ }
+
+ parent_clk = clk_get(dev, ddata->parent_clk);
+
+ if (IS_ERR(parent_clk)) {
+ dev_err(dev, "failed to get parent clock.\n");
+ ret = PTR_ERR(parent_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+ goto err_ctx;
+ }
+
+ if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+ dev_err(dev, "failed to set parent.\n");
+ ret = -EINVAL;
+ clk_put(parent_clk);
+ clk_disable(ctx->sclk_fimc_clk);
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+ goto err_ctx;
+ }
+
+ clk_put(parent_clk);
+ clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+ /* resource memory */
+ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ctx->regs_res) {
+ dev_err(dev, "failed to find registers.\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+ ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+ if (!ctx->regs) {
+ dev_err(dev, "failed to map registers.\n");
+ ret = -ENXIO;
+ goto err_clk;
+ }
+
+ /* resource irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "failed to request irq resource.\n");
+ ret = -ENOENT;
+ goto err_get_regs;
+ }
+
+ ctx->irq = res->start;
+ ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+ IRQF_ONESHOT, "drm_fimc", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to request irq.\n");
+ goto err_get_regs;
+ }
+
+ /* context initialization */
+ ctx->id = pdev->id;
+ ctx->pol = pdata->pol;
+ ctx->ddata = ddata;
+
+ ippdrv = &ctx->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+ ippdrv->check_property = fimc_ippdrv_check_property;
+ ippdrv->reset = fimc_ippdrv_reset;
+ ippdrv->start = fimc_ippdrv_start;
+ ippdrv->stop = fimc_ippdrv_stop;
+ ret = fimc_init_prop_list(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to init property list.\n");
+ goto err_get_irq;
+ }
+
+ DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+ (int)ippdrv);
+
+ mutex_init(&ctx->lock);
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = exynos_drm_ippdrv_register(ippdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm fimc device.\n");
+ goto err_ippdrv_register;
+ }
+
+ dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+ return 0;
+
+err_ippdrv_register:
+ devm_kfree(dev, ippdrv->prop_list);
+ pm_runtime_disable(dev);
+err_get_irq:
+ free_irq(ctx->irq, ctx);
+err_get_regs:
+ devm_iounmap(dev, ctx->regs);
+err_clk:
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+err_ctx:
+ devm_kfree(dev, ctx);
+ return ret;
+}
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fimc_context *ctx = get_fimc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+ devm_kfree(dev, ippdrv->prop_list);
+ exynos_drm_ippdrv_unregister(ippdrv);
+ mutex_destroy(&ctx->lock);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+
+ free_irq(ctx->irq, ctx);
+ devm_iounmap(dev, ctx->regs);
+
+ clk_put(ctx->sclk_fimc_clk);
+ clk_put(ctx->fimc_clk);
+ clk_put(ctx->wb_clk);
+ clk_put(ctx->wb_b_clk);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ if (!pm_runtime_suspended(dev))
+ return fimc_clk_ctrl(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_context *ctx = get_fimc_context(dev);
+
+ DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+ return fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+ .parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4410_fimc_data = {
+ .parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+ {
+ .name = "exynos4210-fimc",
+ .driver_data = (unsigned long)&exynos4210_fimc_data,
+ }, {
+ .name = "exynos4412-fimc",
+ .driver_data = (unsigned long)&exynos4410_fimc_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = __devexit_p(fimc_remove),
+ .id_table = fimc_driver_ids,
+ .driver = {
+ .name = "exynos-drm-fimc",
+ .owner = THIS_MODULE,
+ .pm = &fimc_pm_ops,
+ },
+};
+
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 000000000000..dc970fa0d888
--- /dev/null
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim
+ * Jinyoung Jeon
+ * Sangmin Lee
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e08478f19f1a..bf0d9baca2bc 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include
#include
#include